author    Nicola Zaghen <nicola.zaghen@imgtec.com>    2018-05-14 12:53:11 +0000
committer Nicola Zaghen <nicola.zaghen@imgtec.com>    2018-05-14 12:53:11 +0000
commit    0818e789cb58fbf6b5e225a3f1c722294881c445 (patch)
tree      aee1e0200e749e7a4ec1f434f8f35ab36314bf95
parent    fc2b453631fa152b81c860ce991f4a85dbf94e6f (diff)
download  llvm-0818e789cb58fbf6b5e225a3f1c722294881c445.tar.gz
Rename DEBUG macro to LLVM_DEBUG.
The DEBUG() macro is very generic so it might clash with other projects.

The renaming was done as follows:
- git grep -l 'DEBUG' | xargs sed -i 's/\bDEBUG\s\?(/LLVM_DEBUG(/g'
- git diff -U0 master | ../clang/tools/clang-format/clang-format-diff.py -i -p1 -style LLVM
- Manual change to APInt
- Manually change DOCS as the regex doesn't match it.

In the transition period the DEBUG() macro is still present and aliased
to the LLVM_DEBUG() one.

Differential Revision: https://reviews.llvm.org/D43624

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@332240 91177308-0d34-0410-b5e6-96231b3b80d8
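For a pass author, the practical effect of the change is just a new spelling at the
call site, with the old one forwarded during the transition period. A minimal sketch
(the forwarding alias shown in the comment and the "mypass" debug type are
illustrative, not the exact upstream code):

    #include "llvm/Support/Debug.h"        // provides LLVM_DEBUG() and dbgs()
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "mypass"            // illustrative debug type

    // During the transition the old spelling is presumably kept as a thin
    // forwarding alias, roughly:  #define DEBUG(X) LLVM_DEBUG(X)
    // so call sites that have not been converted yet keep compiling.

    static void reportCount(unsigned CalledCount) {
      // Emitted only in asserts builds, and only when the tool is run
      // with -debug (or -debug-only=mypass).
      LLVM_DEBUG(llvm::dbgs() << "calledCount: " << CalledCount << "\n");
    }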
-rw-r--r--docs/Bugpoint.rst16
-rw-r--r--docs/CommandGuide/opt.rst2
-rw-r--r--docs/CommandLine.rst6
-rw-r--r--docs/ProgrammersManual.rst18
-rw-r--r--include/llvm/Analysis/BlockFrequencyInfoImpl.h47
-rw-r--r--include/llvm/Analysis/CGSCCPassManager.h46
-rw-r--r--include/llvm/Analysis/RegionInfoImpl.h2
-rw-r--r--include/llvm/Analysis/SparsePropagation.h10
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h8
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h89
-rw-r--r--include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h83
-rw-r--r--include/llvm/Support/Debug.h25
-rw-r--r--include/llvm/Support/GenericDomTreeConstruction.h251
-rw-r--r--include/llvm/Support/UnicodeCharRanges.h16
-rw-r--r--include/llvm/Transforms/InstCombine/InstCombineWorklist.h5
-rw-r--r--include/llvm/Transforms/Utils/SSAUpdaterImpl.h2
-rw-r--r--lib/Analysis/BlockFrequencyInfoImpl.cpp80
-rw-r--r--lib/Analysis/BranchProbabilityInfo.cpp18
-rw-r--r--lib/Analysis/CFLAndersAliasAnalysis.cpp5
-rw-r--r--lib/Analysis/CFLSteensAliasAnalysis.cpp5
-rw-r--r--lib/Analysis/CGSCCPassManager.cpp35
-rw-r--r--lib/Analysis/CallGraphSCCPass.cpp50
-rw-r--r--lib/Analysis/CodeMetrics.cpp2
-rw-r--r--lib/Analysis/DemandedBits.cpp8
-rw-r--r--lib/Analysis/DependenceAnalysis.cpp479
-rw-r--r--lib/Analysis/IVUsers.cpp17
-rw-r--r--lib/Analysis/IndirectCallPromotionAnalysis.cpp10
-rw-r--r--lib/Analysis/InlineCost.cpp14
-rw-r--r--lib/Analysis/LazyCallGraph.cpp19
-rw-r--r--lib/Analysis/LazyValueInfo.cpp55
-rw-r--r--lib/Analysis/LoopAccessAnalysis.cpp198
-rw-r--r--lib/Analysis/LoopPass.cpp4
-rw-r--r--lib/Analysis/MemoryBuiltins.cpp15
-rw-r--r--lib/Analysis/MemoryDependenceAnalysis.cpp12
-rw-r--r--lib/Analysis/MemorySSA.cpp25
-rw-r--r--lib/Analysis/RegionPass.cpp13
-rw-r--r--lib/Analysis/ScalarEvolution.cpp100
-rw-r--r--lib/CodeGen/AggressiveAntiDepBreaker.cpp152
-rw-r--r--lib/CodeGen/AllocationOrder.cpp2
-rw-r--r--lib/CodeGen/AsmPrinter/DIE.cpp5
-rw-r--r--lib/CodeGen/AsmPrinter/DIEHash.cpp16
-rw-r--r--lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp4
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfDebug.cpp4
-rw-r--r--lib/CodeGen/AtomicExpandPass.cpp8
-rw-r--r--lib/CodeGen/BranchFolding.cpp63
-rw-r--r--lib/CodeGen/BranchRelaxation.cpp41
-rw-r--r--lib/CodeGen/BreakFalseDeps.cpp8
-rw-r--r--lib/CodeGen/CalcSpillWeights.cpp4
-rw-r--r--lib/CodeGen/CodeGenPrepare.cpp135
-rw-r--r--lib/CodeGen/CriticalAntiDepBreaker.cpp18
-rw-r--r--lib/CodeGen/DFAPacketizer.cpp28
-rw-r--r--lib/CodeGen/DeadMachineInstructionElim.cpp2
-rw-r--r--lib/CodeGen/DetectDeadLanes.cpp35
-rw-r--r--lib/CodeGen/EarlyIfConversion.cpp97
-rw-r--r--lib/CodeGen/ExecutionDomainFix.cpp14
-rw-r--r--lib/CodeGen/ExpandPostRAPseudos.cpp31
-rw-r--r--lib/CodeGen/FaultMaps.cpp22
-rw-r--r--lib/CodeGen/GlobalISel/Combiner.cpp6
-rw-r--r--lib/CodeGen/GlobalISel/IRTranslator.cpp2
-rw-r--r--lib/CodeGen/GlobalISel/InstructionSelect.cpp10
-rw-r--r--lib/CodeGen/GlobalISel/Legalizer.cpp10
-rw-r--r--lib/CodeGen/GlobalISel/LegalizerHelper.cpp18
-rw-r--r--lib/CodeGen/GlobalISel/LegalizerInfo.cpp33
-rw-r--r--lib/CodeGen/GlobalISel/Localizer.cpp18
-rw-r--r--lib/CodeGen/GlobalISel/RegBankSelect.cpp44
-rw-r--r--lib/CodeGen/GlobalISel/RegisterBankInfo.cpp22
-rw-r--r--lib/CodeGen/GlobalISel/Utils.cpp2
-rw-r--r--lib/CodeGen/GlobalMerge.cpp4
-rw-r--r--lib/CodeGen/IfConversion.cpp53
-rw-r--r--lib/CodeGen/InlineSpiller.cpp83
-rw-r--r--lib/CodeGen/InterleavedAccessPass.cpp6
-rw-r--r--lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp12
-rw-r--r--lib/CodeGen/LiveDebugValues.cpp42
-rw-r--r--lib/CodeGen/LiveDebugVariables.cpp46
-rw-r--r--lib/CodeGen/LiveIntervals.cpp45
-rw-r--r--lib/CodeGen/LiveRangeEdit.cpp16
-rw-r--r--lib/CodeGen/LiveRangeShrink.cpp2
-rw-r--r--lib/CodeGen/LiveRegMatrix.cpp36
-rw-r--r--lib/CodeGen/LocalStackSlotAllocation.cpp17
-rw-r--r--lib/CodeGen/MIRCanonicalizerPass.cpp89
-rw-r--r--lib/CodeGen/MachineBasicBlock.cpp14
-rw-r--r--lib/CodeGen/MachineBlockPlacement.cpp161
-rw-r--r--lib/CodeGen/MachineCSE.cpp17
-rw-r--r--lib/CodeGen/MachineCombiner.cpp51
-rw-r--r--lib/CodeGen/MachineCopyPropagation.cpp34
-rw-r--r--lib/CodeGen/MachineFrameInfo.cpp6
-rw-r--r--lib/CodeGen/MachineLICM.cpp45
-rw-r--r--lib/CodeGen/MachineOutliner.cpp17
-rw-r--r--lib/CodeGen/MachinePipeliner.cpp80
-rw-r--r--lib/CodeGen/MachineRegionInfo.cpp2
-rw-r--r--lib/CodeGen/MachineSSAUpdater.cpp2
-rw-r--r--lib/CodeGen/MachineScheduler.cpp362
-rw-r--r--lib/CodeGen/MachineSink.cpp34
-rw-r--r--lib/CodeGen/MachineTraceMetrics.cpp54
-rw-r--r--lib/CodeGen/MacroFusion.cpp20
-rw-r--r--lib/CodeGen/PHIElimination.cpp23
-rw-r--r--lib/CodeGen/PeepholeOptimizer.cpp49
-rw-r--r--lib/CodeGen/PostRASchedulerList.cpp44
-rw-r--r--lib/CodeGen/ProcessImplicitDefs.cpp16
-rw-r--r--lib/CodeGen/PrologEpilogInserter.cpp24
-rw-r--r--lib/CodeGen/ReachingDefAnalysis.cpp14
-rw-r--r--lib/CodeGen/RegAllocBase.cpp12
-rw-r--r--lib/CodeGen/RegAllocBasic.cpp13
-rw-r--r--lib/CodeGen/RegAllocFast.cpp95
-rw-r--r--lib/CodeGen/RegAllocGreedy.cpp196
-rw-r--r--lib/CodeGen/RegAllocPBQP.cpp22
-rw-r--r--lib/CodeGen/RegUsageInfoCollector.cpp16
-rw-r--r--lib/CodeGen/RegUsageInfoPropagate.cpp25
-rw-r--r--lib/CodeGen/RegisterClassInfo.cpp2
-rw-r--r--lib/CodeGen/RegisterCoalescer.cpp196
-rw-r--r--lib/CodeGen/RegisterScavenging.cpp23
-rw-r--r--lib/CodeGen/RenameIndependentSubregs.cpp14
-rw-r--r--lib/CodeGen/ResetMachineFunctionPass.cpp2
-rw-r--r--lib/CodeGen/SafeStack.cpp48
-rw-r--r--lib/CodeGen/SafeStackColoring.cpp16
-rw-r--r--lib/CodeGen/SafeStackLayout.cpp28
-rw-r--r--lib/CodeGen/ScheduleDAGInstrs.cpp41
-rw-r--r--lib/CodeGen/ScoreboardHazardRecognizer.cpp16
-rw-r--r--lib/CodeGen/SelectionDAG/DAGCombiner.cpp84
-rw-r--r--lib/CodeGen/SelectionDAG/FastISel.cpp13
-rw-r--r--lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeDAG.cpp77
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp12
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp16
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeTypes.cpp17
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp17
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp32
-rw-r--r--lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp36
-rw-r--r--lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp142
-rw-r--r--lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp12
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAG.cpp10
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp84
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp185
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp2
-rw-r--r--lib/CodeGen/ShrinkWrap.cpp52
-rw-r--r--lib/CodeGen/SjLjEHPrepare.cpp4
-rw-r--r--lib/CodeGen/SlotIndexes.cpp8
-rw-r--r--lib/CodeGen/SplitKit.cpp170
-rw-r--r--lib/CodeGen/StackColoring.cpp72
-rw-r--r--lib/CodeGen/StackMapLivenessAnalysis.cpp8
-rw-r--r--lib/CodeGen/StackMaps.cpp22
-rw-r--r--lib/CodeGen/StackSlotColoring.cpp29
-rw-r--r--lib/CodeGen/TailDuplicator.cpp20
-rw-r--r--lib/CodeGen/TargetRegisterInfo.cpp3
-rw-r--r--lib/CodeGen/TwoAddressInstructionPass.cpp47
-rw-r--r--lib/CodeGen/VirtRegMap.cpp17
-rw-r--r--lib/CodeGen/WinEHPrepare.cpp31
-rw-r--r--lib/ExecutionEngine/ExecutionEngine.cpp13
-rw-r--r--lib/ExecutionEngine/Interpreter/Execution.cpp10
-rw-r--r--lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp24
-rw-r--r--lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp9
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp84
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp7
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp87
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp16
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h45
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h63
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h13
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp44
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h12
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h12
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h2
-rw-r--r--lib/IR/ConstantsContext.h4
-rw-r--r--lib/IR/Core.cpp19
-rw-r--r--lib/IR/Pass.cpp8
-rw-r--r--lib/IR/SafepointIRVerifier.cpp21
-rw-r--r--lib/IR/ValueSymbolTable.cpp7
-rw-r--r--lib/LTO/ThinLTOCodeGenerator.cpp6
-rw-r--r--lib/MC/MachObjectWriter.cpp11
-rw-r--r--lib/MC/WasmObjectWriter.cpp59
-rw-r--r--lib/Object/WasmObjectFile.cpp4
-rw-r--r--lib/ProfileData/Coverage/CoverageMapping.cpp17
-rw-r--r--lib/ProfileData/Coverage/CoverageMappingReader.cpp2
-rw-r--r--lib/Support/APInt.cpp72
-rw-r--r--lib/Support/CachePruning.cpp28
-rw-r--r--lib/Support/CommandLine.cpp6
-rw-r--r--lib/Support/DAGDeltaAlgorithm.cpp124
-rw-r--r--lib/Support/Debug.cpp7
-rw-r--r--lib/Support/RandomNumberGenerator.cpp6
-rw-r--r--lib/Target/AArch64/AArch64A53Fix835769.cpp25
-rw-r--r--lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp61
-rw-r--r--lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp6
-rw-r--r--lib/Target/AArch64/AArch64CollectLOH.cpp40
-rw-r--r--lib/Target/AArch64/AArch64CondBrTuning.cpp29
-rw-r--r--lib/Target/AArch64/AArch64ConditionOptimizer.cpp30
-rw-r--r--lib/Target/AArch64/AArch64ConditionalCompares.cpp91
-rw-r--r--lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp25
-rw-r--r--lib/Target/AArch64/AArch64FalkorHWPFFix.cpp14
-rw-r--r--lib/Target/AArch64/AArch64FrameLowering.cpp43
-rw-r--r--lib/Target/AArch64/AArch64ISelDAGToDAG.cpp18
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp133
-rw-r--r--lib/Target/AArch64/AArch64InstructionSelector.cpp107
-rw-r--r--lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp79
-rw-r--r--lib/Target/AArch64/AArch64PBQPRegAlloc.cpp24
-rw-r--r--lib/Target/AArch64/AArch64PromoteConstant.cpp64
-rw-r--r--lib/Target/AArch64/AArch64RedundantCopyElimination.cpp8
-rw-r--r--lib/Target/AArch64/AArch64StorePairSuppress.cpp12
-rw-r--r--lib/Target/AArch64/AArch64TargetTransformInfo.cpp8
-rw-r--r--lib/Target/AMDGPU/AMDGPUInline.cpp4
-rw-r--r--lib/Target/AMDGPU/AMDGPULibCalls.cpp81
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp392
-rw-r--r--lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp6
-rw-r--r--lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp47
-rw-r--r--lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp11
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp10
-rw-r--r--lib/Target/AMDGPU/AMDILCFGStructurizer.cpp247
-rw-r--r--lib/Target/AMDGPU/GCNILPSched.cpp27
-rw-r--r--lib/Target/AMDGPU/GCNIterativeScheduler.cpp65
-rw-r--r--lib/Target/AMDGPU/GCNMinRegStrategy.cpp51
-rw-r--r--lib/Target/AMDGPU/GCNSchedStrategy.cpp77
-rw-r--r--lib/Target/AMDGPU/R600ClauseMergePass.cpp6
-rw-r--r--lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp15
-rw-r--r--lib/Target/AMDGPU/R600MachineScheduler.cpp37
-rw-r--r--lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp16
-rw-r--r--lib/Target/AMDGPU/R600Packetizer.cpp6
-rw-r--r--lib/Target/AMDGPU/SIAnnotateControlFlow.cpp2
-rw-r--r--lib/Target/AMDGPU/SIFixSGPRCopies.cpp32
-rw-r--r--lib/Target/AMDGPU/SIFixVGPRCopies.cpp2
-rw-r--r--lib/Target/AMDGPU/SIFoldOperands.cpp16
-rw-r--r--lib/Target/AMDGPU/SIInsertWaitcnts.cpp44
-rw-r--r--lib/Target/AMDGPU/SILoadStoreOptimizer.cpp6
-rw-r--r--lib/Target/AMDGPU/SIMachineScheduler.cpp99
-rw-r--r--lib/Target/AMDGPU/SIOptimizeExecMasking.cpp23
-rw-r--r--lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp7
-rw-r--r--lib/Target/AMDGPU/SIPeepholeSDWA.cpp8
-rw-r--r--lib/Target/AMDGPU/SIShrinkInstructions.cpp6
-rw-r--r--lib/Target/AMDGPU/SIWholeQuadMode.cpp5
-rw-r--r--lib/Target/ARC/ARCBranchFinalize.cpp16
-rw-r--r--lib/Target/ARC/ARCFrameLowering.cpp62
-rw-r--r--lib/Target/ARC/ARCISelLowering.cpp4
-rw-r--r--lib/Target/ARC/ARCInstrInfo.cpp8
-rw-r--r--lib/Target/ARC/ARCRegisterInfo.cpp32
-rw-r--r--lib/Target/ARC/Disassembler/ARCDisassembler.cpp24
-rw-r--r--lib/Target/ARM/A15SDOptimizer.cpp19
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.cpp4
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.cpp8
-rw-r--r--lib/Target/ARM/ARMConstantIslandPass.cpp111
-rw-r--r--lib/Target/ARM/ARMFrameLowering.cpp68
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp17
-rw-r--r--lib/Target/ARM/ARMInstructionSelector.cpp45
-rw-r--r--lib/Target/ARM/ARMLoadStoreOptimizer.cpp4
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.cpp8
-rw-r--r--lib/Target/ARM/AsmParser/ARMAsmParser.cpp9
-rw-r--r--lib/Target/ARM/MLxExpansionPass.cpp22
-rw-r--r--lib/Target/ARM/Thumb2SizeReduction.cpp12
-rw-r--r--lib/Target/AVR/AVRISelDAGToDAG.cpp2
-rw-r--r--lib/Target/AVR/AsmParser/AVRAsmParser.cpp4
-rw-r--r--lib/Target/BPF/BPFISelDAGToDAG.cpp31
-rw-r--r--lib/Target/BPF/BPFMIPeephole.cpp29
-rw-r--r--lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp18
-rw-r--r--lib/Target/Hexagon/HexagonBitSimplify.cpp13
-rw-r--r--lib/Target/Hexagon/HexagonBlockRanges.cpp10
-rw-r--r--lib/Target/Hexagon/HexagonBranchRelaxation.cpp10
-rw-r--r--lib/Target/Hexagon/HexagonCommonGEP.cpp87
-rw-r--r--lib/Target/Hexagon/HexagonConstExtenders.cpp26
-rw-r--r--lib/Target/Hexagon/HexagonConstPropagation.cpp55
-rw-r--r--lib/Target/Hexagon/HexagonCopyToCombine.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonEarlyIfConv.cpp40
-rw-r--r--lib/Target/Hexagon/HexagonExpandCondsets.cpp44
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.cpp55
-rw-r--r--lib/Target/Hexagon/HexagonGenPredicate.cpp18
-rw-r--r--lib/Target/Hexagon/HexagonHardwareLoops.cpp21
-rw-r--r--lib/Target/Hexagon/HexagonHazardRecognizer.cpp14
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp50
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.cpp10
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp42
-rw-r--r--lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp26
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.cpp144
-rw-r--r--lib/Target/Hexagon/HexagonNewValueJump.cpp13
-rw-r--r--lib/Target/Hexagon/HexagonOptAddrMode.cpp68
-rw-r--r--lib/Target/Hexagon/HexagonSplitDouble.cpp30
-rw-r--r--lib/Target/Hexagon/HexagonStoreWidening.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonTargetObjectFile.cpp26
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp19
-rw-r--r--lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp39
-rw-r--r--lib/Target/Hexagon/HexagonVectorPrint.cpp17
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp33
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp23
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp28
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp30
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp13
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp6
-rw-r--r--lib/Target/Lanai/LanaiISelDAGToDAG.cpp2
-rw-r--r--lib/Target/Lanai/LanaiISelLowering.cpp4
-rw-r--r--lib/Target/MSP430/MSP430BranchSelector.cpp12
-rw-r--r--lib/Target/MSP430/MSP430ISelDAGToDAG.cpp6
-rw-r--r--lib/Target/Mips/AsmParser/MipsAsmParser.cpp28
-rw-r--r--lib/Target/Mips/Disassembler/MipsDisassembler.cpp34
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp14
-rw-r--r--lib/Target/Mips/MicroMipsSizeReduction.cpp6
-rw-r--r--lib/Target/Mips/Mips16HardFloat.cpp6
-rw-r--r--lib/Target/Mips/Mips16RegisterInfo.cpp4
-rw-r--r--lib/Target/Mips/MipsConstantIslandPass.cpp96
-rw-r--r--lib/Target/Mips/MipsFastISel.cpp53
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.cpp2
-rw-r--r--lib/Target/Mips/MipsInstructionSelector.cpp4
-rw-r--r--lib/Target/Mips/MipsModuleISelDAGToDAG.cpp2
-rw-r--r--lib/Target/Mips/MipsOs16.cpp16
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.cpp16
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.cpp8
-rw-r--r--lib/Target/Mips/MipsSERegisterInfo.cpp3
-rw-r--r--lib/Target/Mips/MipsSubtarget.cpp3
-rw-r--r--lib/Target/Mips/MipsTargetMachine.cpp6
-rw-r--r--lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp12
-rw-r--r--lib/Target/NVPTX/NVVMReflect.cpp2
-rw-r--r--lib/Target/Nios2/Nios2ISelDAGToDAG.cpp2
-rw-r--r--lib/Target/PowerPC/PPCAsmPrinter.cpp18
-rw-r--r--lib/Target/PowerPC/PPCBranchCoalescing.cpp117
-rw-r--r--lib/Target/PowerPC/PPCCTRLoops.cpp28
-rw-r--r--lib/Target/PowerPC/PPCExpandISEL.cpp46
-rw-r--r--lib/Target/PowerPC/PPCHazardRecognizers.cpp8
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp146
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp18
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.cpp21
-rw-r--r--lib/Target/PowerPC/PPCLoopPreIncPrep.cpp8
-rw-r--r--lib/Target/PowerPC/PPCMIPeephole.cpp120
-rw-r--r--lib/Target/PowerPC/PPCPreEmitPeephole.cpp8
-rw-r--r--lib/Target/PowerPC/PPCReduceCRLogicals.cpp39
-rw-r--r--lib/Target/PowerPC/PPCTLSDynamicCall.cpp2
-rw-r--r--lib/Target/PowerPC/PPCVSXFMAMutate.cpp12
-rw-r--r--lib/Target/PowerPC/PPCVSXSwapRemoval.cpp133
-rw-r--r--lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp7
-rw-r--r--lib/Target/RISCV/RISCVISelDAGToDAG.cpp27
-rw-r--r--lib/Target/RISCV/RISCVISelLowering.cpp8
-rw-r--r--lib/Target/SystemZ/SystemZHazardRecognizer.cpp35
-rw-r--r--lib/Target/SystemZ/SystemZISelDAGToDAG.cpp14
-rw-r--r--lib/Target/SystemZ/SystemZMachineScheduler.cpp46
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp3
-rw-r--r--lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp4
-rw-r--r--lib/Target/WebAssembly/WebAssemblyCFGSort.cpp6
-rw-r--r--lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp6
-rw-r--r--lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp10
-rw-r--r--lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp6
-rw-r--r--lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp13
-rw-r--r--lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp6
-rw-r--r--lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp6
-rw-r--r--lib/Target/WebAssembly/WebAssemblyPeephole.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRegColoring.cpp16
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp16
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRegStackify.cpp24
-rw-r--r--lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyStoreResults.cpp8
-rw-r--r--lib/Target/X86/Disassembler/X86Disassembler.cpp2
-rw-r--r--lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp16
-rw-r--r--lib/Target/X86/X86CmovConversion.cpp12
-rw-r--r--lib/Target/X86/X86DomainReassignment.cpp10
-rw-r--r--lib/Target/X86/X86FixupBWInsts.cpp4
-rw-r--r--lib/Target/X86/X86FixupLEAs.cpp36
-rw-r--r--lib/Target/X86/X86FlagsCopyLowering.cpp28
-rw-r--r--lib/Target/X86/X86FloatingPoint.cpp57
-rw-r--r--lib/Target/X86/X86ISelDAGToDAG.cpp25
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp4
-rw-r--r--lib/Target/X86/X86InstructionSelector.cpp40
-rw-r--r--lib/Target/X86/X86OptimizeLEAs.cpp7
-rw-r--r--lib/Target/X86/X86RetpolineThunks.cpp2
-rw-r--r--lib/Target/X86/X86Subtarget.cpp6
-rw-r--r--lib/Target/X86/X86VZeroUpper.cpp8
-rw-r--r--lib/Target/X86/X86WinEHState.cpp12
-rw-r--r--lib/Target/XCore/XCoreRegisterInfo.cpp18
-rw-r--r--lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp7
-rw-r--r--lib/Transforms/Coroutines/CoroFrame.cpp30
-rw-r--r--lib/Transforms/Coroutines/CoroSplit.cpp6
-rw-r--r--lib/Transforms/IPO/ArgumentPromotion.cpp32
-rw-r--r--lib/Transforms/IPO/BlockExtractor.cpp9
-rw-r--r--lib/Transforms/IPO/DeadArgumentElimination.cpp40
-rw-r--r--lib/Transforms/IPO/ForceFunctionAttrs.cpp4
-rw-r--r--lib/Transforms/IPO/FunctionAttrs.cpp12
-rw-r--r--lib/Transforms/IPO/FunctionImport.cpp120
-rw-r--r--lib/Transforms/IPO/GlobalOpt.cpp37
-rw-r--r--lib/Transforms/IPO/Inliner.cpp56
-rw-r--r--lib/Transforms/IPO/Internalize.cpp6
-rw-r--r--lib/Transforms/IPO/LowerTypeTests.cpp2
-rw-r--r--lib/Transforms/IPO/MergeFunctions.cpp168
-rw-r--r--lib/Transforms/IPO/SampleProfile.cpp69
-rw-r--r--lib/Transforms/InstCombine/InstCombineCalls.cpp4
-rw-r--r--lib/Transforms/InstCombine/InstCombineCasts.cpp18
-rw-r--r--lib/Transforms/InstCombine/InstCombineInternal.h6
-rw-r--r--lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp6
-rw-r--r--lib/Transforms/InstCombine/InstCombinePHI.cpp11
-rw-r--r--lib/Transforms/InstCombine/InstCombineShifts.cpp6
-rw-r--r--lib/Transforms/InstCombine/InstructionCombining.cpp41
-rw-r--r--lib/Transforms/Instrumentation/AddressSanitizer.cpp28
-rw-r--r--lib/Transforms/Instrumentation/BoundsChecking.cpp4
-rw-r--r--lib/Transforms/Instrumentation/CFGMST.h14
-rw-r--r--lib/Transforms/Instrumentation/GCOVProfiling.cpp8
-rw-r--r--lib/Transforms/Instrumentation/HWAddressSanitizer.cpp6
-rw-r--r--lib/Transforms/Instrumentation/IndirectCallPromotion.cpp28
-rw-r--r--lib/Transforms/Instrumentation/InstrProfiling.cpp4
-rw-r--r--lib/Transforms/Instrumentation/MemorySanitizer.cpp66
-rw-r--r--lib/Transforms/Instrumentation/PGOInstrumentation.cpp58
-rw-r--r--lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp35
-rw-r--r--lib/Transforms/Instrumentation/ThreadSanitizer.cpp2
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCAPElim.cpp10
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCContract.cpp38
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCExpand.cpp13
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCOpts.cpp156
-rw-r--r--lib/Transforms/ObjCARC/PtrState.cpp40
-rw-r--r--lib/Transforms/Scalar/ADCE.cpp25
-rw-r--r--lib/Transforms/Scalar/AlignmentFromAssumptions.cpp36
-rw-r--r--lib/Transforms/Scalar/BDCE.cpp2
-rw-r--r--lib/Transforms/Scalar/CallSiteSplitting.cpp6
-rw-r--r--lib/Transforms/Scalar/ConstantHoisting.cpp77
-rw-r--r--lib/Transforms/Scalar/CorrelatedValuePropagation.cpp2
-rw-r--r--lib/Transforms/Scalar/DeadStoreElimination.cpp79
-rw-r--r--lib/Transforms/Scalar/EarlyCSE.cpp54
-rw-r--r--lib/Transforms/Scalar/Float2Int.cpp13
-rw-r--r--lib/Transforms/Scalar/GVN.cpp116
-rw-r--r--lib/Transforms/Scalar/GVNHoist.cpp14
-rw-r--r--lib/Transforms/Scalar/GVNSink.cpp27
-rw-r--r--lib/Transforms/Scalar/GuardWidening.cpp13
-rw-r--r--lib/Transforms/Scalar/IndVarSimplify.cpp51
-rw-r--r--lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp66
-rw-r--r--lib/Transforms/Scalar/InferAddressSpaces.cpp12
-rw-r--r--lib/Transforms/Scalar/JumpThreading.cpp79
-rw-r--r--lib/Transforms/Scalar/LICM.cpp15
-rw-r--r--lib/Transforms/Scalar/LoopDataPrefetch.cpp10
-rw-r--r--lib/Transforms/Scalar/LoopDeletion.cpp25
-rw-r--r--lib/Transforms/Scalar/LoopDistribute.cpp39
-rw-r--r--lib/Transforms/Scalar/LoopIdiomRecognize.cpp24
-rw-r--r--lib/Transforms/Scalar/LoopInterchange.cpp154
-rw-r--r--lib/Transforms/Scalar/LoopLoadElimination.cpp37
-rw-r--r--lib/Transforms/Scalar/LoopPredication.cpp86
-rw-r--r--lib/Transforms/Scalar/LoopRerollPass.cpp119
-rw-r--r--lib/Transforms/Scalar/LoopSink.cpp6
-rw-r--r--lib/Transforms/Scalar/LoopStrengthReduce.cpp225
-rw-r--r--lib/Transforms/Scalar/LoopUnrollPass.cpp68
-rw-r--r--lib/Transforms/Scalar/LoopUnswitch.cpp52
-rw-r--r--lib/Transforms/Scalar/LoopVersioningLICM.cpp72
-rw-r--r--lib/Transforms/Scalar/MemCpyOptimizer.cpp26
-rw-r--r--lib/Transforms/Scalar/MergeICmps.cpp101
-rw-r--r--lib/Transforms/Scalar/MergedLoadStoreMotion.cpp12
-rw-r--r--lib/Transforms/Scalar/NewGVN.cpp236
-rw-r--r--lib/Transforms/Scalar/PlaceSafepoints.cpp8
-rw-r--r--lib/Transforms/Scalar/Reassociate.cpp57
-rw-r--r--lib/Transforms/Scalar/RewriteStatepointsForGC.cpp21
-rw-r--r--lib/Transforms/Scalar/SCCP.cpp64
-rw-r--r--lib/Transforms/Scalar/SROA.cpp188
-rw-r--r--lib/Transforms/Scalar/SimpleLoopUnswitch.cpp37
-rw-r--r--lib/Transforms/Scalar/Sink.cpp10
-rw-r--r--lib/Transforms/Scalar/SpeculateAroundPHIs.cpp57
-rw-r--r--lib/Transforms/Scalar/SpeculativeExecution.cpp4
-rw-r--r--lib/Transforms/Scalar/StructurizeCFG.cpp15
-rw-r--r--lib/Transforms/Scalar/TailRecursionElimination.cpp6
-rw-r--r--lib/Transforms/Utils/AddDiscriminators.cpp6
-rw-r--r--lib/Transforms/Utils/CodeExtractor.cpp21
-rw-r--r--lib/Transforms/Utils/CtorUtils.cpp2
-rw-r--r--lib/Transforms/Utils/Evaluator.cpp134
-rw-r--r--lib/Transforms/Utils/FlattenCFG.cpp4
-rw-r--r--lib/Transforms/Utils/FunctionComparator.cpp2
-rw-r--r--lib/Transforms/Utils/LibCallsShrinkWrap.cpp22
-rw-r--r--lib/Transforms/Utils/Local.cpp32
-rw-r--r--lib/Transforms/Utils/LoopRotationUtils.cpp21
-rw-r--r--lib/Transforms/Utils/LoopSimplify.cpp23
-rw-r--r--lib/Transforms/Utils/LoopUnroll.cpp54
-rw-r--r--lib/Transforms/Utils/LoopUnrollPeel.cpp25
-rw-r--r--lib/Transforms/Utils/LoopUnrollRuntime.cpp30
-rw-r--r--lib/Transforms/Utils/LoopUtils.cpp35
-rw-r--r--lib/Transforms/Utils/LowerSwitch.cpp37
-rw-r--r--lib/Transforms/Utils/PredicateInfo.cpp19
-rw-r--r--lib/Transforms/Utils/SSAUpdater.cpp2
-rw-r--r--lib/Transforms/Utils/SSAUpdaterBulk.cpp20
-rw-r--r--lib/Transforms/Utils/SimplifyCFG.cpp82
-rw-r--r--lib/Transforms/Utils/SimplifyIndVar.cpp27
-rw-r--r--lib/Transforms/Utils/SplitModule.cpp12
-rw-r--r--lib/Transforms/Utils/VNCoercion.cpp4
-rw-r--r--lib/Transforms/Vectorize/LoadStoreVectorizer.cpp32
-rw-r--r--lib/Transforms/Vectorize/LoopVectorizationLegality.cpp84
-rw-r--r--lib/Transforms/Vectorize/LoopVectorize.cpp253
-rw-r--r--lib/Transforms/Vectorize/SLPVectorizer.cpp277
-rw-r--r--lib/Transforms/Vectorize/VPlan.cpp14
-rw-r--r--tools/bugpoint/ExtractFunction.cpp6
-rw-r--r--tools/bugpoint/OptimizerDriver.cpp8
-rw-r--r--tools/bugpoint/ToolRunner.cpp53
-rw-r--r--tools/lli/lli.cpp4
-rw-r--r--tools/llvm-dwarfdump/Statistics.cpp15
-rw-r--r--tools/llvm-mca/Backend.cpp8
-rw-r--r--tools/llvm-mca/Dispatch.cpp13
-rw-r--r--tools/llvm-mca/InstrBuilder.cpp22
-rw-r--r--tools/llvm-mca/LSUnit.cpp16
-rw-r--r--tools/llvm-mca/Scheduler.cpp19
-rw-r--r--tools/verify-uselistorder/verify-uselistorder.cpp42
-rw-r--r--unittests/IR/CFGBuilder.cpp12
-rw-r--r--unittests/IR/DominatorTreeBatchUpdatesTest.cpp12
-rw-r--r--utils/TableGen/AsmMatcherEmitter.cpp4
-rw-r--r--utils/TableGen/AsmWriterEmitter.cpp4
-rw-r--r--utils/TableGen/CodeGenDAGPatterns.cpp48
-rw-r--r--utils/TableGen/CodeGenRegisters.cpp89
-rw-r--r--utils/TableGen/CodeGenSchedule.cpp161
-rw-r--r--utils/TableGen/DAGISelEmitter.cpp17
-rw-r--r--utils/TableGen/DAGISelMatcherOpt.cpp15
-rw-r--r--utils/TableGen/DFAPacketizerEmitter.cpp143
-rw-r--r--utils/TableGen/FixedLenDecoderEmitter.cpp72
-rw-r--r--utils/TableGen/GlobalISelEmitter.cpp4
-rw-r--r--utils/TableGen/PseudoLoweringEmitter.cpp9
-rw-r--r--utils/TableGen/RISCVCompressInstEmitter.cpp34
-rw-r--r--utils/TableGen/RegisterBankEmitter.cpp6
-rw-r--r--utils/TableGen/SubtargetEmitter.cpp13
502 files changed, 9320 insertions, 8807 deletions
diff --git a/docs/Bugpoint.rst b/docs/Bugpoint.rst
index 27732e0fffbd..f3bb54cffb54 100644
--- a/docs/Bugpoint.rst
+++ b/docs/Bugpoint.rst
@@ -198,14 +198,14 @@ desired ranges. For example:
static int calledCount = 0;
calledCount++;
- DEBUG(if (calledCount < 212) return false);
- DEBUG(if (calledCount > 217) return false);
- DEBUG(if (calledCount == 213) return false);
- DEBUG(if (calledCount == 214) return false);
- DEBUG(if (calledCount == 215) return false);
- DEBUG(if (calledCount == 216) return false);
- DEBUG(dbgs() << "visitXOR calledCount: " << calledCount << "\n");
- DEBUG(dbgs() << "I: "; I->dump());
+ LLVM_DEBUG(if (calledCount < 212) return false);
+ LLVM_DEBUG(if (calledCount > 217) return false);
+ LLVM_DEBUG(if (calledCount == 213) return false);
+ LLVM_DEBUG(if (calledCount == 214) return false);
+ LLVM_DEBUG(if (calledCount == 215) return false);
+ LLVM_DEBUG(if (calledCount == 216) return false);
+ LLVM_DEBUG(dbgs() << "visitXOR calledCount: " << calledCount << "\n");
+ LLVM_DEBUG(dbgs() << "I: "; I->dump());
could be added to ``visitXOR`` to limit ``visitXor`` to being applied only to
calls 212 and 217. This is from an actual test case and raises an important
diff --git a/docs/CommandGuide/opt.rst b/docs/CommandGuide/opt.rst
index 7b9255d26423..2b2fffa063a0 100644
--- a/docs/CommandGuide/opt.rst
+++ b/docs/CommandGuide/opt.rst
@@ -96,7 +96,7 @@ OPTIONS
.. option:: -debug
If this is a debug build, this option will enable debug printouts from passes
- which use the ``DEBUG()`` macro. See the `LLVM Programmer's Manual
+ which use the ``LLVM_DEBUG()`` macro. See the `LLVM Programmer's Manual
<../ProgrammersManual.html>`_, section ``#DEBUG`` for more information.
.. option:: -load=<plugin>
diff --git a/docs/CommandLine.rst b/docs/CommandLine.rst
index 9496157d4346..9a6a196b431c 100644
--- a/docs/CommandLine.rst
+++ b/docs/CommandLine.rst
@@ -886,12 +886,12 @@ To do this, set up your .h file with your option, like this for example:
// debug build, then the code specified as the option to the macro will be
// executed. Otherwise it will not be.
#ifdef NDEBUG
- #define DEBUG(X)
+ #define LLVM_DEBUG(X)
#else
- #define DEBUG(X) do { if (DebugFlag) { X; } } while (0)
+ #define LLVM_DEBUG(X) do { if (DebugFlag) { X; } } while (0)
#endif
-This allows clients to blissfully use the ``DEBUG()`` macro, or the
+This allows clients to blissfully use the ``LLVM_DEBUG()`` macro, or the
``DebugFlag`` explicitly if they want to. Now we just need to be able to set
the ``DebugFlag`` boolean when the option is set. To do this, we pass an
additional argument to our command line argument processor, and we specify where
diff --git a/docs/ProgrammersManual.rst b/docs/ProgrammersManual.rst
index 5e510fb7e7b7..aef0d207f07d 100644
--- a/docs/ProgrammersManual.rst
+++ b/docs/ProgrammersManual.rst
@@ -1020,7 +1020,7 @@ be passed by value.
.. _DEBUG:
-The ``DEBUG()`` macro and ``-debug`` option
+The ``LLVM_DEBUG()`` macro and ``-debug`` option
-------------------------------------------
Often when working on your pass you will put a bunch of debugging printouts and
@@ -1033,14 +1033,14 @@ them out, allowing you to enable them if you need them in the future.
The ``llvm/Support/Debug.h`` (`doxygen
<http://llvm.org/doxygen/Debug_8h_source.html>`__) file provides a macro named
-``DEBUG()`` that is a much nicer solution to this problem. Basically, you can
-put arbitrary code into the argument of the ``DEBUG`` macro, and it is only
+``LLVM_DEBUG()`` that is a much nicer solution to this problem. Basically, you can
+put arbitrary code into the argument of the ``LLVM_DEBUG`` macro, and it is only
executed if '``opt``' (or any other tool) is run with the '``-debug``' command
line argument:
.. code-block:: c++
- DEBUG(dbgs() << "I am here!\n");
+ LLVM_DEBUG(dbgs() << "I am here!\n");
Then you can run your pass like this:
@@ -1051,13 +1051,13 @@ Then you can run your pass like this:
$ opt < a.bc > /dev/null -mypass -debug
I am here!
-Using the ``DEBUG()`` macro instead of a home-brewed solution allows you to not
+Using the ``LLVM_DEBUG()`` macro instead of a home-brewed solution allows you to not
have to create "yet another" command line option for the debug output for your
-pass. Note that ``DEBUG()`` macros are disabled for non-asserts builds, so they
+pass. Note that ``LLVM_DEBUG()`` macros are disabled for non-asserts builds, so they
do not cause a performance impact at all (for the same reason, they should also
not contain side-effects!).
-One additional nice thing about the ``DEBUG()`` macro is that you can enable or
+One additional nice thing about the ``LLVM_DEBUG()`` macro is that you can enable or
disable it directly in gdb. Just use "``set DebugFlag=0``" or "``set
DebugFlag=1``" from the gdb if the program is running. If the program hasn't
been started yet, you can always just run it with ``-debug``.
@@ -1076,10 +1076,10 @@ follows:
.. code-block:: c++
#define DEBUG_TYPE "foo"
- DEBUG(dbgs() << "'foo' debug type\n");
+ LLVM_DEBUG(dbgs() << "'foo' debug type\n");
#undef DEBUG_TYPE
#define DEBUG_TYPE "bar"
- DEBUG(dbgs() << "'bar' debug type\n");
+ LLVM_DEBUG(dbgs() << "'bar' debug type\n");
#undef DEBUG_TYPE
Then you can run your pass like this:
diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index 1823654c0e07..25b2efd33c98 100644
--- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -1030,8 +1030,9 @@ void BlockFrequencyInfoImpl<BT>::calculate(const FunctionT &F,
Nodes.clear();
// Initialize.
- DEBUG(dbgs() << "\nblock-frequency: " << F.getName() << "\n================="
- << std::string(F.getName().size(), '=') << "\n");
+ LLVM_DEBUG(dbgs() << "\nblock-frequency: " << F.getName()
+ << "\n================="
+ << std::string(F.getName().size(), '=') << "\n");
initializeRPOT();
initializeLoops();
@@ -1067,10 +1068,11 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
"More nodes in function than Block Frequency Info supports");
- DEBUG(dbgs() << "reverse-post-order-traversal\n");
+ LLVM_DEBUG(dbgs() << "reverse-post-order-traversal\n");
for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
BlockNode Node = getNode(I);
- DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
+ LLVM_DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node)
+ << "\n");
Nodes[*I] = Node;
}
@@ -1081,7 +1083,7 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
}
template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
- DEBUG(dbgs() << "loop-detection\n");
+ LLVM_DEBUG(dbgs() << "loop-detection\n");
if (LI->empty())
return;
@@ -1099,7 +1101,7 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
Loops.emplace_back(Parent, Header);
Working[Header.Index].Loop = &Loops.back();
- DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
+ LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
for (const LoopT *L : *Loop)
Q.emplace_back(L, &Loops.back());
@@ -1128,8 +1130,8 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
Working[Index].Loop = HeaderData.Loop;
HeaderData.Loop->Nodes.push_back(Index);
- DEBUG(dbgs() << " - loop = " << getBlockName(Header)
- << ": member = " << getBlockName(Index) << "\n");
+ LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header)
+ << ": member = " << getBlockName(Index) << "\n");
}
}
@@ -1150,10 +1152,10 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
template <class BT>
bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
// Compute mass in loop.
- DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
+ LLVM_DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
if (Loop.isIrreducible()) {
- DEBUG(dbgs() << "isIrreducible = true\n");
+ LLVM_DEBUG(dbgs() << "isIrreducible = true\n");
Distribution Dist;
unsigned NumHeadersWithWeight = 0;
Optional<uint64_t> MinHeaderWeight;
@@ -1165,14 +1167,14 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
IsIrrLoopHeader.set(Loop.Nodes[H].Index);
Optional<uint64_t> HeaderWeight = Block->getIrrLoopHeaderWeight();
if (!HeaderWeight) {
- DEBUG(dbgs() << "Missing irr loop header metadata on "
- << getBlockName(HeaderNode) << "\n");
+ LLVM_DEBUG(dbgs() << "Missing irr loop header metadata on "
+ << getBlockName(HeaderNode) << "\n");
HeadersWithoutWeight.insert(H);
continue;
}
- DEBUG(dbgs() << getBlockName(HeaderNode)
- << " has irr loop header weight " << HeaderWeight.getValue()
- << "\n");
+ LLVM_DEBUG(dbgs() << getBlockName(HeaderNode)
+ << " has irr loop header weight "
+ << HeaderWeight.getValue() << "\n");
NumHeadersWithWeight++;
uint64_t HeaderWeightValue = HeaderWeight.getValue();
if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight)
@@ -1194,8 +1196,8 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
assert(!getBlock(HeaderNode)->getIrrLoopHeaderWeight() &&
"Shouldn't have a weight metadata");
uint64_t MinWeight = MinHeaderWeight.getValue();
- DEBUG(dbgs() << "Giving weight " << MinWeight
- << " to " << getBlockName(HeaderNode) << "\n");
+ LLVM_DEBUG(dbgs() << "Giving weight " << MinWeight << " to "
+ << getBlockName(HeaderNode) << "\n");
if (MinWeight)
Dist.addLocal(HeaderNode, MinWeight);
}
@@ -1224,7 +1226,7 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
template <class BT>
bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
// Compute mass in function.
- DEBUG(dbgs() << "compute-mass-in-function\n");
+ LLVM_DEBUG(dbgs() << "compute-mass-in-function\n");
assert(!Working.empty() && "no blocks in function");
assert(!Working[0].isLoopHeader() && "entry block is a loop header");
@@ -1276,9 +1278,10 @@ template <class BT> struct BlockEdgesAdder {
template <class BT>
void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
- DEBUG(dbgs() << "analyze-irreducible-in-";
- if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
- else dbgs() << "function\n");
+ LLVM_DEBUG(dbgs() << "analyze-irreducible-in-";
+ if (OuterLoop) dbgs()
+ << "loop: " << getLoopName(*OuterLoop) << "\n";
+ else dbgs() << "function\n");
using namespace bfi_detail;
@@ -1304,7 +1307,7 @@ template <class BT>
bool
BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
const BlockNode &Node) {
- DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
+ LLVM_DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
// Calculate probability for successors.
Distribution Dist;
if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
diff --git a/include/llvm/Analysis/CGSCCPassManager.h b/include/llvm/Analysis/CGSCCPassManager.h
index f04d67d598ba..5e83ea2a6e2b 100644
--- a/include/llvm/Analysis/CGSCCPassManager.h
+++ b/include/llvm/Analysis/CGSCCPassManager.h
@@ -387,15 +387,15 @@ public:
do {
LazyCallGraph::RefSCC *RC = RCWorklist.pop_back_val();
if (InvalidRefSCCSet.count(RC)) {
- DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
+ LLVM_DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
continue;
}
assert(CWorklist.empty() &&
"Should always start with an empty SCC worklist");
- DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
- << "\n");
+ LLVM_DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
+ << "\n");
// Push the initial SCCs in reverse post-order as we'll pop off the
// back and so see this in post-order.
@@ -409,12 +409,13 @@ public:
// other RefSCCs should be queued above, so we just need to skip both
// scenarios here.
if (InvalidSCCSet.count(C)) {
- DEBUG(dbgs() << "Skipping an invalid SCC...\n");
+ LLVM_DEBUG(dbgs() << "Skipping an invalid SCC...\n");
continue;
}
if (&C->getOuterRefSCC() != RC) {
- DEBUG(dbgs() << "Skipping an SCC that is now part of some other "
- "RefSCC...\n");
+ LLVM_DEBUG(dbgs()
+ << "Skipping an SCC that is now part of some other "
+ "RefSCC...\n");
continue;
}
@@ -436,7 +437,8 @@ public:
// If the CGSCC pass wasn't able to provide a valid updated SCC,
// the current SCC may simply need to be skipped if invalid.
if (UR.InvalidatedSCCs.count(C)) {
- DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
+ LLVM_DEBUG(dbgs()
+ << "Skipping invalidated root or island SCC!\n");
break;
}
// Check that we didn't miss any update scenario.
@@ -464,9 +466,10 @@ public:
// FIXME: If we ever start having RefSCC passes, we'll want to
// iterate there too.
if (UR.UpdatedC)
- DEBUG(dbgs() << "Re-running SCC passes after a refinement of the "
- "current SCC: "
- << *UR.UpdatedC << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Re-running SCC passes after a refinement of the "
+ "current SCC: "
+ << *UR.UpdatedC << "\n");
// Note that both `C` and `RC` may at this point refer to deleted,
// invalid SCC and RefSCCs respectively. But we will short circuit
@@ -601,7 +604,8 @@ public:
// a pointer we can overwrite.
LazyCallGraph::SCC *CurrentC = &C;
- DEBUG(dbgs() << "Running function passes across an SCC: " << C << "\n");
+ LLVM_DEBUG(dbgs() << "Running function passes across an SCC: " << C
+ << "\n");
PreservedAnalyses PA = PreservedAnalyses::all();
for (LazyCallGraph::Node *N : Nodes) {
@@ -757,9 +761,9 @@ public:
if (!F)
return false;
- DEBUG(dbgs() << "Found devirutalized call from "
- << CS.getParent()->getParent()->getName() << " to "
- << F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Found devirutalized call from "
+ << CS.getParent()->getParent()->getName() << " to "
+ << F->getName() << "\n");
// We now have a direct call where previously we had an indirect call,
// so iterate to process this devirtualization site.
@@ -793,16 +797,18 @@ public:
// Otherwise, if we've already hit our max, we're done.
if (Iteration >= MaxIterations) {
- DEBUG(dbgs() << "Found another devirtualization after hitting the max "
- "number of repetitions ("
- << MaxIterations << ") on SCC: " << *C << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Found another devirtualization after hitting the max "
+ "number of repetitions ("
+ << MaxIterations << ") on SCC: " << *C << "\n");
PA.intersect(std::move(PassPA));
break;
}
- DEBUG(dbgs()
- << "Repeating an SCC pass after finding a devirtualization in: "
- << *C << "\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Repeating an SCC pass after finding a devirtualization in: " << *C
+ << "\n");
// Move over the new call counts in preparation for iterating.
CallCounts = std::move(NewCallCounts);
diff --git a/include/llvm/Analysis/RegionInfoImpl.h b/include/llvm/Analysis/RegionInfoImpl.h
index 5c4cdbe20633..5904214aa925 100644
--- a/include/llvm/Analysis/RegionInfoImpl.h
+++ b/include/llvm/Analysis/RegionInfoImpl.h
@@ -675,7 +675,7 @@ typename Tr::RegionT *RegionInfoBase<Tr>::createRegion(BlockT *entry,
#ifdef EXPENSIVE_CHECKS
region->verifyRegion();
#else
- DEBUG(region->verifyRegion());
+ LLVM_DEBUG(region->verifyRegion());
#endif
updateStatistics(region);
diff --git a/include/llvm/Analysis/SparsePropagation.h b/include/llvm/Analysis/SparsePropagation.h
index 1b8df03b3a1b..b9691351b5c0 100644
--- a/include/llvm/Analysis/SparsePropagation.h
+++ b/include/llvm/Analysis/SparsePropagation.h
@@ -260,7 +260,7 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::MarkBlockExecutable(
BasicBlock *BB) {
if (!BBExecutable.insert(BB).second)
return;
- DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n");
BBWorkList.push_back(BB); // Add the block to the work list!
}
@@ -270,8 +270,8 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::markEdgeExecutable(
if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
return; // This edge is already known to be executable!
- DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() << " -> "
- << Dest->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
+ << " -> " << Dest->getName() << "\n");
if (BBExecutable.count(Dest)) {
// The destination is already executable, but we just made an edge
@@ -477,7 +477,7 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::Solve() {
Value *V = ValueWorkList.back();
ValueWorkList.pop_back();
- DEBUG(dbgs() << "\nPopped off V-WL: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "\nPopped off V-WL: " << *V << "\n");
// "V" got into the work list because it made a transition. See if any
// users are both live and in need of updating.
@@ -492,7 +492,7 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::Solve() {
BasicBlock *BB = BBWorkList.back();
BBWorkList.pop_back();
- DEBUG(dbgs() << "\nPopped off BBWL: " << *BB);
+ LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB);
// Notify all instructions in this basic block that they are newly
// executable.
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 03af771bda06..873587651efd 100644
--- a/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -38,7 +38,7 @@ public:
return false;
if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
MI.getOperand(1).getReg(), MRI)) {
- DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = DefMI->getOperand(1).getReg();
Builder.setInstr(MI);
@@ -62,7 +62,7 @@ public:
if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
return false;
- DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
Builder.setInstr(MI);
unsigned ZExtSrc = MI.getOperand(1).getReg();
LLT ZExtSrcTy = MRI.getType(ZExtSrc);
@@ -91,7 +91,7 @@ public:
isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
return false;
- DEBUG(dbgs() << ".. Combine MI: " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
Builder.setInstr(MI);
unsigned SExtSrc = MI.getOperand(1).getReg();
LLT SExtSrcTy = MRI.getType(SExtSrc);
@@ -123,7 +123,7 @@ public:
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
return false;
- DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
+ LLVM_DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
Builder.setInstr(MI);
Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
markInstAndDefDead(MI, *DefMI, DeadInsts);
diff --git a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
index 7179e5ff66fd..686d9a4aa04f 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
@@ -63,7 +63,7 @@ public:
public:
~RemoteRTDyldMemoryManager() {
Client.destroyRemoteAllocator(Id);
- DEBUG(dbgs() << "Destroyed remote allocator " << Id << "\n");
+ LLVM_DEBUG(dbgs() << "Destroyed remote allocator " << Id << "\n");
}
RemoteRTDyldMemoryManager(const RemoteRTDyldMemoryManager &) = delete;
@@ -79,9 +79,9 @@ public:
Unmapped.back().CodeAllocs.emplace_back(Size, Alignment);
uint8_t *Alloc = reinterpret_cast<uint8_t *>(
Unmapped.back().CodeAllocs.back().getLocalAddress());
- DEBUG(dbgs() << "Allocator " << Id << " allocated code for "
- << SectionName << ": " << Alloc << " (" << Size
- << " bytes, alignment " << Alignment << ")\n");
+ LLVM_DEBUG(dbgs() << "Allocator " << Id << " allocated code for "
+ << SectionName << ": " << Alloc << " (" << Size
+ << " bytes, alignment " << Alignment << ")\n");
return Alloc;
}
@@ -92,18 +92,18 @@ public:
Unmapped.back().RODataAllocs.emplace_back(Size, Alignment);
uint8_t *Alloc = reinterpret_cast<uint8_t *>(
Unmapped.back().RODataAllocs.back().getLocalAddress());
- DEBUG(dbgs() << "Allocator " << Id << " allocated ro-data for "
- << SectionName << ": " << Alloc << " (" << Size
- << " bytes, alignment " << Alignment << ")\n");
+ LLVM_DEBUG(dbgs() << "Allocator " << Id << " allocated ro-data for "
+ << SectionName << ": " << Alloc << " (" << Size
+ << " bytes, alignment " << Alignment << ")\n");
return Alloc;
} // else...
Unmapped.back().RWDataAllocs.emplace_back(Size, Alignment);
uint8_t *Alloc = reinterpret_cast<uint8_t *>(
Unmapped.back().RWDataAllocs.back().getLocalAddress());
- DEBUG(dbgs() << "Allocator " << Id << " allocated rw-data for "
- << SectionName << ": " << Alloc << " (" << Size
- << " bytes, alignment " << Alignment << ")\n");
+ LLVM_DEBUG(dbgs() << "Allocator " << Id << " allocated rw-data for "
+ << SectionName << ": " << Alloc << " (" << Size
+ << " bytes, alignment " << Alignment << ")\n");
return Alloc;
}
@@ -113,36 +113,36 @@ public:
uint32_t RWDataAlign) override {
Unmapped.push_back(ObjectAllocs());
- DEBUG(dbgs() << "Allocator " << Id << " reserved:\n");
+ LLVM_DEBUG(dbgs() << "Allocator " << Id << " reserved:\n");
if (CodeSize != 0) {
Unmapped.back().RemoteCodeAddr =
Client.reserveMem(Id, CodeSize, CodeAlign);
- DEBUG(dbgs() << " code: "
- << format("0x%016x", Unmapped.back().RemoteCodeAddr)
- << " (" << CodeSize << " bytes, alignment " << CodeAlign
- << ")\n");
+ LLVM_DEBUG(dbgs() << " code: "
+ << format("0x%016x", Unmapped.back().RemoteCodeAddr)
+ << " (" << CodeSize << " bytes, alignment "
+ << CodeAlign << ")\n");
}
if (RODataSize != 0) {
Unmapped.back().RemoteRODataAddr =
Client.reserveMem(Id, RODataSize, RODataAlign);
- DEBUG(dbgs() << " ro-data: "
- << format("0x%016x", Unmapped.back().RemoteRODataAddr)
- << " (" << RODataSize << " bytes, alignment "
- << RODataAlign << ")\n");
+ LLVM_DEBUG(dbgs() << " ro-data: "
+ << format("0x%016x", Unmapped.back().RemoteRODataAddr)
+ << " (" << RODataSize << " bytes, alignment "
+ << RODataAlign << ")\n");
}
if (RWDataSize != 0) {
Unmapped.back().RemoteRWDataAddr =
Client.reserveMem(Id, RWDataSize, RWDataAlign);
- DEBUG(dbgs() << " rw-data: "
- << format("0x%016x", Unmapped.back().RemoteRWDataAddr)
- << " (" << RWDataSize << " bytes, alignment "
- << RWDataAlign << ")\n");
+ LLVM_DEBUG(dbgs() << " rw-data: "
+ << format("0x%016x", Unmapped.back().RemoteRWDataAddr)
+ << " (" << RWDataSize << " bytes, alignment "
+ << RWDataAlign << ")\n");
}
}
@@ -162,7 +162,7 @@ public:
void notifyObjectLoaded(RuntimeDyld &Dyld,
const object::ObjectFile &Obj) override {
- DEBUG(dbgs() << "Allocator " << Id << " applied mappings:\n");
+ LLVM_DEBUG(dbgs() << "Allocator " << Id << " applied mappings:\n");
for (auto &ObjAllocs : Unmapped) {
mapAllocsToRemoteAddrs(Dyld, ObjAllocs.CodeAllocs,
ObjAllocs.RemoteCodeAddr);
@@ -176,7 +176,7 @@ public:
}
bool finalizeMemory(std::string *ErrMsg = nullptr) override {
- DEBUG(dbgs() << "Allocator " << Id << " finalizing:\n");
+ LLVM_DEBUG(dbgs() << "Allocator " << Id << " finalizing:\n");
for (auto &ObjAllocs : Unfinalized) {
if (copyAndProtect(ObjAllocs.CodeAllocs, ObjAllocs.RemoteCodeAddr,
@@ -261,7 +261,7 @@ public:
RemoteRTDyldMemoryManager(OrcRemoteTargetClient &Client,
ResourceIdMgr::ResourceId Id)
: Client(Client), Id(Id) {
- DEBUG(dbgs() << "Created remote allocator " << Id << "\n");
+ LLVM_DEBUG(dbgs() << "Created remote allocator " << Id << "\n");
}
// Maps all allocations in Allocs to aligned blocks
@@ -270,8 +270,9 @@ public:
for (auto &Alloc : Allocs) {
NextAddr = alignTo(NextAddr, Alloc.getAlign());
Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextAddr);
- DEBUG(dbgs() << " " << static_cast<void *>(Alloc.getLocalAddress())
- << " -> " << format("0x%016x", NextAddr) << "\n");
+ LLVM_DEBUG(dbgs() << " "
+ << static_cast<void *>(Alloc.getLocalAddress())
+ << " -> " << format("0x%016x", NextAddr) << "\n");
Alloc.setRemoteAddress(NextAddr);
// Only advance NextAddr if it was non-null to begin with,
@@ -290,22 +291,23 @@ public:
assert(!Allocs.empty() && "No sections in allocated segment");
for (auto &Alloc : Allocs) {
- DEBUG(dbgs() << " copying section: "
- << static_cast<void *>(Alloc.getLocalAddress()) << " -> "
- << format("0x%016x", Alloc.getRemoteAddress()) << " ("
- << Alloc.getSize() << " bytes)\n";);
+ LLVM_DEBUG(dbgs() << " copying section: "
+ << static_cast<void *>(Alloc.getLocalAddress())
+ << " -> "
+ << format("0x%016x", Alloc.getRemoteAddress())
+ << " (" << Alloc.getSize() << " bytes)\n";);
if (Client.writeMem(Alloc.getRemoteAddress(), Alloc.getLocalAddress(),
Alloc.getSize()))
return true;
}
- DEBUG(dbgs() << " setting "
- << (Permissions & sys::Memory::MF_READ ? 'R' : '-')
- << (Permissions & sys::Memory::MF_WRITE ? 'W' : '-')
- << (Permissions & sys::Memory::MF_EXEC ? 'X' : '-')
- << " permissions on block: "
- << format("0x%016x", RemoteSegmentAddr) << "\n");
+ LLVM_DEBUG(dbgs() << " setting "
+ << (Permissions & sys::Memory::MF_READ ? 'R' : '-')
+ << (Permissions & sys::Memory::MF_WRITE ? 'W' : '-')
+ << (Permissions & sys::Memory::MF_EXEC ? 'X' : '-')
+ << " permissions on block: "
+ << format("0x%016x", RemoteSegmentAddr) << "\n");
if (Client.setProtections(Id, RemoteSegmentAddr, Permissions))
return true;
}
@@ -487,7 +489,8 @@ public:
/// Call the int(void) function at the given address in the target and return
/// its result.
Expected<int> callIntVoid(JITTargetAddress Addr) {
- DEBUG(dbgs() << "Calling int(*)(void) " << format("0x%016x", Addr) << "\n");
+ LLVM_DEBUG(dbgs() << "Calling int(*)(void) " << format("0x%016x", Addr)
+ << "\n");
return callB<exec::CallIntVoid>(Addr);
}
@@ -495,16 +498,16 @@ public:
/// return its result.
Expected<int> callMain(JITTargetAddress Addr,
const std::vector<std::string> &Args) {
- DEBUG(dbgs() << "Calling int(*)(int, char*[]) " << format("0x%016x", Addr)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Calling int(*)(int, char*[]) "
+ << format("0x%016x", Addr) << "\n");
return callB<exec::CallMain>(Addr, Args);
}
/// Call the void() function at the given address in the target and wait for
/// it to finish.
Error callVoidVoid(JITTargetAddress Addr) {
- DEBUG(dbgs() << "Calling void(*)(void) " << format("0x%016x", Addr)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Calling void(*)(void) " << format("0x%016x", Addr)
+ << "\n");
return callB<exec::CallVoidVoid>(Addr);
}
diff --git a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
index cf419d33004c..acbc1682fa5d 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
@@ -161,9 +161,9 @@ private:
IntVoidFnTy Fn =
reinterpret_cast<IntVoidFnTy>(static_cast<uintptr_t>(Addr));
- DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
+ LLVM_DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
int Result = Fn();
- DEBUG(dbgs() << " Result = " << Result << "\n");
+ LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
@@ -180,15 +180,13 @@ private:
for (auto &Arg : Args)
ArgV[Idx++] = Arg.c_str();
ArgV[ArgC] = 0;
- DEBUG(
- for (int Idx = 0; Idx < ArgC; ++Idx) {
- llvm::dbgs() << "Arg " << Idx << ": " << ArgV[Idx] << "\n";
- }
- );
+ LLVM_DEBUG(for (int Idx = 0; Idx < ArgC; ++Idx) {
+ llvm::dbgs() << "Arg " << Idx << ": " << ArgV[Idx] << "\n";
+ });
- DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
+ LLVM_DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
int Result = Fn(ArgC, ArgV.get());
- DEBUG(dbgs() << " Result = " << Result << "\n");
+ LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
@@ -199,9 +197,9 @@ private:
VoidVoidFnTy Fn =
reinterpret_cast<VoidVoidFnTy>(static_cast<uintptr_t>(Addr));
- DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
+ LLVM_DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
Fn();
- DEBUG(dbgs() << " Complete.\n");
+ LLVM_DEBUG(dbgs() << " Complete.\n");
return Error::success();
}
@@ -211,7 +209,7 @@ private:
if (I != Allocators.end())
return errorCodeToError(
orcError(OrcErrorCode::RemoteAllocatorIdAlreadyInUse));
- DEBUG(dbgs() << " Created allocator " << Id << "\n");
+ LLVM_DEBUG(dbgs() << " Created allocator " << Id << "\n");
Allocators[Id] = Allocator();
return Error::success();
}
@@ -221,15 +219,16 @@ private:
if (I != IndirectStubsOwners.end())
return errorCodeToError(
orcError(OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse));
- DEBUG(dbgs() << " Create indirect stubs owner " << Id << "\n");
+ LLVM_DEBUG(dbgs() << " Create indirect stubs owner " << Id << "\n");
IndirectStubsOwners[Id] = ISBlockOwnerList();
return Error::success();
}
Error handleDeregisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
- DEBUG(dbgs() << " Registering EH frames at " << format("0x%016x", TAddr)
- << ", Size = " << Size << " bytes\n");
+ LLVM_DEBUG(dbgs() << " Registering EH frames at "
+ << format("0x%016x", TAddr) << ", Size = " << Size
+ << " bytes\n");
EHFramesDeregister(Addr, Size);
return Error::success();
}
@@ -240,7 +239,7 @@ private:
return errorCodeToError(
orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
Allocators.erase(I);
- DEBUG(dbgs() << " Destroyed allocator " << Id << "\n");
+ LLVM_DEBUG(dbgs() << " Destroyed allocator " << Id << "\n");
return Error::success();
}
@@ -256,8 +255,8 @@ private:
Expected<std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>>
handleEmitIndirectStubs(ResourceIdMgr::ResourceId Id,
uint32_t NumStubsRequired) {
- DEBUG(dbgs() << " ISMgr " << Id << " request " << NumStubsRequired
- << " stubs.\n");
+ LLVM_DEBUG(dbgs() << " ISMgr " << Id << " request " << NumStubsRequired
+ << " stubs.\n");
auto StubOwnerItr = IndirectStubsOwners.find(Id);
if (StubOwnerItr == IndirectStubsOwners.end())
@@ -328,8 +327,8 @@ private:
Expected<JITTargetAddress> handleGetSymbolAddress(const std::string &Name) {
JITTargetAddress Addr = SymbolLookup(Name);
- DEBUG(dbgs() << " Symbol '" << Name << "' = " << format("0x%016x", Addr)
- << "\n");
+ LLVM_DEBUG(dbgs() << " Symbol '" << Name
+ << "' = " << format("0x%016x", Addr) << "\n");
return Addr;
}
@@ -340,12 +339,13 @@ private:
uint32_t PageSize = sys::Process::getPageSize();
uint32_t TrampolineSize = TargetT::TrampolineSize;
uint32_t IndirectStubSize = TargetT::IndirectStubsInfo::StubSize;
- DEBUG(dbgs() << " Remote info:\n"
- << " triple = '" << ProcessTriple << "'\n"
- << " pointer size = " << PointerSize << "\n"
- << " page size = " << PageSize << "\n"
- << " trampoline size = " << TrampolineSize << "\n"
- << " indirect stub size = " << IndirectStubSize << "\n");
+ LLVM_DEBUG(dbgs() << " Remote info:\n"
+ << " triple = '" << ProcessTriple << "'\n"
+ << " pointer size = " << PointerSize << "\n"
+ << " page size = " << PageSize << "\n"
+ << " trampoline size = " << TrampolineSize << "\n"
+ << " indirect stub size = " << IndirectStubSize
+ << "\n");
return std::make_tuple(ProcessTriple, PointerSize, PageSize, TrampolineSize,
IndirectStubSize);
}
@@ -354,8 +354,8 @@ private:
uint64_t Size) {
uint8_t *Src = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(RSrc));
- DEBUG(dbgs() << " Reading " << Size << " bytes from "
- << format("0x%016x", RSrc) << "\n");
+ LLVM_DEBUG(dbgs() << " Reading " << Size << " bytes from "
+ << format("0x%016x", RSrc) << "\n");
std::vector<uint8_t> Buffer;
Buffer.resize(Size);
@@ -367,8 +367,9 @@ private:
Error handleRegisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
- DEBUG(dbgs() << " Registering EH frames at " << format("0x%016x", TAddr)
- << ", Size = " << Size << " bytes\n");
+ LLVM_DEBUG(dbgs() << " Registering EH frames at "
+ << format("0x%016x", TAddr) << ", Size = " << Size
+ << " bytes\n");
EHFramesRegister(Addr, Size);
return Error::success();
}
@@ -384,8 +385,9 @@ private:
if (auto Err = Allocator.allocate(LocalAllocAddr, Size, Align))
return std::move(Err);
- DEBUG(dbgs() << " Allocator " << Id << " reserved " << LocalAllocAddr
- << " (" << Size << " bytes, alignment " << Align << ")\n");
+ LLVM_DEBUG(dbgs() << " Allocator " << Id << " reserved " << LocalAllocAddr
+ << " (" << Size << " bytes, alignment " << Align
+ << ")\n");
JITTargetAddress AllocAddr = static_cast<JITTargetAddress>(
reinterpret_cast<uintptr_t>(LocalAllocAddr));
@@ -401,10 +403,11 @@ private:
orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
auto &Allocator = I->second;
void *LocalAddr = reinterpret_cast<void *>(static_cast<uintptr_t>(Addr));
- DEBUG(dbgs() << " Allocator " << Id << " set permissions on " << LocalAddr
- << " to " << (Flags & sys::Memory::MF_READ ? 'R' : '-')
- << (Flags & sys::Memory::MF_WRITE ? 'W' : '-')
- << (Flags & sys::Memory::MF_EXEC ? 'X' : '-') << "\n");
+ LLVM_DEBUG(dbgs() << " Allocator " << Id << " set permissions on "
+ << LocalAddr << " to "
+ << (Flags & sys::Memory::MF_READ ? 'R' : '-')
+ << (Flags & sys::Memory::MF_WRITE ? 'W' : '-')
+ << (Flags & sys::Memory::MF_EXEC ? 'X' : '-') << "\n");
return Allocator.setProtections(LocalAddr, Flags);
}
@@ -414,14 +417,14 @@ private:
}
Error handleWriteMem(DirectBufferWriter DBW) {
- DEBUG(dbgs() << " Writing " << DBW.getSize() << " bytes to "
- << format("0x%016x", DBW.getDst()) << "\n");
+ LLVM_DEBUG(dbgs() << " Writing " << DBW.getSize() << " bytes to "
+ << format("0x%016x", DBW.getDst()) << "\n");
return Error::success();
}
Error handleWritePtr(JITTargetAddress Addr, JITTargetAddress PtrVal) {
- DEBUG(dbgs() << " Writing pointer *" << format("0x%016x", Addr) << " = "
- << format("0x%016x", PtrVal) << "\n");
+ LLVM_DEBUG(dbgs() << " Writing pointer *" << format("0x%016x", Addr)
+ << " = " << format("0x%016x", PtrVal) << "\n");
uintptr_t *Ptr =
reinterpret_cast<uintptr_t *>(static_cast<uintptr_t>(Addr));
*Ptr = static_cast<uintptr_t>(PtrVal);
diff --git a/include/llvm/Support/Debug.h b/include/llvm/Support/Debug.h
index 48e9e1bc167d..59a5b0a70e40 100644
--- a/include/llvm/Support/Debug.h
+++ b/include/llvm/Support/Debug.h
@@ -11,17 +11,18 @@
// code, without it being enabled all of the time, and without having to add
// command line options to enable it.
//
-// In particular, just wrap your code with the DEBUG() macro, and it will be
-// enabled automatically if you specify '-debug' on the command-line.
-// DEBUG() requires the DEBUG_TYPE macro to be defined. Set it to "foo" specify
-// that your debug code belongs to class "foo". Be careful that you only do
-// this after including Debug.h and not around any #include of headers. Headers
-// should define and undef the macro acround the code that needs to use the
-// DEBUG() macro. Then, on the command line, you can specify '-debug-only=foo'
-// to enable JUST the debug information for the foo class.
+// In particular, just wrap your code with the LLVM_DEBUG() macro, and it will
+// be enabled automatically if you specify '-debug' on the command-line.
+// LLVM_DEBUG() requires the DEBUG_TYPE macro to be defined. Set it to "foo" to
+// specify that your debug code belongs to class "foo". Be careful that you only
+// do this after including Debug.h and not around any #include of headers.
+// Headers should define and undef the macro around the code that needs to use
+// the LLVM_DEBUG() macro. Then, on the command line, you can specify
+// '-debug-only=foo' to enable JUST the debug information for the foo class.
//
// When compiling without assertions, the -debug-* options and all code in
-// DEBUG() statements disappears, so it does not affect the runtime of the code.
+// LLVM_DEBUG() statements disappear, so they do not affect the runtime of the
+// code.
//
//===----------------------------------------------------------------------===//
@@ -113,9 +114,11 @@ raw_ostream &dbgs();
// debug build, then the code specified as the option to the macro will be
// executed. Otherwise it will not be. Example:
//
-// DEBUG(dbgs() << "Bitset contains: " << Bitset << "\n");
+// LLVM_DEBUG(dbgs() << "Bitset contains: " << Bitset << "\n");
//
-#define DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
+#define LLVM_DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
+
+#define DEBUG(X) LLVM_DEBUG(X)
} // end namespace llvm
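
A minimal usage sketch, for illustration only (not part of this patch; the pass name "foo", the function runFoo, and the Counter variable are invented here). It follows the ordering the rewritten comment asks for, including Debug.h before defining DEBUG_TYPE, and relies on the transitional DEBUG(X) alias added above:

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "foo" // define only after the #includes

    using namespace llvm;

    static void runFoo(int Counter) {
      // Printed only in asserts builds, and only when '-debug' or
      // '-debug-only=foo' is passed on the command line.
      LLVM_DEBUG(dbgs() << "foo: counter = " << Counter << "\n");
      // The old spelling still compiles during the transition period,
      // because DEBUG(X) is aliased to LLVM_DEBUG(X).
      DEBUG(dbgs() << "foo: legacy spelling\n");
    }

With assertions disabled, both statements compile away, as noted in the header comment.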
diff --git a/include/llvm/Support/GenericDomTreeConstruction.h b/include/llvm/Support/GenericDomTreeConstruction.h
index 66e2ae8c08db..ec0f62547ccc 100644
--- a/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/include/llvm/Support/GenericDomTreeConstruction.h
@@ -146,15 +146,15 @@ struct SemiNCAInfo {
assert(llvm::find(Res, Child) != Res.end()
&& "Expected child not found in the CFG");
Res.erase(std::remove(Res.begin(), Res.end(), Child), Res.end());
- DEBUG(dbgs() << "\tHiding edge " << BlockNamePrinter(N) << " -> "
- << BlockNamePrinter(Child) << "\n");
+ LLVM_DEBUG(dbgs() << "\tHiding edge " << BlockNamePrinter(N) << " -> "
+ << BlockNamePrinter(Child) << "\n");
} else {
      // If there's a deletion in the future, it means that the edge cannot
// exist in the current CFG, but existed in it before.
assert(llvm::find(Res, Child) == Res.end() &&
"Unexpected child found in the CFG");
- DEBUG(dbgs() << "\tShowing virtual edge " << BlockNamePrinter(N)
- << " -> " << BlockNamePrinter(Child) << "\n");
+ LLVM_DEBUG(dbgs() << "\tShowing virtual edge " << BlockNamePrinter(N)
+ << " -> " << BlockNamePrinter(Child) << "\n");
Res.push_back(Child);
}
}
@@ -387,7 +387,7 @@ struct SemiNCAInfo {
SNCA.addVirtualRoot();
unsigned Num = 1;
- DEBUG(dbgs() << "\t\tLooking for trivial roots\n");
+ LLVM_DEBUG(dbgs() << "\t\tLooking for trivial roots\n");
    // Step #1: Find all the trivial roots that will definitely remain tree
    // roots.
@@ -404,14 +404,14 @@ struct SemiNCAInfo {
Roots.push_back(N);
// Run DFS not to walk this part of CFG later.
Num = SNCA.runDFS(N, Num, AlwaysDescend, 1);
- DEBUG(dbgs() << "Found a new trivial root: " << BlockNamePrinter(N)
- << "\n");
- DEBUG(dbgs() << "Last visited node: "
- << BlockNamePrinter(SNCA.NumToNode[Num]) << "\n");
+ LLVM_DEBUG(dbgs() << "Found a new trivial root: " << BlockNamePrinter(N)
+ << "\n");
+ LLVM_DEBUG(dbgs() << "Last visited node: "
+ << BlockNamePrinter(SNCA.NumToNode[Num]) << "\n");
}
}
- DEBUG(dbgs() << "\t\tLooking for non-trivial roots\n");
+ LLVM_DEBUG(dbgs() << "\t\tLooking for non-trivial roots\n");
// Step #2: Find all non-trivial root candidates. Those are CFG nodes that
  // are reverse-unreachable and were not visited by previous DFS walks (i.e. CFG
@@ -431,8 +431,8 @@ struct SemiNCAInfo {
SmallPtrSet<NodePtr, 4> ConnectToExitBlock;
for (const NodePtr I : nodes(DT.Parent)) {
if (SNCA.NodeToInfo.count(I) == 0) {
- DEBUG(dbgs() << "\t\t\tVisiting node " << BlockNamePrinter(I)
- << "\n");
+ LLVM_DEBUG(dbgs()
+ << "\t\t\tVisiting node " << BlockNamePrinter(I) << "\n");
// Find the furthest away we can get by following successors, then
// follow them in reverse. This gives us some reasonable answer about
// the post-dom tree inside any infinite loop. In particular, it
@@ -443,47 +443,49 @@ struct SemiNCAInfo {
// the lowest and highest points in the infinite loop. In theory, it
// would be nice to give the canonical backedge for the loop, but it's
// expensive and does not always lead to a minimal set of roots.
- DEBUG(dbgs() << "\t\t\tRunning forward DFS\n");
+ LLVM_DEBUG(dbgs() << "\t\t\tRunning forward DFS\n");
const unsigned NewNum = SNCA.runDFS<true>(I, Num, AlwaysDescend, Num);
const NodePtr FurthestAway = SNCA.NumToNode[NewNum];
- DEBUG(dbgs() << "\t\t\tFound a new furthest away node "
- << "(non-trivial root): "
- << BlockNamePrinter(FurthestAway) << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t\tFound a new furthest away node "
+ << "(non-trivial root): "
+ << BlockNamePrinter(FurthestAway) << "\n");
ConnectToExitBlock.insert(FurthestAway);
Roots.push_back(FurthestAway);
- DEBUG(dbgs() << "\t\t\tPrev DFSNum: " << Num << ", new DFSNum: "
- << NewNum << "\n\t\t\tRemoving DFS info\n");
+ LLVM_DEBUG(dbgs() << "\t\t\tPrev DFSNum: " << Num << ", new DFSNum: "
+ << NewNum << "\n\t\t\tRemoving DFS info\n");
for (unsigned i = NewNum; i > Num; --i) {
const NodePtr N = SNCA.NumToNode[i];
- DEBUG(dbgs() << "\t\t\t\tRemoving DFS info for "
- << BlockNamePrinter(N) << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t\t\tRemoving DFS info for "
+ << BlockNamePrinter(N) << "\n");
SNCA.NodeToInfo.erase(N);
SNCA.NumToNode.pop_back();
}
const unsigned PrevNum = Num;
- DEBUG(dbgs() << "\t\t\tRunning reverse DFS\n");
+ LLVM_DEBUG(dbgs() << "\t\t\tRunning reverse DFS\n");
Num = SNCA.runDFS(FurthestAway, Num, AlwaysDescend, 1);
for (unsigned i = PrevNum + 1; i <= Num; ++i)
- DEBUG(dbgs() << "\t\t\t\tfound node "
- << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t\t\tfound node "
+ << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");
}
}
}
- DEBUG(dbgs() << "Total: " << Total << ", Num: " << Num << "\n");
- DEBUG(dbgs() << "Discovered CFG nodes:\n");
- DEBUG(for (size_t i = 0; i <= Num; ++i) dbgs()
- << i << ": " << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");
+ LLVM_DEBUG(dbgs() << "Total: " << Total << ", Num: " << Num << "\n");
+ LLVM_DEBUG(dbgs() << "Discovered CFG nodes:\n");
+ LLVM_DEBUG(for (size_t i = 0; i <= Num; ++i) dbgs()
+ << i << ": " << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");
assert((Total + 1 == Num) && "Everything should have been visited");
// Step #3: If we found some non-trivial roots, make them non-redundant.
if (HasNonTrivialRoots) RemoveRedundantRoots(DT, BUI, Roots);
- DEBUG(dbgs() << "Found roots: ");
- DEBUG(for (auto *Root : Roots) dbgs() << BlockNamePrinter(Root) << " ");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Found roots: ");
+ LLVM_DEBUG(for (auto *Root
+ : Roots) dbgs()
+ << BlockNamePrinter(Root) << " ");
+ LLVM_DEBUG(dbgs() << "\n");
return Roots;
}
@@ -499,7 +501,7 @@ struct SemiNCAInfo {
static void RemoveRedundantRoots(const DomTreeT &DT, BatchUpdatePtr BUI,
RootsT &Roots) {
assert(IsPostDom && "This function is for postdominators only");
- DEBUG(dbgs() << "Removing redundant roots\n");
+ LLVM_DEBUG(dbgs() << "Removing redundant roots\n");
SemiNCAInfo SNCA(BUI);
@@ -507,8 +509,8 @@ struct SemiNCAInfo {
auto &Root = Roots[i];
// Trivial roots are always non-redundant.
if (!HasForwardSuccessors(Root, BUI)) continue;
- DEBUG(dbgs() << "\tChecking if " << BlockNamePrinter(Root)
- << " remains a root\n");
+ LLVM_DEBUG(dbgs() << "\tChecking if " << BlockNamePrinter(Root)
+ << " remains a root\n");
SNCA.clear();
// Do a forward walk looking for the other roots.
const unsigned Num = SNCA.runDFS<true>(Root, 0, AlwaysDescend, 0);
@@ -520,9 +522,9 @@ struct SemiNCAInfo {
// root from the set of roots, as it is reverse-reachable from the other
// one.
if (llvm::find(Roots, N) != Roots.end()) {
- DEBUG(dbgs() << "\tForward DFS walk found another root "
- << BlockNamePrinter(N) << "\n\tRemoving root "
- << BlockNamePrinter(Root) << "\n");
+ LLVM_DEBUG(dbgs() << "\tForward DFS walk found another root "
+ << BlockNamePrinter(N) << "\n\tRemoving root "
+ << BlockNamePrinter(Root) << "\n");
std::swap(Root, Roots.back());
Roots.pop_back();
@@ -563,7 +565,8 @@ struct SemiNCAInfo {
SNCA.runSemiNCA(DT);
if (BUI) {
BUI->IsRecalculated = true;
- DEBUG(dbgs() << "DomTree recalculated, skipping future batch updates\n");
+ LLVM_DEBUG(
+ dbgs() << "DomTree recalculated, skipping future batch updates\n");
}
if (DT.Roots.empty()) return;
@@ -585,8 +588,8 @@ struct SemiNCAInfo {
// Loop over all of the discovered blocks in the function...
for (size_t i = 1, e = NumToNode.size(); i != e; ++i) {
NodePtr W = NumToNode[i];
- DEBUG(dbgs() << "\tdiscovered a new reachable node "
- << BlockNamePrinter(W) << "\n");
+ LLVM_DEBUG(dbgs() << "\tdiscovered a new reachable node "
+ << BlockNamePrinter(W) << "\n");
// Don't replace this with 'count', the insertion side effect is important
if (DT.DomTreeNodes[W]) continue; // Haven't calculated this node yet?
@@ -638,8 +641,8 @@ struct SemiNCAInfo {
assert((From || IsPostDom) &&
"From has to be a valid CFG node or a virtual root");
assert(To && "Cannot be a nullptr");
- DEBUG(dbgs() << "Inserting edge " << BlockNamePrinter(From) << " -> "
- << BlockNamePrinter(To) << "\n");
+ LLVM_DEBUG(dbgs() << "Inserting edge " << BlockNamePrinter(From) << " -> "
+ << BlockNamePrinter(To) << "\n");
TreeNodePtr FromTN = DT.getNode(From);
if (!FromTN) {
@@ -678,8 +681,8 @@ struct SemiNCAInfo {
if (RIt == DT.Roots.end())
return false; // To is not a root, nothing to update.
- DEBUG(dbgs() << "\t\tAfter the insertion, " << BlockNamePrinter(To)
- << " is no longer a root\n\t\tRebuilding the tree!!!\n");
+ LLVM_DEBUG(dbgs() << "\t\tAfter the insertion, " << BlockNamePrinter(To)
+ << " is no longer a root\n\t\tRebuilding the tree!!!\n");
CalculateFromScratch(DT, BUI);
return true;
@@ -706,8 +709,8 @@ struct SemiNCAInfo {
// can make a different (implicit) decision about which node within an
// infinite loop becomes a root.
- DEBUG(dbgs() << "Roots are different in updated trees\n"
- << "The entire tree needs to be rebuilt\n");
+ LLVM_DEBUG(dbgs() << "Roots are different in updated trees\n"
+ << "The entire tree needs to be rebuilt\n");
// It may be possible to update the tree without recalculating it, but
    // we do not know yet how to do it, and it happens rarely in practice.
CalculateFromScratch(DT, BUI);
@@ -718,8 +721,8 @@ struct SemiNCAInfo {
// Handles insertion to a node already in the dominator tree.
static void InsertReachable(DomTreeT &DT, const BatchUpdatePtr BUI,
const TreeNodePtr From, const TreeNodePtr To) {
- DEBUG(dbgs() << "\tReachable " << BlockNamePrinter(From->getBlock())
- << " -> " << BlockNamePrinter(To->getBlock()) << "\n");
+ LLVM_DEBUG(dbgs() << "\tReachable " << BlockNamePrinter(From->getBlock())
+ << " -> " << BlockNamePrinter(To->getBlock()) << "\n");
if (IsPostDom && UpdateRootsBeforeInsertion(DT, BUI, From, To)) return;
// DT.findNCD expects both pointers to be valid. When From is a virtual
// root, then its CFG block pointer is a nullptr, so we have to 'compute'
@@ -732,7 +735,7 @@ struct SemiNCAInfo {
const TreeNodePtr NCD = DT.getNode(NCDBlock);
assert(NCD);
- DEBUG(dbgs() << "\t\tNCA == " << BlockNamePrinter(NCD) << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tNCA == " << BlockNamePrinter(NCD) << "\n");
const TreeNodePtr ToIDom = To->getIDom();
// Nothing affected -- NCA property holds.
@@ -741,18 +744,20 @@ struct SemiNCAInfo {
// Identify and collect affected nodes.
InsertionInfo II;
- DEBUG(dbgs() << "Marking " << BlockNamePrinter(To) << " as affected\n");
+ LLVM_DEBUG(dbgs() << "Marking " << BlockNamePrinter(To)
+ << " as affected\n");
II.Affected.insert(To);
const unsigned ToLevel = To->getLevel();
- DEBUG(dbgs() << "Putting " << BlockNamePrinter(To) << " into a Bucket\n");
+ LLVM_DEBUG(dbgs() << "Putting " << BlockNamePrinter(To)
+ << " into a Bucket\n");
II.Bucket.push({ToLevel, To});
while (!II.Bucket.empty()) {
const TreeNodePtr CurrentNode = II.Bucket.top().second;
const unsigned CurrentLevel = CurrentNode->getLevel();
II.Bucket.pop();
- DEBUG(dbgs() << "\tAdding to Visited and AffectedQueue: "
- << BlockNamePrinter(CurrentNode) << "\n");
+ LLVM_DEBUG(dbgs() << "\tAdding to Visited and AffectedQueue: "
+ << BlockNamePrinter(CurrentNode) << "\n");
II.Visited.insert({CurrentNode, CurrentLevel});
II.AffectedQueue.push_back(CurrentNode);
@@ -770,8 +775,8 @@ struct SemiNCAInfo {
const TreeNodePtr TN, const unsigned RootLevel,
const TreeNodePtr NCD, InsertionInfo &II) {
const unsigned NCDLevel = NCD->getLevel();
- DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << ", RootLevel "
- << RootLevel << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << ", RootLevel "
+ << RootLevel << "\n");
SmallVector<TreeNodePtr, 8> Stack = {TN};
assert(TN->getBlock() && II.Visited.count(TN) && "Preconditions!");
@@ -780,7 +785,7 @@ struct SemiNCAInfo {
do {
TreeNodePtr Next = Stack.pop_back_val();
- DEBUG(dbgs() << " Next: " << BlockNamePrinter(Next) << "\n");
+ LLVM_DEBUG(dbgs() << " Next: " << BlockNamePrinter(Next) << "\n");
for (const NodePtr Succ :
ChildrenGetter<IsPostDom>::Get(Next->getBlock(), BUI)) {
@@ -788,8 +793,8 @@ struct SemiNCAInfo {
assert(SuccTN && "Unreachable successor found at reachable insertion");
const unsigned SuccLevel = SuccTN->getLevel();
- DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ) << ", level = "
- << SuccLevel << "\n");
+ LLVM_DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ)
+ << ", level = " << SuccLevel << "\n");
// Do not process the same node multiple times.
if (Processed.count(Next) > 0)
@@ -798,11 +803,11 @@ struct SemiNCAInfo {
// Succ dominated by subtree From -- not affected.
// (Based on the lemma 2.5 from the second paper.)
if (SuccLevel > RootLevel) {
- DEBUG(dbgs() << "\t\tDominated by subtree From\n");
+ LLVM_DEBUG(dbgs() << "\t\tDominated by subtree From\n");
if (II.Visited.count(SuccTN) != 0) {
- DEBUG(dbgs() << "\t\t\talready visited at level "
- << II.Visited[SuccTN] << "\n\t\t\tcurrent level "
- << RootLevel << ")\n");
+ LLVM_DEBUG(dbgs() << "\t\t\talready visited at level "
+ << II.Visited[SuccTN] << "\n\t\t\tcurrent level "
+ << RootLevel << ")\n");
// A node can be necessary to visit again if we see it again at
// a lower level than before.
@@ -810,15 +815,15 @@ struct SemiNCAInfo {
continue;
}
- DEBUG(dbgs() << "\t\tMarking visited not affected "
- << BlockNamePrinter(Succ) << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tMarking visited not affected "
+ << BlockNamePrinter(Succ) << "\n");
II.Visited.insert({SuccTN, RootLevel});
II.VisitedNotAffectedQueue.push_back(SuccTN);
Stack.push_back(SuccTN);
} else if ((SuccLevel > NCDLevel + 1) &&
II.Affected.count(SuccTN) == 0) {
- DEBUG(dbgs() << "\t\tMarking affected and adding "
- << BlockNamePrinter(Succ) << " to a Bucket\n");
+ LLVM_DEBUG(dbgs() << "\t\tMarking affected and adding "
+ << BlockNamePrinter(Succ) << " to a Bucket\n");
II.Affected.insert(SuccTN);
II.Bucket.push({SuccLevel, SuccTN});
}
@@ -831,11 +836,11 @@ struct SemiNCAInfo {
// Updates immediate dominators and levels after insertion.
static void UpdateInsertion(DomTreeT &DT, const BatchUpdatePtr BUI,
const TreeNodePtr NCD, InsertionInfo &II) {
- DEBUG(dbgs() << "Updating NCD = " << BlockNamePrinter(NCD) << "\n");
+ LLVM_DEBUG(dbgs() << "Updating NCD = " << BlockNamePrinter(NCD) << "\n");
for (const TreeNodePtr TN : II.AffectedQueue) {
- DEBUG(dbgs() << "\tIDom(" << BlockNamePrinter(TN)
- << ") = " << BlockNamePrinter(NCD) << "\n");
+ LLVM_DEBUG(dbgs() << "\tIDom(" << BlockNamePrinter(TN)
+ << ") = " << BlockNamePrinter(NCD) << "\n");
TN->setIDom(NCD);
}
@@ -844,12 +849,13 @@ struct SemiNCAInfo {
}
static void UpdateLevelsAfterInsertion(InsertionInfo &II) {
- DEBUG(dbgs() << "Updating levels for visited but not affected nodes\n");
+ LLVM_DEBUG(
+ dbgs() << "Updating levels for visited but not affected nodes\n");
for (const TreeNodePtr TN : II.VisitedNotAffectedQueue) {
- DEBUG(dbgs() << "\tlevel(" << BlockNamePrinter(TN) << ") = ("
- << BlockNamePrinter(TN->getIDom()) << ") "
- << TN->getIDom()->getLevel() << " + 1\n");
+ LLVM_DEBUG(dbgs() << "\tlevel(" << BlockNamePrinter(TN) << ") = ("
+ << BlockNamePrinter(TN->getIDom()) << ") "
+ << TN->getIDom()->getLevel() << " + 1\n");
TN->UpdateLevel();
}
}
@@ -857,23 +863,24 @@ struct SemiNCAInfo {
// Handles insertion to previously unreachable nodes.
static void InsertUnreachable(DomTreeT &DT, const BatchUpdatePtr BUI,
const TreeNodePtr From, const NodePtr To) {
- DEBUG(dbgs() << "Inserting " << BlockNamePrinter(From)
- << " -> (unreachable) " << BlockNamePrinter(To) << "\n");
+ LLVM_DEBUG(dbgs() << "Inserting " << BlockNamePrinter(From)
+ << " -> (unreachable) " << BlockNamePrinter(To) << "\n");
// Collect discovered edges to already reachable nodes.
SmallVector<std::pair<NodePtr, TreeNodePtr>, 8> DiscoveredEdgesToReachable;
// Discover and connect nodes that became reachable with the insertion.
ComputeUnreachableDominators(DT, BUI, To, From, DiscoveredEdgesToReachable);
- DEBUG(dbgs() << "Inserted " << BlockNamePrinter(From)
- << " -> (prev unreachable) " << BlockNamePrinter(To) << "\n");
+ LLVM_DEBUG(dbgs() << "Inserted " << BlockNamePrinter(From)
+ << " -> (prev unreachable) " << BlockNamePrinter(To)
+ << "\n");
    // Use the discovered edges and insert discovered connecting (incoming)
// edges.
for (const auto &Edge : DiscoveredEdgesToReachable) {
- DEBUG(dbgs() << "\tInserting discovered connecting edge "
- << BlockNamePrinter(Edge.first) << " -> "
- << BlockNamePrinter(Edge.second) << "\n");
+ LLVM_DEBUG(dbgs() << "\tInserting discovered connecting edge "
+ << BlockNamePrinter(Edge.first) << " -> "
+ << BlockNamePrinter(Edge.second) << "\n");
InsertReachable(DT, BUI, DT.getNode(Edge.first), Edge.second);
}
}
@@ -901,14 +908,14 @@ struct SemiNCAInfo {
SNCA.runSemiNCA(DT);
SNCA.attachNewSubtree(DT, Incoming);
- DEBUG(dbgs() << "After adding unreachable nodes\n");
+ LLVM_DEBUG(dbgs() << "After adding unreachable nodes\n");
}
static void DeleteEdge(DomTreeT &DT, const BatchUpdatePtr BUI,
const NodePtr From, const NodePtr To) {
assert(From && To && "Cannot disconnect nullptrs");
- DEBUG(dbgs() << "Deleting edge " << BlockNamePrinter(From) << " -> "
- << BlockNamePrinter(To) << "\n");
+ LLVM_DEBUG(dbgs() << "Deleting edge " << BlockNamePrinter(From) << " -> "
+ << BlockNamePrinter(To) << "\n");
#ifndef NDEBUG
// Ensure that the edge was in fact deleted from the CFG before informing
@@ -928,8 +935,9 @@ struct SemiNCAInfo {
const TreeNodePtr ToTN = DT.getNode(To);
if (!ToTN) {
- DEBUG(dbgs() << "\tTo (" << BlockNamePrinter(To)
- << ") already unreachable -- there is no edge to delete\n");
+ LLVM_DEBUG(
+ dbgs() << "\tTo (" << BlockNamePrinter(To)
+ << ") already unreachable -- there is no edge to delete\n");
return;
}
@@ -941,8 +949,8 @@ struct SemiNCAInfo {
DT.DFSInfoValid = false;
const TreeNodePtr ToIDom = ToTN->getIDom();
- DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
- << BlockNamePrinter(ToIDom) << "\n");
+ LLVM_DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
+ << BlockNamePrinter(ToIDom) << "\n");
// To remains reachable after deletion.
// (Based on the caption under Figure 4. from the second paper.)
@@ -959,9 +967,9 @@ struct SemiNCAInfo {
static void DeleteReachable(DomTreeT &DT, const BatchUpdatePtr BUI,
const TreeNodePtr FromTN,
const TreeNodePtr ToTN) {
- DEBUG(dbgs() << "Deleting reachable " << BlockNamePrinter(FromTN) << " -> "
- << BlockNamePrinter(ToTN) << "\n");
- DEBUG(dbgs() << "\tRebuilding subtree\n");
+ LLVM_DEBUG(dbgs() << "Deleting reachable " << BlockNamePrinter(FromTN)
+ << " -> " << BlockNamePrinter(ToTN) << "\n");
+ LLVM_DEBUG(dbgs() << "\tRebuilding subtree\n");
// Find the top of the subtree that needs to be rebuilt.
// (Based on the lemma 2.6 from the second paper.)
@@ -974,7 +982,7 @@ struct SemiNCAInfo {
// Top of the subtree to rebuild is the root node. Rebuild the tree from
// scratch.
if (!PrevIDomSubTree) {
- DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
+ LLVM_DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
CalculateFromScratch(DT, BUI);
return;
}
@@ -985,11 +993,12 @@ struct SemiNCAInfo {
return DT.getNode(To)->getLevel() > Level;
};
- DEBUG(dbgs() << "\tTop of subtree: " << BlockNamePrinter(ToIDomTN) << "\n");
+ LLVM_DEBUG(dbgs() << "\tTop of subtree: " << BlockNamePrinter(ToIDomTN)
+ << "\n");
SemiNCAInfo SNCA(BUI);
SNCA.runDFS(ToIDom, 0, DescendBelow, 0);
- DEBUG(dbgs() << "\tRunning Semi-NCA\n");
+ LLVM_DEBUG(dbgs() << "\tRunning Semi-NCA\n");
SNCA.runSemiNCA(DT, Level);
SNCA.reattachExistingSubtree(DT, PrevIDomSubTree);
}
@@ -998,19 +1007,20 @@ struct SemiNCAInfo {
  // explained on page 7 of the second paper.
static bool HasProperSupport(DomTreeT &DT, const BatchUpdatePtr BUI,
const TreeNodePtr TN) {
- DEBUG(dbgs() << "IsReachableFromIDom " << BlockNamePrinter(TN) << "\n");
+ LLVM_DEBUG(dbgs() << "IsReachableFromIDom " << BlockNamePrinter(TN)
+ << "\n");
for (const NodePtr Pred :
ChildrenGetter<!IsPostDom>::Get(TN->getBlock(), BUI)) {
- DEBUG(dbgs() << "\tPred " << BlockNamePrinter(Pred) << "\n");
+ LLVM_DEBUG(dbgs() << "\tPred " << BlockNamePrinter(Pred) << "\n");
if (!DT.getNode(Pred)) continue;
const NodePtr Support =
DT.findNearestCommonDominator(TN->getBlock(), Pred);
- DEBUG(dbgs() << "\tSupport " << BlockNamePrinter(Support) << "\n");
+ LLVM_DEBUG(dbgs() << "\tSupport " << BlockNamePrinter(Support) << "\n");
if (Support != TN->getBlock()) {
- DEBUG(dbgs() << "\t" << BlockNamePrinter(TN)
- << " is reachable from support "
- << BlockNamePrinter(Support) << "\n");
+ LLVM_DEBUG(dbgs() << "\t" << BlockNamePrinter(TN)
+ << " is reachable from support "
+ << BlockNamePrinter(Support) << "\n");
return true;
}
}
@@ -1022,8 +1032,8 @@ struct SemiNCAInfo {
// (Based on the lemma 2.7 from the second paper.)
static void DeleteUnreachable(DomTreeT &DT, const BatchUpdatePtr BUI,
const TreeNodePtr ToTN) {
- DEBUG(dbgs() << "Deleting unreachable subtree " << BlockNamePrinter(ToTN)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Deleting unreachable subtree "
+ << BlockNamePrinter(ToTN) << "\n");
assert(ToTN);
assert(ToTN->getBlock());
@@ -1031,8 +1041,9 @@ struct SemiNCAInfo {
// Deletion makes a region reverse-unreachable and creates a new root.
// Simulate that by inserting an edge from the virtual root to ToTN and
// adding it as a new root.
- DEBUG(dbgs() << "\tDeletion made a region reverse-unreachable\n");
- DEBUG(dbgs() << "\tAdding new root " << BlockNamePrinter(ToTN) << "\n");
+ LLVM_DEBUG(dbgs() << "\tDeletion made a region reverse-unreachable\n");
+ LLVM_DEBUG(dbgs() << "\tAdding new root " << BlockNamePrinter(ToTN)
+ << "\n");
DT.Roots.push_back(ToTN->getBlock());
InsertReachable(DT, BUI, DT.getNode(nullptr), ToTN);
return;
@@ -1069,15 +1080,15 @@ struct SemiNCAInfo {
const TreeNodePtr NCD = DT.getNode(NCDBlock);
assert(NCD);
- DEBUG(dbgs() << "Processing affected node " << BlockNamePrinter(TN)
- << " with NCD = " << BlockNamePrinter(NCD)
- << ", MinNode =" << BlockNamePrinter(MinNode) << "\n");
+ LLVM_DEBUG(dbgs() << "Processing affected node " << BlockNamePrinter(TN)
+ << " with NCD = " << BlockNamePrinter(NCD)
+ << ", MinNode =" << BlockNamePrinter(MinNode) << "\n");
if (NCD != TN && NCD->getLevel() < MinNode->getLevel()) MinNode = NCD;
}
// Root reached, rebuild the whole tree from scratch.
if (!MinNode->getIDom()) {
- DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
+ LLVM_DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
CalculateFromScratch(DT, BUI);
return;
}
@@ -1087,7 +1098,7 @@ struct SemiNCAInfo {
for (unsigned i = LastDFSNum; i > 0; --i) {
const NodePtr N = SNCA.NumToNode[i];
const TreeNodePtr TN = DT.getNode(N);
- DEBUG(dbgs() << "Erasing node " << BlockNamePrinter(TN) << "\n");
+ LLVM_DEBUG(dbgs() << "Erasing node " << BlockNamePrinter(TN) << "\n");
EraseNode(DT, TN);
}
@@ -1095,8 +1106,8 @@ struct SemiNCAInfo {
// The affected subtree start at the To node -- there's no extra work to do.
if (MinNode == ToTN) return;
- DEBUG(dbgs() << "DeleteUnreachable: running DFS with MinNode = "
- << BlockNamePrinter(MinNode) << "\n");
+ LLVM_DEBUG(dbgs() << "DeleteUnreachable: running DFS with MinNode = "
+ << BlockNamePrinter(MinNode) << "\n");
const unsigned MinLevel = MinNode->getLevel();
const TreeNodePtr PrevIDom = MinNode->getIDom();
assert(PrevIDom);
@@ -1109,8 +1120,8 @@ struct SemiNCAInfo {
};
SNCA.runDFS(MinNode->getBlock(), 0, DescendBelow, 0);
- DEBUG(dbgs() << "Previous IDom(MinNode) = " << BlockNamePrinter(PrevIDom)
- << "\nRunning Semi-NCA\n");
+ LLVM_DEBUG(dbgs() << "Previous IDom(MinNode) = "
+ << BlockNamePrinter(PrevIDom) << "\nRunning Semi-NCA\n");
// Rebuild the remaining part of affected subtree.
SNCA.runSemiNCA(DT, MinLevel);
@@ -1169,11 +1180,11 @@ struct SemiNCAInfo {
BUI.FuturePredecessors[U.getTo()].insert({U.getFrom(), U.getKind()});
}
- DEBUG(dbgs() << "About to apply " << NumLegalized << " updates\n");
- DEBUG(if (NumLegalized < 32) for (const auto &U
- : reverse(BUI.Updates)) dbgs()
- << '\t' << U << "\n");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "About to apply " << NumLegalized << " updates\n");
+ LLVM_DEBUG(if (NumLegalized < 32) for (const auto &U
+ : reverse(BUI.Updates)) dbgs()
+ << '\t' << U << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
// If the DominatorTree was recalculated at some point, stop the batch
// updates. Full recalculations ignore batch updates and look at the actual
@@ -1201,7 +1212,7 @@ struct SemiNCAInfo {
  // minimizes the amount of work needed during incremental updates.
static void LegalizeUpdates(ArrayRef<UpdateT> AllUpdates,
SmallVectorImpl<UpdateT> &Result) {
- DEBUG(dbgs() << "Legalizing " << AllUpdates.size() << " updates\n");
+ LLVM_DEBUG(dbgs() << "Legalizing " << AllUpdates.size() << " updates\n");
    // Count the total number of insertions of each edge.
// Each insertion adds 1 and deletion subtracts 1. The end number should be
// one of {-1 (deletion), 0 (NOP), +1 (insertion)}. Otherwise, the sequence
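
As a side note, the counting rule in the context lines above can be sketched in isolation (schematic only; Kind, Edge, and netCount are simplified stand-ins, not the real UpdateT or LegalizeUpdates types):

    #include <map>
    #include <utility>
    #include <vector>

    enum class Kind { Insert, Delete };
    using Edge = std::pair<int, int>; // (From, To) as plain node ids

    // +1 for each insertion of an edge, -1 for each deletion. A final value
    // of +1 keeps one insertion, -1 keeps one deletion, and 0 means the
    // updates cancel out and the edge is dropped from the legalized sequence.
    static std::map<Edge, int>
    netCount(const std::vector<std::pair<Kind, Edge>> &Updates) {
      std::map<Edge, int> Count;
      for (const auto &U : Updates)
        Count[U.second] += (U.first == Kind::Insert ? 1 : -1);
      return Count;
    }

For example, {Insert(A,B), Delete(A,B), Insert(A,B)} nets to +1 for the edge (A,B) and legalizes to a single insertion.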
@@ -1251,7 +1262,7 @@ struct SemiNCAInfo {
static void ApplyNextUpdate(DomTreeT &DT, BatchUpdateInfo &BUI) {
assert(!BUI.Updates.empty() && "No updates to apply!");
UpdateT CurrentUpdate = BUI.Updates.pop_back_val();
- DEBUG(dbgs() << "Applying update: " << CurrentUpdate << "\n");
+ LLVM_DEBUG(dbgs() << "Applying update: " << CurrentUpdate << "\n");
// Move to the next snapshot of the CFG by removing the reverse-applied
// current update.
@@ -1530,8 +1541,8 @@ struct SemiNCAInfo {
const NodePtr BB = TN->getBlock();
if (!BB || TN->getChildren().empty()) continue;
- DEBUG(dbgs() << "Verifying parent property of node "
- << BlockNamePrinter(TN) << "\n");
+ LLVM_DEBUG(dbgs() << "Verifying parent property of node "
+ << BlockNamePrinter(TN) << "\n");
clear();
doFullDFSWalk(DT, [BB](NodePtr From, NodePtr To) {
return From != BB && To != BB;
diff --git a/include/llvm/Support/UnicodeCharRanges.h b/include/llvm/Support/UnicodeCharRanges.h
index 32fd17d5a140..3cf4a6d96602 100644
--- a/include/llvm/Support/UnicodeCharRanges.h
+++ b/include/llvm/Support/UnicodeCharRanges.h
@@ -77,17 +77,17 @@ private:
for (CharRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
if (I != Ranges.begin() && Prev >= I->Lower) {
- DEBUG(dbgs() << "Upper bound 0x");
- DEBUG(dbgs().write_hex(Prev));
- DEBUG(dbgs() << " should be less than succeeding lower bound 0x");
- DEBUG(dbgs().write_hex(I->Lower) << "\n");
+ LLVM_DEBUG(dbgs() << "Upper bound 0x");
+ LLVM_DEBUG(dbgs().write_hex(Prev));
+ LLVM_DEBUG(dbgs() << " should be less than succeeding lower bound 0x");
+ LLVM_DEBUG(dbgs().write_hex(I->Lower) << "\n");
return false;
}
if (I->Upper < I->Lower) {
- DEBUG(dbgs() << "Upper bound 0x");
- DEBUG(dbgs().write_hex(I->Lower));
- DEBUG(dbgs() << " should not be less than lower bound 0x");
- DEBUG(dbgs().write_hex(I->Upper) << "\n");
+ LLVM_DEBUG(dbgs() << "Upper bound 0x");
+ LLVM_DEBUG(dbgs().write_hex(I->Lower));
+ LLVM_DEBUG(dbgs() << " should not be less than lower bound 0x");
+ LLVM_DEBUG(dbgs().write_hex(I->Upper) << "\n");
return false;
}
Prev = I->Upper;
diff --git a/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
index 271e891bb45e..f860b4b86555 100644
--- a/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
+++ b/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -40,7 +40,7 @@ public:
/// in it.
void Add(Instruction *I) {
if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
- DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
Worklist.push_back(I);
}
}
@@ -57,7 +57,8 @@ public:
assert(Worklist.empty() && "Worklist must be empty to add initial group");
Worklist.reserve(List.size()+16);
WorklistMap.reserve(List.size());
- DEBUG(dbgs() << "IC: ADDING: " << List.size() << " instrs to worklist\n");
+ LLVM_DEBUG(dbgs() << "IC: ADDING: " << List.size()
+ << " instrs to worklist\n");
unsigned Idx = 0;
for (Instruction *I : reverse(List)) {
WorklistMap.insert(std::make_pair(I, Idx++));
diff --git a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
index 3c8bd1724e62..b7649ba88334 100644
--- a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
+++ b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -379,7 +379,7 @@ public:
Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
}
- DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n");
+ LLVM_DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n");
// If the client wants to know about all new instructions, tell it.
if (InsertedPHIs) InsertedPHIs->push_back(PHI);
diff --git a/lib/Analysis/BlockFrequencyInfoImpl.cpp b/lib/Analysis/BlockFrequencyInfoImpl.cpp
index 748736b60808..3d095068e7ff 100644
--- a/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -316,13 +316,13 @@ bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
#endif
if (isLoopHeader(Resolved)) {
- DEBUG(debugSuccessor("backedge"));
+ LLVM_DEBUG(debugSuccessor("backedge"));
Dist.addBackedge(Resolved, Weight);
return true;
}
if (Working[Resolved.Index].getContainingLoop() != OuterLoop) {
- DEBUG(debugSuccessor(" exit "));
+ LLVM_DEBUG(debugSuccessor(" exit "));
Dist.addExit(Resolved, Weight);
return true;
}
@@ -334,7 +334,7 @@ bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
"unhandled irreducible control flow");
// Irreducible backedge. Abort.
- DEBUG(debugSuccessor("abort!!!"));
+ LLVM_DEBUG(debugSuccessor("abort!!!"));
return false;
}
@@ -345,7 +345,7 @@ bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
"unhandled irreducible control flow");
}
- DEBUG(debugSuccessor(" local "));
+ LLVM_DEBUG(debugSuccessor(" local "));
Dist.addLocal(Resolved, Weight);
return true;
}
@@ -365,7 +365,7 @@ bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
/// Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
// Compute loop scale.
- DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");
+ LLVM_DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");
// Infinite loops need special handling. If we give the back edge an infinite
// mass, they may saturate all the other scales in the function down to 1,
@@ -391,20 +391,21 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
Loop.Scale =
ExitMass.isEmpty() ? InfiniteLoopScale : ExitMass.toScaled().inverse();
- DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
- << " - " << TotalBackedgeMass << ")\n"
- << " - scale = " << Loop.Scale << "\n");
+ LLVM_DEBUG(dbgs() << " - exit-mass = " << ExitMass << " ("
+ << BlockMass::getFull() << " - " << TotalBackedgeMass
+ << ")\n"
+ << " - scale = " << Loop.Scale << "\n");
}
/// Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
- DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");
+ LLVM_DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");
// Clear the subloop exits to prevent quadratic memory usage.
for (const BlockNode &M : Loop.Nodes) {
if (auto *Loop = Working[M.Index].getPackagedLoop())
Loop->Exits.clear();
- DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
+ LLVM_DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
}
Loop.IsPackaged = true;
}
@@ -426,7 +427,7 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
LoopData *OuterLoop,
Distribution &Dist) {
BlockMass Mass = Working[Source.Index].getMass();
- DEBUG(dbgs() << " => mass: " << Mass << "\n");
+ LLVM_DEBUG(dbgs() << " => mass: " << Mass << "\n");
// Distribute mass to successors as laid out in Dist.
DitheringDistributer D(Dist, Mass);
@@ -436,7 +437,7 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
BlockMass Taken = D.takeMass(W.Amount);
if (W.Type == Weight::Local) {
Working[W.TargetNode.Index].getMass() += Taken;
- DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
+ LLVM_DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
continue;
}
@@ -446,14 +447,14 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
// Check for a backedge.
if (W.Type == Weight::Backedge) {
OuterLoop->BackedgeMass[OuterLoop->getHeaderIndex(W.TargetNode)] += Taken;
- DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "back"));
+ LLVM_DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "back"));
continue;
}
// This must be an exit.
assert(W.Type == Weight::Exit);
OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
- DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "exit"));
+ LLVM_DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "exit"));
}
}
@@ -481,14 +482,14 @@ static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
}
// Translate the floats to integers.
- DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
- << ", factor = " << ScalingFactor << "\n");
+ LLVM_DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
+ << ", factor = " << ScalingFactor << "\n");
for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
- DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
- << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
- << ", int = " << BFI.Freqs[Index].Integer << "\n");
+ LLVM_DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
+ << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
+ << ", int = " << BFI.Freqs[Index].Integer << "\n");
}
}
@@ -497,12 +498,12 @@ static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
- DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
- << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
- << "\n");
+ LLVM_DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
+ << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
+ << "\n");
Loop.Scale *= Loop.Mass.toScaled();
Loop.IsPackaged = false;
- DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n");
+ LLVM_DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n");
// Propagate the head scale through the loop. Since members are visited in
// RPO, the head scale will be updated by the loop scale first, and then the
@@ -512,8 +513,8 @@ static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
: BFI.Freqs[N.Index].Scaled;
Scaled64 New = Loop.Scale * F;
- DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
- << "\n");
+ LLVM_DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => "
+ << New << "\n");
F = New;
}
}
@@ -545,7 +546,7 @@ void BlockFrequencyInfoImplBase::finalizeMetrics() {
cleanup(*this);
// Print out the final stats.
- DEBUG(dump());
+ LLVM_DEBUG(dump());
}
BlockFrequency
@@ -695,7 +696,8 @@ static void findIrreducibleHeaders(
// This is an entry block.
I->second = true;
Headers.push_back(Irr.Node);
- DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n");
+ LLVM_DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node)
+ << "\n");
break;
}
}
@@ -726,7 +728,8 @@ static void findIrreducibleHeaders(
// Store the extra header.
Headers.push_back(Irr.Node);
- DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node) << "\n");
+ LLVM_DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node)
+ << "\n");
break;
}
if (Headers.back() == Irr.Node)
@@ -735,7 +738,7 @@ static void findIrreducibleHeaders(
// This is not a header.
Others.push_back(Irr.Node);
- DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
+ LLVM_DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
}
llvm::sort(Headers.begin(), Headers.end());
llvm::sort(Others.begin(), Others.end());
@@ -746,7 +749,7 @@ static void createIrreducibleLoop(
LoopData *OuterLoop, std::list<LoopData>::iterator Insert,
const std::vector<const IrreducibleGraph::IrrNode *> &SCC) {
// Translate the SCC into RPO.
- DEBUG(dbgs() << " - found-scc\n");
+ LLVM_DEBUG(dbgs() << " - found-scc\n");
LoopData::NodeList Headers;
LoopData::NodeList Others;
@@ -807,27 +810,28 @@ void BlockFrequencyInfoImplBase::adjustLoopHeaderMass(LoopData &Loop) {
BlockMass LoopMass = BlockMass::getFull();
Distribution Dist;
- DEBUG(dbgs() << "adjust-loop-header-mass:\n");
+ LLVM_DEBUG(dbgs() << "adjust-loop-header-mass:\n");
for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
auto &HeaderNode = Loop.Nodes[H];
auto &BackedgeMass = Loop.BackedgeMass[Loop.getHeaderIndex(HeaderNode)];
- DEBUG(dbgs() << " - Add back edge mass for node "
- << getBlockName(HeaderNode) << ": " << BackedgeMass << "\n");
+ LLVM_DEBUG(dbgs() << " - Add back edge mass for node "
+ << getBlockName(HeaderNode) << ": " << BackedgeMass
+ << "\n");
if (BackedgeMass.getMass() > 0)
Dist.addLocal(HeaderNode, BackedgeMass.getMass());
else
- DEBUG(dbgs() << " Nothing added. Back edge mass is zero\n");
+ LLVM_DEBUG(dbgs() << " Nothing added. Back edge mass is zero\n");
}
DitheringDistributer D(Dist, LoopMass);
- DEBUG(dbgs() << " Distribute loop mass " << LoopMass
- << " to headers using above weights\n");
+ LLVM_DEBUG(dbgs() << " Distribute loop mass " << LoopMass
+ << " to headers using above weights\n");
for (const Weight &W : Dist.Weights) {
BlockMass Taken = D.takeMass(W.Amount);
assert(W.Type == Weight::Local && "all weights should be local");
Working[W.TargetNode.Index].getMass() = Taken;
- DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
+ LLVM_DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
}
}
@@ -838,6 +842,6 @@ void BlockFrequencyInfoImplBase::distributeIrrLoopHeaderMass(Distribution &Dist)
BlockMass Taken = D.takeMass(W.Amount);
assert(W.Type == Weight::Local && "all weights should be local");
Working[W.TargetNode.Index].getMass() = Taken;
- DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
+ LLVM_DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
}
}
diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp
index 4e8491355fdd..7299ef45ff51 100644
--- a/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/lib/Analysis/BranchProbabilityInfo.cpp
@@ -908,8 +908,9 @@ void BranchProbabilityInfo::setEdgeProbability(const BasicBlock *Src,
BranchProbability Prob) {
Probs[std::make_pair(Src, IndexInSuccessors)] = Prob;
Handles.insert(BasicBlockCallbackVH(Src, this));
- DEBUG(dbgs() << "set edge " << Src->getName() << " -> " << IndexInSuccessors
- << " successor probability to " << Prob << "\n");
+ LLVM_DEBUG(dbgs() << "set edge " << Src->getName() << " -> "
+ << IndexInSuccessors << " successor probability to " << Prob
+ << "\n");
}
raw_ostream &
@@ -934,8 +935,8 @@ void BranchProbabilityInfo::eraseBlock(const BasicBlock *BB) {
void BranchProbabilityInfo::calculate(const Function &F, const LoopInfo &LI,
const TargetLibraryInfo *TLI) {
- DEBUG(dbgs() << "---- Branch Probability Info : " << F.getName()
- << " ----\n\n");
+ LLVM_DEBUG(dbgs() << "---- Branch Probability Info : " << F.getName()
+ << " ----\n\n");
LastF = &F; // Store the last function we ran on for printing.
assert(PostDominatedByUnreachable.empty());
assert(PostDominatedByColdCall.empty());
@@ -953,18 +954,19 @@ void BranchProbabilityInfo::calculate(const Function &F, const LoopInfo &LI,
if (Scc.size() == 1)
continue;
- DEBUG(dbgs() << "BPI: SCC " << SccNum << ":");
+ LLVM_DEBUG(dbgs() << "BPI: SCC " << SccNum << ":");
for (auto *BB : Scc) {
- DEBUG(dbgs() << " " << BB->getName());
+ LLVM_DEBUG(dbgs() << " " << BB->getName());
SccI.SccNums[BB] = SccNum;
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
}
// Walk the basic blocks in post-order so that we can build up state about
// the successors of a block iteratively.
for (auto BB : post_order(&F.getEntryBlock())) {
- DEBUG(dbgs() << "Computing probabilities for " << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Computing probabilities for " << BB->getName()
+ << "\n");
updatePostDominatedByUnreachable(BB);
updatePostDominatedByColdCall(BB);
    // If there are not at least two successors, there is no point in setting
    // a probability.
diff --git a/lib/Analysis/CFLAndersAliasAnalysis.cpp b/lib/Analysis/CFLAndersAliasAnalysis.cpp
index cdb7ca9fb696..ef7e95ffb1fe 100644
--- a/lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ b/lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -855,8 +855,9 @@ AliasResult CFLAndersAAResult::query(const MemoryLocation &LocA,
if (!Fn) {
// The only times this is known to happen are when globals + InlineAsm are
// involved
- DEBUG(dbgs()
- << "CFLAndersAA: could not extract parent function information.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "CFLAndersAA: could not extract parent function information.\n");
return MayAlias;
}
} else {
diff --git a/lib/Analysis/CFLSteensAliasAnalysis.cpp b/lib/Analysis/CFLSteensAliasAnalysis.cpp
index eee6d26ba787..30ce13578e54 100644
--- a/lib/Analysis/CFLSteensAliasAnalysis.cpp
+++ b/lib/Analysis/CFLSteensAliasAnalysis.cpp
@@ -276,8 +276,9 @@ AliasResult CFLSteensAAResult::query(const MemoryLocation &LocA,
if (!MaybeFnA && !MaybeFnB) {
// The only times this is known to happen are when globals + InlineAsm are
// involved
- DEBUG(dbgs()
- << "CFLSteensAA: could not extract parent function information.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "CFLSteensAA: could not extract parent function information.\n");
return MayAlias;
}
diff --git a/lib/Analysis/CGSCCPassManager.cpp b/lib/Analysis/CGSCCPassManager.cpp
index 62e3dfc8b060..b325afb8e7c5 100644
--- a/lib/Analysis/CGSCCPassManager.cpp
+++ b/lib/Analysis/CGSCCPassManager.cpp
@@ -75,7 +75,7 @@ PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
// If the CGSCC pass wasn't able to provide a valid updated SCC, the
// current SCC may simply need to be skipped if invalid.
if (UR.InvalidatedSCCs.count(C)) {
- DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
+ LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
break;
}
// Check that we didn't miss any update scenario.
@@ -353,7 +353,8 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
// Add the current SCC to the worklist as its shape has changed.
UR.CWorklist.insert(C);
- DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist:" << *C << "\n");
+ LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist:" << *C
+ << "\n");
SCC *OldC = C;
@@ -389,7 +390,7 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
assert(C != &NewC && "No need to re-visit the current SCC!");
assert(OldC != &NewC && "Already handled the original SCC!");
UR.CWorklist.insert(&NewC);
- DEBUG(dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n");
+ LLVM_DEBUG(dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n");
// Ensure new SCCs' function analyses are updated.
if (NeedFAMProxy)
@@ -514,8 +515,8 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
return false;
RC->removeOutgoingEdge(N, *TargetN);
- DEBUG(dbgs() << "Deleting outgoing edge from '" << N
- << "' to '" << TargetN << "'\n");
+ LLVM_DEBUG(dbgs() << "Deleting outgoing edge from '"
+ << N << "' to '" << TargetN << "'\n");
return true;
}),
DeadTargets.end());
@@ -546,8 +547,8 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
assert(NewRC != RC && "Should not encounter the current RefSCC further "
"in the postorder list of new RefSCCs.");
UR.RCWorklist.insert(NewRC);
- DEBUG(dbgs() << "Enqueuing a new RefSCC in the update worklist: "
- << *NewRC << "\n");
+ LLVM_DEBUG(dbgs() << "Enqueuing a new RefSCC in the update worklist: "
+ << *NewRC << "\n");
}
}
@@ -564,8 +565,8 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
assert(RC->isAncestorOf(TargetRC) &&
"Cannot potentially form RefSCC cycles here!");
RC->switchOutgoingEdgeToRef(N, *RefTarget);
- DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
- << "' to '" << *RefTarget << "'\n");
+ LLVM_DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
+ << "' to '" << *RefTarget << "'\n");
continue;
}
@@ -593,12 +594,12 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
assert(RC->isAncestorOf(TargetRC) &&
"Cannot potentially form RefSCC cycles here!");
RC->switchOutgoingEdgeToCall(N, *CallTarget);
- DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
- << "' to '" << *CallTarget << "'\n");
+ LLVM_DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
+ << "' to '" << *CallTarget << "'\n");
continue;
}
- DEBUG(dbgs() << "Switch an internal ref edge to a call edge from '" << N
- << "' to '" << *CallTarget << "'\n");
+ LLVM_DEBUG(dbgs() << "Switch an internal ref edge to a call edge from '"
+ << N << "' to '" << *CallTarget << "'\n");
// Otherwise we are switching an internal ref edge to a call edge. This
// may merge away some SCCs, and we add those to the UpdateResult. We also
@@ -661,14 +662,14 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
// post-order sequence, and may end up observing more precise context to
// optimize the current SCC.
UR.CWorklist.insert(C);
- DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist: " << *C
- << "\n");
+ LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist: " << *C
+ << "\n");
// Enqueue in reverse order as we pop off the back of the worklist.
for (SCC &MovedC : llvm::reverse(make_range(RC->begin() + InitialSCCIndex,
RC->begin() + NewSCCIndex))) {
UR.CWorklist.insert(&MovedC);
- DEBUG(dbgs() << "Enqueuing a newly earlier in post-order SCC: "
- << MovedC << "\n");
+ LLVM_DEBUG(dbgs() << "Enqueuing a newly earlier in post-order SCC: "
+ << MovedC << "\n");
}
}
}
diff --git a/lib/Analysis/CallGraphSCCPass.cpp b/lib/Analysis/CallGraphSCCPass.cpp
index b1b3b79b4078..4bb181e352d8 100644
--- a/lib/Analysis/CallGraphSCCPass.cpp
+++ b/lib/Analysis/CallGraphSCCPass.cpp
@@ -162,8 +162,8 @@ bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
// The function pass(es) modified the IR, they may have clobbered the
// callgraph.
if (Changed && CallGraphUpToDate) {
- DEBUG(dbgs() << "CGSCCPASSMGR: Pass Dirtied SCC: "
- << P->getPassName() << '\n');
+ LLVM_DEBUG(dbgs() << "CGSCCPASSMGR: Pass Dirtied SCC: " << P->getPassName()
+ << '\n');
CallGraphUpToDate = false;
}
return Changed;
@@ -181,12 +181,11 @@ bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
bool CheckingMode) {
DenseMap<Value*, CallGraphNode*> CallSites;
-
- DEBUG(dbgs() << "CGSCCPASSMGR: Refreshing SCC with " << CurSCC.size()
- << " nodes:\n";
- for (CallGraphNode *CGN : CurSCC)
- CGN->dump();
- );
+
+ LLVM_DEBUG(dbgs() << "CGSCCPASSMGR: Refreshing SCC with " << CurSCC.size()
+ << " nodes:\n";
+ for (CallGraphNode *CGN
+ : CurSCC) CGN->dump(););
bool MadeChange = false;
bool DevirtualizedCall = false;
@@ -307,8 +306,8 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
// one.
if (!ExistingNode->getFunction()) {
DevirtualizedCall = true;
- DEBUG(dbgs() << " CGSCCPASSMGR: Devirtualized call to '"
- << Callee->getName() << "'\n");
+ LLVM_DEBUG(dbgs() << " CGSCCPASSMGR: Devirtualized call to '"
+ << Callee->getName() << "'\n");
}
} else {
CalleeNode = CG.getCallsExternalNode();
@@ -363,17 +362,15 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
CallSites.clear();
}
- DEBUG(if (MadeChange) {
- dbgs() << "CGSCCPASSMGR: Refreshed SCC is now:\n";
- for (CallGraphNode *CGN : CurSCC)
- CGN->dump();
- if (DevirtualizedCall)
- dbgs() << "CGSCCPASSMGR: Refresh devirtualized a call!\n";
-
- } else {
- dbgs() << "CGSCCPASSMGR: SCC Refresh didn't change call graph.\n";
- }
- );
+ LLVM_DEBUG(if (MadeChange) {
+ dbgs() << "CGSCCPASSMGR: Refreshed SCC is now:\n";
+ for (CallGraphNode *CGN : CurSCC)
+ CGN->dump();
+ if (DevirtualizedCall)
+ dbgs() << "CGSCCPASSMGR: Refresh devirtualized a call!\n";
+ } else {
+ dbgs() << "CGSCCPASSMGR: SCC Refresh didn't change call graph.\n";
+ });
(void)MadeChange;
return DevirtualizedCall;
@@ -472,16 +469,17 @@ bool CGPassManager::runOnModule(Module &M) {
unsigned Iteration = 0;
bool DevirtualizedCall = false;
do {
- DEBUG(if (Iteration)
- dbgs() << " SCCPASSMGR: Re-visiting SCC, iteration #"
- << Iteration << '\n');
+ LLVM_DEBUG(if (Iteration) dbgs()
+ << " SCCPASSMGR: Re-visiting SCC, iteration #" << Iteration
+ << '\n');
DevirtualizedCall = false;
Changed |= RunAllPassesOnSCC(CurSCC, CG, DevirtualizedCall);
} while (Iteration++ < MaxIterations && DevirtualizedCall);
if (DevirtualizedCall)
- DEBUG(dbgs() << " CGSCCPASSMGR: Stopped iteration after " << Iteration
- << " times, due to -max-cg-scc-iterations\n");
+ LLVM_DEBUG(dbgs() << " CGSCCPASSMGR: Stopped iteration after "
+ << Iteration
+ << " times, due to -max-cg-scc-iterations\n");
MaxSCCIterations.updateMax(Iteration);
}
diff --git a/lib/Analysis/CodeMetrics.cpp b/lib/Analysis/CodeMetrics.cpp
index ac7d14ebdaea..46cc87d2b178 100644
--- a/lib/Analysis/CodeMetrics.cpp
+++ b/lib/Analysis/CodeMetrics.cpp
@@ -61,7 +61,7 @@ static void completeEphemeralValues(SmallPtrSetImpl<const Value *> &Visited,
continue;
EphValues.insert(V);
- DEBUG(dbgs() << "Ephemeral Value: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "Ephemeral Value: " << *V << "\n");
// Append any more operands to consider.
appendSpeculatableOperands(V, Visited, Worklist);
diff --git a/lib/Analysis/DemandedBits.cpp b/lib/Analysis/DemandedBits.cpp
index de7d21f9f133..58c5bccff65d 100644
--- a/lib/Analysis/DemandedBits.cpp
+++ b/lib/Analysis/DemandedBits.cpp
@@ -283,7 +283,7 @@ void DemandedBits::performAnalysis() {
if (!isAlwaysLive(&I))
continue;
- DEBUG(dbgs() << "DemandedBits: Root: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "DemandedBits: Root: " << I << "\n");
// For integer-valued instructions, set up an initial empty set of alive
// bits and add the instruction to the work list. For other instructions
  // add their operands to the work list (for integer-valued operands, mark
@@ -313,13 +313,13 @@ void DemandedBits::performAnalysis() {
while (!Worklist.empty()) {
Instruction *UserI = Worklist.pop_back_val();
- DEBUG(dbgs() << "DemandedBits: Visiting: " << *UserI);
+ LLVM_DEBUG(dbgs() << "DemandedBits: Visiting: " << *UserI);
APInt AOut;
if (UserI->getType()->isIntegerTy()) {
AOut = AliveBits[UserI];
- DEBUG(dbgs() << " Alive Out: " << AOut);
+ LLVM_DEBUG(dbgs() << " Alive Out: " << AOut);
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
if (!UserI->getType()->isIntegerTy())
Visited.insert(UserI);
diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp
index aa4c3f21758e..252341721f1d 100644
--- a/lib/Analysis/DependenceAnalysis.cpp
+++ b/lib/Analysis/DependenceAnalysis.cpp
@@ -415,9 +415,9 @@ LLVM_DUMP_METHOD void DependenceInfo::Constraint::dump(raw_ostream &OS) const {
// PLDI 1991
bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
++DeltaApplications;
- DEBUG(dbgs() << "\tintersect constraints\n");
- DEBUG(dbgs() << "\t X ="; X->dump(dbgs()));
- DEBUG(dbgs() << "\t Y ="; Y->dump(dbgs()));
+ LLVM_DEBUG(dbgs() << "\tintersect constraints\n");
+ LLVM_DEBUG(dbgs() << "\t X ="; X->dump(dbgs()));
+ LLVM_DEBUG(dbgs() << "\t Y ="; Y->dump(dbgs()));
assert(!Y->isPoint() && "Y must not be a Point");
if (X->isAny()) {
if (Y->isAny())
@@ -433,7 +433,7 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
}
if (X->isDistance() && Y->isDistance()) {
- DEBUG(dbgs() << "\t intersect 2 distances\n");
+ LLVM_DEBUG(dbgs() << "\t intersect 2 distances\n");
if (isKnownPredicate(CmpInst::ICMP_EQ, X->getD(), Y->getD()))
return false;
if (isKnownPredicate(CmpInst::ICMP_NE, X->getD(), Y->getD())) {
@@ -460,12 +460,12 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
"We shouldn't ever see X->isPoint() && Y->isPoint()");
if (X->isLine() && Y->isLine()) {
- DEBUG(dbgs() << "\t intersect 2 lines\n");
+ LLVM_DEBUG(dbgs() << "\t intersect 2 lines\n");
const SCEV *Prod1 = SE->getMulExpr(X->getA(), Y->getB());
const SCEV *Prod2 = SE->getMulExpr(X->getB(), Y->getA());
if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2)) {
// slopes are equal, so lines are parallel
- DEBUG(dbgs() << "\t\tsame slope\n");
+ LLVM_DEBUG(dbgs() << "\t\tsame slope\n");
Prod1 = SE->getMulExpr(X->getC(), Y->getB());
Prod2 = SE->getMulExpr(X->getB(), Y->getC());
if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2))
@@ -479,7 +479,7 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
}
if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) {
// slopes differ, so lines intersect
- DEBUG(dbgs() << "\t\tdifferent slopes\n");
+ LLVM_DEBUG(dbgs() << "\t\tdifferent slopes\n");
const SCEV *C1B2 = SE->getMulExpr(X->getC(), Y->getB());
const SCEV *C1A2 = SE->getMulExpr(X->getC(), Y->getA());
const SCEV *C2B1 = SE->getMulExpr(Y->getC(), X->getB());
@@ -501,10 +501,10 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
APInt Xbot = A1B2_A2B1->getAPInt();
APInt Ytop = C1A2_C2A1->getAPInt();
APInt Ybot = A2B1_A1B2->getAPInt();
- DEBUG(dbgs() << "\t\tXtop = " << Xtop << "\n");
- DEBUG(dbgs() << "\t\tXbot = " << Xbot << "\n");
- DEBUG(dbgs() << "\t\tYtop = " << Ytop << "\n");
- DEBUG(dbgs() << "\t\tYbot = " << Ybot << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tXtop = " << Xtop << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tXbot = " << Xbot << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tYtop = " << Ytop << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tYbot = " << Ybot << "\n");
APInt Xq = Xtop; // these need to be initialized, even
APInt Xr = Xtop; // though they're just going to be overwritten
APInt::sdivrem(Xtop, Xbot, Xq, Xr);
@@ -516,7 +516,7 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
++DeltaSuccesses;
return true;
}
- DEBUG(dbgs() << "\t\tX = " << Xq << ", Y = " << Yq << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tX = " << Xq << ", Y = " << Yq << "\n");
if (Xq.slt(0) || Yq.slt(0)) {
X->setEmpty();
++DeltaSuccesses;
@@ -525,7 +525,7 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
if (const SCEVConstant *CUB =
collectConstantUpperBound(X->getAssociatedLoop(), Prod1->getType())) {
const APInt &UpperBound = CUB->getAPInt();
- DEBUG(dbgs() << "\t\tupper bound = " << UpperBound << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tupper bound = " << UpperBound << "\n");
if (Xq.sgt(UpperBound) || Yq.sgt(UpperBound)) {
X->setEmpty();
++DeltaSuccesses;
@@ -545,7 +545,7 @@ bool DependenceInfo::intersectConstraints(Constraint *X, const Constraint *Y) {
assert(!(X->isLine() && Y->isPoint()) && "This case should never occur");
if (X->isPoint() && Y->isLine()) {
- DEBUG(dbgs() << "\t intersect Point and Line\n");
+ LLVM_DEBUG(dbgs() << "\t intersect Point and Line\n");
const SCEV *A1X1 = SE->getMulExpr(Y->getA(), X->getX());
const SCEV *B1Y1 = SE->getMulExpr(Y->getB(), X->getY());
const SCEV *Sum = SE->getAddExpr(A1X1, B1Y1);
@@ -1033,19 +1033,19 @@ const SCEVConstant *DependenceInfo::collectConstantUpperBound(const Loop *L,
// Return true if dependence disproved.
bool DependenceInfo::testZIV(const SCEV *Src, const SCEV *Dst,
FullDependence &Result) const {
- DEBUG(dbgs() << " src = " << *Src << "\n");
- DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << " src = " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << " dst = " << *Dst << "\n");
++ZIVapplications;
if (isKnownPredicate(CmpInst::ICMP_EQ, Src, Dst)) {
- DEBUG(dbgs() << " provably dependent\n");
+ LLVM_DEBUG(dbgs() << " provably dependent\n");
return false; // provably dependent
}
if (isKnownPredicate(CmpInst::ICMP_NE, Src, Dst)) {
- DEBUG(dbgs() << " provably independent\n");
+ LLVM_DEBUG(dbgs() << " provably independent\n");
++ZIVindependence;
return true; // provably independent
}
- DEBUG(dbgs() << " possibly dependent\n");
+ LLVM_DEBUG(dbgs() << " possibly dependent\n");
Result.Consistent = false;
return false; // possibly dependent
}
@@ -1082,25 +1082,25 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
const SCEV *DstConst, const Loop *CurLoop,
unsigned Level, FullDependence &Result,
Constraint &NewConstraint) const {
- DEBUG(dbgs() << "\tStrong SIV test\n");
- DEBUG(dbgs() << "\t Coeff = " << *Coeff);
- DEBUG(dbgs() << ", " << *Coeff->getType() << "\n");
- DEBUG(dbgs() << "\t SrcConst = " << *SrcConst);
- DEBUG(dbgs() << ", " << *SrcConst->getType() << "\n");
- DEBUG(dbgs() << "\t DstConst = " << *DstConst);
- DEBUG(dbgs() << ", " << *DstConst->getType() << "\n");
+ LLVM_DEBUG(dbgs() << "\tStrong SIV test\n");
+ LLVM_DEBUG(dbgs() << "\t Coeff = " << *Coeff);
+ LLVM_DEBUG(dbgs() << ", " << *Coeff->getType() << "\n");
+ LLVM_DEBUG(dbgs() << "\t SrcConst = " << *SrcConst);
+ LLVM_DEBUG(dbgs() << ", " << *SrcConst->getType() << "\n");
+ LLVM_DEBUG(dbgs() << "\t DstConst = " << *DstConst);
+ LLVM_DEBUG(dbgs() << ", " << *DstConst->getType() << "\n");
++StrongSIVapplications;
assert(0 < Level && Level <= CommonLevels && "level out of range");
Level--;
const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst);
- DEBUG(dbgs() << "\t Delta = " << *Delta);
- DEBUG(dbgs() << ", " << *Delta->getType() << "\n");
+ LLVM_DEBUG(dbgs() << "\t Delta = " << *Delta);
+ LLVM_DEBUG(dbgs() << ", " << *Delta->getType() << "\n");
// check that |Delta| < iteration count
if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
- DEBUG(dbgs() << "\t UpperBound = " << *UpperBound);
- DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n");
+ LLVM_DEBUG(dbgs() << "\t UpperBound = " << *UpperBound);
+ LLVM_DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n");
const SCEV *AbsDelta =
SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta);
const SCEV *AbsCoeff =
@@ -1121,8 +1121,8 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
APInt Distance = ConstDelta; // these need to be initialized
APInt Remainder = ConstDelta;
APInt::sdivrem(ConstDelta, ConstCoeff, Distance, Remainder);
- DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
- DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ LLVM_DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
+ LLVM_DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
// Make sure Coeff divides Delta exactly
if (Remainder != 0) {
// Coeff doesn't divide Distance, no dependence
@@ -1149,7 +1149,7 @@ bool DependenceInfo::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst,
}
else {
if (Coeff->isOne()) {
- DEBUG(dbgs() << "\t Distance = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << "\t Distance = " << *Delta << "\n");
Result.DV[Level].Distance = Delta; // since X/1 == X
NewConstraint.setDistance(Delta, CurLoop);
}
@@ -1218,16 +1218,16 @@ bool DependenceInfo::weakCrossingSIVtest(
const SCEV *Coeff, const SCEV *SrcConst, const SCEV *DstConst,
const Loop *CurLoop, unsigned Level, FullDependence &Result,
Constraint &NewConstraint, const SCEV *&SplitIter) const {
- DEBUG(dbgs() << "\tWeak-Crossing SIV test\n");
- DEBUG(dbgs() << "\t Coeff = " << *Coeff << "\n");
- DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
- DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ LLVM_DEBUG(dbgs() << "\tWeak-Crossing SIV test\n");
+ LLVM_DEBUG(dbgs() << "\t Coeff = " << *Coeff << "\n");
+ LLVM_DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ LLVM_DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
++WeakCrossingSIVapplications;
assert(0 < Level && Level <= CommonLevels && "Level out of range");
Level--;
Result.Consistent = false;
const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
- DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
NewConstraint.setLine(Coeff, Coeff, Delta, CurLoop);
if (Delta->isZero()) {
Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT);
@@ -1257,7 +1257,7 @@ bool DependenceInfo::weakCrossingSIVtest(
SplitIter = SE->getUDivExpr(
SE->getSMaxExpr(SE->getZero(Delta->getType()), Delta),
SE->getMulExpr(SE->getConstant(Delta->getType(), 2), ConstCoeff));
- DEBUG(dbgs() << "\t Split iter = " << *SplitIter << "\n");
+ LLVM_DEBUG(dbgs() << "\t Split iter = " << *SplitIter << "\n");
const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
if (!ConstDelta)
@@ -1265,8 +1265,8 @@ bool DependenceInfo::weakCrossingSIVtest(
// We're certain that ConstCoeff > 0; therefore,
// if Delta < 0, then no dependence.
- DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
- DEBUG(dbgs() << "\t ConstCoeff = " << *ConstCoeff << "\n");
+ LLVM_DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << "\t ConstCoeff = " << *ConstCoeff << "\n");
if (SE->isKnownNegative(Delta)) {
// No dependence, Delta < 0
++WeakCrossingSIVindependence;
@@ -1277,11 +1277,11 @@ bool DependenceInfo::weakCrossingSIVtest(
// We're certain that Delta > 0 and ConstCoeff > 0.
// Check Delta/(2*ConstCoeff) against upper loop bound
if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
- DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ LLVM_DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
const SCEV *ConstantTwo = SE->getConstant(UpperBound->getType(), 2);
const SCEV *ML = SE->getMulExpr(SE->getMulExpr(ConstCoeff, UpperBound),
ConstantTwo);
- DEBUG(dbgs() << "\t ML = " << *ML << "\n");
+ LLVM_DEBUG(dbgs() << "\t ML = " << *ML << "\n");
if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, ML)) {
// Delta too big, no dependence
++WeakCrossingSIVindependence;
@@ -1309,19 +1309,19 @@ bool DependenceInfo::weakCrossingSIVtest(
APInt Distance = APDelta; // these need to be initialized
APInt Remainder = APDelta;
APInt::sdivrem(APDelta, APCoeff, Distance, Remainder);
- DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ LLVM_DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
if (Remainder != 0) {
// Coeff doesn't divide Delta, no dependence
++WeakCrossingSIVindependence;
++WeakCrossingSIVsuccesses;
return true;
}
- DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
+ LLVM_DEBUG(dbgs() << "\t Distance = " << Distance << "\n");
// if 2*Coeff doesn't divide Delta, then the equal direction isn't possible
APInt Two = APInt(Distance.getBitWidth(), 2, true);
Remainder = Distance.srem(Two);
- DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
+ LLVM_DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n");
if (Remainder != 0) {
// Equal direction isn't possible
Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::EQ);
@@ -1357,7 +1357,7 @@ static bool findGCD(unsigned Bits, const APInt &AM, const APInt &BM,
APInt::sdivrem(G0, G1, Q, R);
}
G = G1;
- DEBUG(dbgs() << "\t GCD = " << G << "\n");
+ LLVM_DEBUG(dbgs() << "\t GCD = " << G << "\n");
X = AM.slt(0) ? -A1 : A1;
Y = BM.slt(0) ? B1 : -B1;
@@ -1430,17 +1430,17 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
const Loop *CurLoop, unsigned Level,
FullDependence &Result,
Constraint &NewConstraint) const {
- DEBUG(dbgs() << "\tExact SIV test\n");
- DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
- DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
- DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
- DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ LLVM_DEBUG(dbgs() << "\tExact SIV test\n");
+ LLVM_DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
+ LLVM_DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
+ LLVM_DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ LLVM_DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
++ExactSIVapplications;
assert(0 < Level && Level <= CommonLevels && "Level out of range");
Level--;
Result.Consistent = false;
const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
- DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff),
Delta, CurLoop);
const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
@@ -1461,7 +1461,7 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
return true;
}
- DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
+ LLVM_DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
// since SCEV construction normalizes, LM = 0
APInt UM(Bits, 1, true);
@@ -1470,7 +1470,7 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
if (const SCEVConstant *CUB =
collectConstantUpperBound(CurLoop, Delta->getType())) {
UM = CUB->getAPInt();
- DEBUG(dbgs() << "\t UM = " << UM << "\n");
+ LLVM_DEBUG(dbgs() << "\t UM = " << UM << "\n");
UMvalid = true;
}
@@ -1481,18 +1481,18 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
APInt TMUL = BM.sdiv(G);
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
if (UMvalid) {
TU = minAPInt(TU, floorOfQuotient(UM - X, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
}
}
else {
TU = minAPInt(TU, floorOfQuotient(-X, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
if (UMvalid) {
TL = maxAPInt(TL, ceilingOfQuotient(UM - X, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
}
}
@@ -1500,18 +1500,18 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
TMUL = AM.sdiv(G);
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
if (UMvalid) {
TU = minAPInt(TU, floorOfQuotient(UM - Y, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
}
}
else {
TU = minAPInt(TU, floorOfQuotient(-Y, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
if (UMvalid) {
TL = maxAPInt(TL, ceilingOfQuotient(UM - Y, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
}
}
if (TL.sgt(TU)) {
@@ -1526,15 +1526,15 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
// less than
APInt SaveTU(TU); // save these
APInt SaveTL(TL);
- DEBUG(dbgs() << "\t exploring LT direction\n");
+ LLVM_DEBUG(dbgs() << "\t exploring LT direction\n");
TMUL = AM - BM;
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(X - Y + 1, TMUL));
- DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
}
else {
TU = minAPInt(TU, floorOfQuotient(X - Y + 1, TMUL));
- DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
}
if (TL.sle(TU)) {
NewDirection |= Dependence::DVEntry::LT;
@@ -1544,23 +1544,23 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
// equal
TU = SaveTU; // restore
TL = SaveTL;
- DEBUG(dbgs() << "\t exploring EQ direction\n");
+ LLVM_DEBUG(dbgs() << "\t exploring EQ direction\n");
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(X - Y, TMUL));
- DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
}
else {
TU = minAPInt(TU, floorOfQuotient(X - Y, TMUL));
- DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
}
TMUL = BM - AM;
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(Y - X, TMUL));
- DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
}
else {
TU = minAPInt(TU, floorOfQuotient(Y - X, TMUL));
- DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
}
if (TL.sle(TU)) {
NewDirection |= Dependence::DVEntry::EQ;
@@ -1570,14 +1570,14 @@ bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
// greater than
TU = SaveTU; // restore
TL = SaveTL;
- DEBUG(dbgs() << "\t exploring GT direction\n");
+ LLVM_DEBUG(dbgs() << "\t exploring GT direction\n");
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(Y - X + 1, TMUL));
- DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TL = " << TL << "\n");
}
else {
TU = minAPInt(TU, floorOfQuotient(Y - X + 1, TMUL));
- DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t TU = " << TU << "\n");
}
if (TL.sle(TU)) {
NewDirection |= Dependence::DVEntry::GT;
@@ -1643,10 +1643,10 @@ bool DependenceInfo::weakZeroSrcSIVtest(const SCEV *DstCoeff,
// For the WeakSIV test, it's possible the loop isn't common to
// the Src and Dst loops. If it isn't, then there's no need to
// record a direction.
- DEBUG(dbgs() << "\tWeak-Zero (src) SIV test\n");
- DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << "\n");
- DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
- DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ LLVM_DEBUG(dbgs() << "\tWeak-Zero (src) SIV test\n");
+ LLVM_DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << "\n");
+ LLVM_DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ LLVM_DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
++WeakZeroSIVapplications;
assert(0 < Level && Level <= MaxLevels && "Level out of range");
Level--;
@@ -1654,7 +1654,7 @@ bool DependenceInfo::weakZeroSrcSIVtest(const SCEV *DstCoeff,
const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst);
NewConstraint.setLine(SE->getZero(Delta->getType()), DstCoeff, Delta,
CurLoop);
- DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
if (isKnownPredicate(CmpInst::ICMP_EQ, SrcConst, DstConst)) {
if (Level < CommonLevels) {
Result.DV[Level].Direction &= Dependence::DVEntry::LE;
@@ -1675,7 +1675,7 @@ bool DependenceInfo::weakZeroSrcSIVtest(const SCEV *DstCoeff,
// check that Delta/SrcCoeff < iteration count
// really check NewDelta < count*AbsCoeff
if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
- DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ LLVM_DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound);
if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) {
++WeakZeroSIVindependence;
@@ -1752,10 +1752,10 @@ bool DependenceInfo::weakZeroDstSIVtest(const SCEV *SrcCoeff,
Constraint &NewConstraint) const {
// For the WeakSIV test, it's possible the loop isn't common to the
// Src and Dst loops. If it isn't, then there's no need to record a direction.
- DEBUG(dbgs() << "\tWeak-Zero (dst) SIV test\n");
- DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << "\n");
- DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
- DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ LLVM_DEBUG(dbgs() << "\tWeak-Zero (dst) SIV test\n");
+ LLVM_DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << "\n");
+ LLVM_DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ LLVM_DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
++WeakZeroSIVapplications;
assert(0 < Level && Level <= SrcLevels && "Level out of range");
Level--;
@@ -1763,7 +1763,7 @@ bool DependenceInfo::weakZeroDstSIVtest(const SCEV *SrcCoeff,
const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
NewConstraint.setLine(SrcCoeff, SE->getZero(Delta->getType()), Delta,
CurLoop);
- DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
if (isKnownPredicate(CmpInst::ICMP_EQ, DstConst, SrcConst)) {
if (Level < CommonLevels) {
Result.DV[Level].Direction &= Dependence::DVEntry::LE;
@@ -1784,7 +1784,7 @@ bool DependenceInfo::weakZeroDstSIVtest(const SCEV *SrcCoeff,
// check that Delta/SrcCoeff < iteration count
// really check NewDelta < count*AbsCoeff
if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) {
- DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
+ LLVM_DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n");
const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound);
if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) {
++WeakZeroSIVindependence;
@@ -1833,15 +1833,15 @@ bool DependenceInfo::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
const SCEV *SrcConst, const SCEV *DstConst,
const Loop *SrcLoop, const Loop *DstLoop,
FullDependence &Result) const {
- DEBUG(dbgs() << "\tExact RDIV test\n");
- DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
- DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
- DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
- DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
+ LLVM_DEBUG(dbgs() << "\tExact RDIV test\n");
+ LLVM_DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n");
+ LLVM_DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n");
+ LLVM_DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n");
+ LLVM_DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n");
++ExactRDIVapplications;
Result.Consistent = false;
const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
- DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << "\t Delta = " << *Delta << "\n");
const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
@@ -1859,7 +1859,7 @@ bool DependenceInfo::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
return true;
}
- DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
+ LLVM_DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n");
// since SCEV construction seems to normalize, LM = 0
APInt SrcUM(Bits, 1, true);
@@ -1868,7 +1868,7 @@ bool DependenceInfo::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
if (const SCEVConstant *UpperBound =
collectConstantUpperBound(SrcLoop, Delta->getType())) {
SrcUM = UpperBound->getAPInt();
- DEBUG(dbgs() << "\t SrcUM = " << SrcUM << "\n");
+ LLVM_DEBUG(dbgs() << "\t SrcUM = " << SrcUM << "\n");
SrcUMvalid = true;
}
@@ -1878,7 +1878,7 @@ bool DependenceInfo::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
if (const SCEVConstant *UpperBound =
collectConstantUpperBound(DstLoop, Delta->getType())) {
DstUM = UpperBound->getAPInt();
- DEBUG(dbgs() << "\t DstUM = " << DstUM << "\n");
+ LLVM_DEBUG(dbgs() << "\t DstUM = " << DstUM << "\n");
DstUMvalid = true;
}
@@ -1889,18 +1889,18 @@ bool DependenceInfo::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
APInt TMUL = BM.sdiv(G);
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
if (SrcUMvalid) {
TU = minAPInt(TU, floorOfQuotient(SrcUM - X, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
}
}
else {
TU = minAPInt(TU, floorOfQuotient(-X, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
if (SrcUMvalid) {
TL = maxAPInt(TL, ceilingOfQuotient(SrcUM - X, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
}
}
@@ -1908,18 +1908,18 @@ bool DependenceInfo::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
TMUL = AM.sdiv(G);
if (TMUL.sgt(0)) {
TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
if (DstUMvalid) {
TU = minAPInt(TU, floorOfQuotient(DstUM - Y, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
}
}
else {
TU = minAPInt(TU, floorOfQuotient(-Y, TMUL));
- DEBUG(dbgs() << "\t TU = " << TU << "\n");
+ LLVM_DEBUG(dbgs() << "\t TU = " << TU << "\n");
if (DstUMvalid) {
TL = maxAPInt(TL, ceilingOfQuotient(DstUM - Y, TMUL));
- DEBUG(dbgs() << "\t TL = " << TL << "\n");
+ LLVM_DEBUG(dbgs() << "\t TL = " << TL << "\n");
}
}
if (TL.sgt(TU))
@@ -1975,27 +1975,27 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
const Loop *Loop1,
const Loop *Loop2) const {
++SymbolicRDIVapplications;
- DEBUG(dbgs() << "\ttry symbolic RDIV test\n");
- DEBUG(dbgs() << "\t A1 = " << *A1);
- DEBUG(dbgs() << ", type = " << *A1->getType() << "\n");
- DEBUG(dbgs() << "\t A2 = " << *A2 << "\n");
- DEBUG(dbgs() << "\t C1 = " << *C1 << "\n");
- DEBUG(dbgs() << "\t C2 = " << *C2 << "\n");
+ LLVM_DEBUG(dbgs() << "\ttry symbolic RDIV test\n");
+ LLVM_DEBUG(dbgs() << "\t A1 = " << *A1);
+ LLVM_DEBUG(dbgs() << ", type = " << *A1->getType() << "\n");
+ LLVM_DEBUG(dbgs() << "\t A2 = " << *A2 << "\n");
+ LLVM_DEBUG(dbgs() << "\t C1 = " << *C1 << "\n");
+ LLVM_DEBUG(dbgs() << "\t C2 = " << *C2 << "\n");
const SCEV *N1 = collectUpperBound(Loop1, A1->getType());
const SCEV *N2 = collectUpperBound(Loop2, A1->getType());
- DEBUG(if (N1) dbgs() << "\t N1 = " << *N1 << "\n");
- DEBUG(if (N2) dbgs() << "\t N2 = " << *N2 << "\n");
+ LLVM_DEBUG(if (N1) dbgs() << "\t N1 = " << *N1 << "\n");
+ LLVM_DEBUG(if (N2) dbgs() << "\t N2 = " << *N2 << "\n");
const SCEV *C2_C1 = SE->getMinusSCEV(C2, C1);
const SCEV *C1_C2 = SE->getMinusSCEV(C1, C2);
- DEBUG(dbgs() << "\t C2 - C1 = " << *C2_C1 << "\n");
- DEBUG(dbgs() << "\t C1 - C2 = " << *C1_C2 << "\n");
+ LLVM_DEBUG(dbgs() << "\t C2 - C1 = " << *C2_C1 << "\n");
+ LLVM_DEBUG(dbgs() << "\t C1 - C2 = " << *C1_C2 << "\n");
if (SE->isKnownNonNegative(A1)) {
if (SE->isKnownNonNegative(A2)) {
// A1 >= 0 && A2 >= 0
if (N1) {
// make sure that c2 - c1 <= a1*N1
const SCEV *A1N1 = SE->getMulExpr(A1, N1);
- DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
+ LLVM_DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1)) {
++SymbolicRDIVindependence;
return true;
@@ -2004,7 +2004,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
if (N2) {
// make sure that -a2*N2 <= c2 - c1, or a2*N2 >= c1 - c2
const SCEV *A2N2 = SE->getMulExpr(A2, N2);
- DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
+ LLVM_DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
if (isKnownPredicate(CmpInst::ICMP_SLT, A2N2, C1_C2)) {
++SymbolicRDIVindependence;
return true;
@@ -2018,7 +2018,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
const SCEV *A1N1 = SE->getMulExpr(A1, N1);
const SCEV *A2N2 = SE->getMulExpr(A2, N2);
const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2);
- DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
+ LLVM_DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1_A2N2)) {
++SymbolicRDIVindependence;
return true;
@@ -2039,7 +2039,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
const SCEV *A1N1 = SE->getMulExpr(A1, N1);
const SCEV *A2N2 = SE->getMulExpr(A2, N2);
const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2);
- DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
+ LLVM_DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n");
if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1_A2N2, C2_C1)) {
++SymbolicRDIVindependence;
return true;
@@ -2056,7 +2056,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
if (N1) {
// make sure that a1*N1 <= c2 - c1
const SCEV *A1N1 = SE->getMulExpr(A1, N1);
- DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
+ LLVM_DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n");
if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1, C2_C1)) {
++SymbolicRDIVindependence;
return true;
@@ -2065,7 +2065,7 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
if (N2) {
// make sure that c2 - c1 <= -a2*N2, or c1 - c2 >= a2*N2
const SCEV *A2N2 = SE->getMulExpr(A2, N2);
- DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
+ LLVM_DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n");
if (isKnownPredicate(CmpInst::ICMP_SLT, C1_C2, A2N2)) {
++SymbolicRDIVindependence;
return true;
@@ -2088,8 +2088,8 @@ bool DependenceInfo::symbolicRDIVtest(const SCEV *A1, const SCEV *A2,
bool DependenceInfo::testSIV(const SCEV *Src, const SCEV *Dst, unsigned &Level,
FullDependence &Result, Constraint &NewConstraint,
const SCEV *&SplitIter) const {
- DEBUG(dbgs() << " src = " << *Src << "\n");
- DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << " src = " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << " dst = " << *Dst << "\n");
const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
if (SrcAddRec && DstAddRec) {
@@ -2165,8 +2165,8 @@ bool DependenceInfo::testRDIV(const SCEV *Src, const SCEV *Dst,
const SCEV *SrcCoeff, *DstCoeff;
const Loop *SrcLoop, *DstLoop;
- DEBUG(dbgs() << " src = " << *Src << "\n");
- DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << " src = " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << " dst = " << *Dst << "\n");
const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
if (SrcAddRec && DstAddRec) {
@@ -2222,8 +2222,8 @@ bool DependenceInfo::testRDIV(const SCEV *Src, const SCEV *Dst,
bool DependenceInfo::testMIV(const SCEV *Src, const SCEV *Dst,
const SmallBitVector &Loops,
FullDependence &Result) const {
- DEBUG(dbgs() << " src = " << *Src << "\n");
- DEBUG(dbgs() << " dst = " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << " src = " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << " dst = " << *Dst << "\n");
Result.Consistent = false;
return gcdMIVtest(Src, Dst, Result) ||
banerjeeMIVtest(Src, Dst, Loops, Result);
@@ -2263,7 +2263,7 @@ const SCEVConstant *getConstantPart(const SCEV *Expr) {
// to "a common divisor".
bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
FullDependence &Result) const {
- DEBUG(dbgs() << "starting gcd\n");
+ LLVM_DEBUG(dbgs() << "starting gcd\n");
++GCDapplications;
unsigned BitWidth = SE->getTypeSizeInBits(Src->getType());
APInt RunningGCD = APInt::getNullValue(BitWidth);
@@ -2308,7 +2308,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
APInt ExtraGCD = APInt::getNullValue(BitWidth);
const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
- DEBUG(dbgs() << " Delta = " << *Delta << "\n");
+ LLVM_DEBUG(dbgs() << " Delta = " << *Delta << "\n");
const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Delta);
if (const SCEVAddExpr *Sum = dyn_cast<SCEVAddExpr>(Delta)) {
// If Delta is a sum of products, we may be able to make further progress.
@@ -2335,11 +2335,11 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
if (!Constant)
return false;
APInt ConstDelta = cast<SCEVConstant>(Constant)->getAPInt();
- DEBUG(dbgs() << " ConstDelta = " << ConstDelta << "\n");
+ LLVM_DEBUG(dbgs() << " ConstDelta = " << ConstDelta << "\n");
if (ConstDelta == 0)
return false;
RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ExtraGCD);
- DEBUG(dbgs() << " RunningGCD = " << RunningGCD << "\n");
+ LLVM_DEBUG(dbgs() << " RunningGCD = " << RunningGCD << "\n");
APInt Remainder = ConstDelta.srem(RunningGCD);
if (Remainder != 0) {
++GCDindependence;
@@ -2358,7 +2358,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
// Given A[5*i + 10*j*M + 9*M*N] and A[15*i + 20*j*M - 21*N*M + 5],
// we need to remember that the constant part is 5 and the RunningGCD should
// be initialized to ExtraGCD = 30.
- DEBUG(dbgs() << " ExtraGCD = " << ExtraGCD << '\n');
+ LLVM_DEBUG(dbgs() << " ExtraGCD = " << ExtraGCD << '\n');
bool Improved = false;
Coefficients = Src;
@@ -2413,10 +2413,10 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
continue;
APInt ConstCoeff = Constant->getAPInt();
RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs());
- DEBUG(dbgs() << "\tRunningGCD = " << RunningGCD << "\n");
+ LLVM_DEBUG(dbgs() << "\tRunningGCD = " << RunningGCD << "\n");
if (RunningGCD != 0) {
Remainder = ConstDelta.srem(RunningGCD);
- DEBUG(dbgs() << "\tRemainder = " << Remainder << "\n");
+ LLVM_DEBUG(dbgs() << "\tRemainder = " << Remainder << "\n");
if (Remainder != 0) {
unsigned Level = mapSrcLoop(CurLoop);
Result.DV[Level - 1].Direction &= unsigned(~Dependence::DVEntry::EQ);
@@ -2426,7 +2426,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
}
if (Improved)
++GCDsuccesses;
- DEBUG(dbgs() << "all done\n");
+ LLVM_DEBUG(dbgs() << "all done\n");
return false;
}
@@ -2467,35 +2467,35 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
bool DependenceInfo::banerjeeMIVtest(const SCEV *Src, const SCEV *Dst,
const SmallBitVector &Loops,
FullDependence &Result) const {
- DEBUG(dbgs() << "starting Banerjee\n");
+ LLVM_DEBUG(dbgs() << "starting Banerjee\n");
++BanerjeeApplications;
- DEBUG(dbgs() << " Src = " << *Src << '\n');
+ LLVM_DEBUG(dbgs() << " Src = " << *Src << '\n');
const SCEV *A0;
CoefficientInfo *A = collectCoeffInfo(Src, true, A0);
- DEBUG(dbgs() << " Dst = " << *Dst << '\n');
+ LLVM_DEBUG(dbgs() << " Dst = " << *Dst << '\n');
const SCEV *B0;
CoefficientInfo *B = collectCoeffInfo(Dst, false, B0);
BoundInfo *Bound = new BoundInfo[MaxLevels + 1];
const SCEV *Delta = SE->getMinusSCEV(B0, A0);
- DEBUG(dbgs() << "\tDelta = " << *Delta << '\n');
+ LLVM_DEBUG(dbgs() << "\tDelta = " << *Delta << '\n');
// Compute bounds for all the * directions.
- DEBUG(dbgs() << "\tBounds[*]\n");
+ LLVM_DEBUG(dbgs() << "\tBounds[*]\n");
for (unsigned K = 1; K <= MaxLevels; ++K) {
Bound[K].Iterations = A[K].Iterations ? A[K].Iterations : B[K].Iterations;
Bound[K].Direction = Dependence::DVEntry::ALL;
Bound[K].DirSet = Dependence::DVEntry::NONE;
findBoundsALL(A, B, Bound, K);
#ifndef NDEBUG
- DEBUG(dbgs() << "\t " << K << '\t');
+ LLVM_DEBUG(dbgs() << "\t " << K << '\t');
if (Bound[K].Lower[Dependence::DVEntry::ALL])
- DEBUG(dbgs() << *Bound[K].Lower[Dependence::DVEntry::ALL] << '\t');
+ LLVM_DEBUG(dbgs() << *Bound[K].Lower[Dependence::DVEntry::ALL] << '\t');
else
- DEBUG(dbgs() << "-inf\t");
+ LLVM_DEBUG(dbgs() << "-inf\t");
if (Bound[K].Upper[Dependence::DVEntry::ALL])
- DEBUG(dbgs() << *Bound[K].Upper[Dependence::DVEntry::ALL] << '\n');
+ LLVM_DEBUG(dbgs() << *Bound[K].Upper[Dependence::DVEntry::ALL] << '\n');
else
- DEBUG(dbgs() << "+inf\n");
+ LLVM_DEBUG(dbgs() << "+inf\n");
#endif
}
@@ -2551,23 +2551,23 @@ unsigned DependenceInfo::exploreDirections(unsigned Level, CoefficientInfo *A,
const SCEV *Delta) const {
if (Level > CommonLevels) {
// record result
- DEBUG(dbgs() << "\t[");
+ LLVM_DEBUG(dbgs() << "\t[");
for (unsigned K = 1; K <= CommonLevels; ++K) {
if (Loops[K]) {
Bound[K].DirSet |= Bound[K].Direction;
#ifndef NDEBUG
switch (Bound[K].Direction) {
case Dependence::DVEntry::LT:
- DEBUG(dbgs() << " <");
+ LLVM_DEBUG(dbgs() << " <");
break;
case Dependence::DVEntry::EQ:
- DEBUG(dbgs() << " =");
+ LLVM_DEBUG(dbgs() << " =");
break;
case Dependence::DVEntry::GT:
- DEBUG(dbgs() << " >");
+ LLVM_DEBUG(dbgs() << " >");
break;
case Dependence::DVEntry::ALL:
- DEBUG(dbgs() << " *");
+ LLVM_DEBUG(dbgs() << " *");
break;
default:
llvm_unreachable("unexpected Bound[K].Direction");
@@ -2575,7 +2575,7 @@ unsigned DependenceInfo::exploreDirections(unsigned Level, CoefficientInfo *A,
#endif
}
}
- DEBUG(dbgs() << " ]\n");
+ LLVM_DEBUG(dbgs() << " ]\n");
return 1;
}
if (Loops[Level]) {
@@ -2586,34 +2586,40 @@ unsigned DependenceInfo::exploreDirections(unsigned Level, CoefficientInfo *A,
findBoundsGT(A, B, Bound, Level);
findBoundsEQ(A, B, Bound, Level);
#ifndef NDEBUG
- DEBUG(dbgs() << "\tBound for level = " << Level << '\n');
- DEBUG(dbgs() << "\t <\t");
+ LLVM_DEBUG(dbgs() << "\tBound for level = " << Level << '\n');
+ LLVM_DEBUG(dbgs() << "\t <\t");
if (Bound[Level].Lower[Dependence::DVEntry::LT])
- DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::LT] << '\t');
+ LLVM_DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::LT]
+ << '\t');
else
- DEBUG(dbgs() << "-inf\t");
+ LLVM_DEBUG(dbgs() << "-inf\t");
if (Bound[Level].Upper[Dependence::DVEntry::LT])
- DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::LT] << '\n');
+ LLVM_DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::LT]
+ << '\n');
else
- DEBUG(dbgs() << "+inf\n");
- DEBUG(dbgs() << "\t =\t");
+ LLVM_DEBUG(dbgs() << "+inf\n");
+ LLVM_DEBUG(dbgs() << "\t =\t");
if (Bound[Level].Lower[Dependence::DVEntry::EQ])
- DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::EQ] << '\t');
+ LLVM_DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::EQ]
+ << '\t');
else
- DEBUG(dbgs() << "-inf\t");
+ LLVM_DEBUG(dbgs() << "-inf\t");
if (Bound[Level].Upper[Dependence::DVEntry::EQ])
- DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::EQ] << '\n');
+ LLVM_DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::EQ]
+ << '\n');
else
- DEBUG(dbgs() << "+inf\n");
- DEBUG(dbgs() << "\t >\t");
+ LLVM_DEBUG(dbgs() << "+inf\n");
+ LLVM_DEBUG(dbgs() << "\t >\t");
if (Bound[Level].Lower[Dependence::DVEntry::GT])
- DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::GT] << '\t');
+ LLVM_DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::GT]
+ << '\t');
else
- DEBUG(dbgs() << "-inf\t");
+ LLVM_DEBUG(dbgs() << "-inf\t");
if (Bound[Level].Upper[Dependence::DVEntry::GT])
- DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::GT] << '\n');
+ LLVM_DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::GT]
+ << '\n');
else
- DEBUG(dbgs() << "+inf\n");
+ LLVM_DEBUG(dbgs() << "+inf\n");
#endif
}
@@ -2860,21 +2866,21 @@ DependenceInfo::collectCoeffInfo(const SCEV *Subscript, bool SrcFlag,
}
Constant = Subscript;
#ifndef NDEBUG
- DEBUG(dbgs() << "\tCoefficient Info\n");
+ LLVM_DEBUG(dbgs() << "\tCoefficient Info\n");
for (unsigned K = 1; K <= MaxLevels; ++K) {
- DEBUG(dbgs() << "\t " << K << "\t" << *CI[K].Coeff);
- DEBUG(dbgs() << "\tPos Part = ");
- DEBUG(dbgs() << *CI[K].PosPart);
- DEBUG(dbgs() << "\tNeg Part = ");
- DEBUG(dbgs() << *CI[K].NegPart);
- DEBUG(dbgs() << "\tUpper Bound = ");
+ LLVM_DEBUG(dbgs() << "\t " << K << "\t" << *CI[K].Coeff);
+ LLVM_DEBUG(dbgs() << "\tPos Part = ");
+ LLVM_DEBUG(dbgs() << *CI[K].PosPart);
+ LLVM_DEBUG(dbgs() << "\tNeg Part = ");
+ LLVM_DEBUG(dbgs() << *CI[K].NegPart);
+ LLVM_DEBUG(dbgs() << "\tUpper Bound = ");
if (CI[K].Iterations)
- DEBUG(dbgs() << *CI[K].Iterations);
+ LLVM_DEBUG(dbgs() << *CI[K].Iterations);
else
- DEBUG(dbgs() << "+inf");
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "+inf");
+ LLVM_DEBUG(dbgs() << '\n');
}
- DEBUG(dbgs() << "\t Constant = " << *Subscript << '\n');
+ LLVM_DEBUG(dbgs() << "\t Constant = " << *Subscript << '\n');
#endif
return CI;
}
@@ -2999,8 +3005,8 @@ bool DependenceInfo::propagate(const SCEV *&Src, const SCEV *&Dst,
bool &Consistent) {
bool Result = false;
for (unsigned LI : Loops.set_bits()) {
- DEBUG(dbgs() << "\t Constraint[" << LI << "] is");
- DEBUG(Constraints[LI].dump(dbgs()));
+ LLVM_DEBUG(dbgs() << "\t Constraint[" << LI << "] is");
+ LLVM_DEBUG(Constraints[LI].dump(dbgs()));
if (Constraints[LI].isDistance())
Result |= propagateDistance(Src, Dst, Constraints[LI], Consistent);
else if (Constraints[LI].isLine())
@@ -3021,17 +3027,17 @@ bool DependenceInfo::propagateDistance(const SCEV *&Src, const SCEV *&Dst,
Constraint &CurConstraint,
bool &Consistent) {
const Loop *CurLoop = CurConstraint.getAssociatedLoop();
- DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
const SCEV *A_K = findCoefficient(Src, CurLoop);
if (A_K->isZero())
return false;
const SCEV *DA_K = SE->getMulExpr(A_K, CurConstraint.getD());
Src = SE->getMinusSCEV(Src, DA_K);
Src = zeroCoefficient(Src, CurLoop);
- DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
- DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
Dst = addToCoefficient(Dst, CurLoop, SE->getNegativeSCEV(A_K));
- DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
if (!findCoefficient(Dst, CurLoop)->isZero())
Consistent = false;
return true;
@@ -3050,9 +3056,10 @@ bool DependenceInfo::propagateLine(const SCEV *&Src, const SCEV *&Dst,
const SCEV *A = CurConstraint.getA();
const SCEV *B = CurConstraint.getB();
const SCEV *C = CurConstraint.getC();
- DEBUG(dbgs() << "\t\tA = " << *A << ", B = " << *B << ", C = " << *C << "\n");
- DEBUG(dbgs() << "\t\tSrc = " << *Src << "\n");
- DEBUG(dbgs() << "\t\tDst = " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tA = " << *A << ", B = " << *B << ", C = " << *C
+ << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tSrc = " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tDst = " << *Dst << "\n");
if (A->isZero()) {
const SCEVConstant *Bconst = dyn_cast<SCEVConstant>(B);
const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
@@ -3108,8 +3115,8 @@ bool DependenceInfo::propagateLine(const SCEV *&Src, const SCEV *&Dst,
if (!findCoefficient(Dst, CurLoop)->isZero())
Consistent = false;
}
- DEBUG(dbgs() << "\t\tnew Src = " << *Src << "\n");
- DEBUG(dbgs() << "\t\tnew Dst = " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tnew Src = " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tnew Dst = " << *Dst << "\n");
return true;
}
@@ -3124,13 +3131,13 @@ bool DependenceInfo::propagatePoint(const SCEV *&Src, const SCEV *&Dst,
const SCEV *AP_K = findCoefficient(Dst, CurLoop);
const SCEV *XA_K = SE->getMulExpr(A_K, CurConstraint.getX());
const SCEV *YAP_K = SE->getMulExpr(AP_K, CurConstraint.getY());
- DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n");
Src = SE->getAddExpr(Src, SE->getMinusSCEV(XA_K, YAP_K));
Src = zeroCoefficient(Src, CurLoop);
- DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
- DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n");
Dst = zeroCoefficient(Dst, CurLoop);
- DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n");
return true;
}
@@ -3138,8 +3145,8 @@ bool DependenceInfo::propagatePoint(const SCEV *&Src, const SCEV *&Dst,
// Update direction vector entry based on the current constraint.
void DependenceInfo::updateDirection(Dependence::DVEntry &Level,
const Constraint &CurConstraint) const {
- DEBUG(dbgs() << "\tUpdate direction, constraint =");
- DEBUG(CurConstraint.dump(dbgs()));
+ LLVM_DEBUG(dbgs() << "\tUpdate direction, constraint =");
+ LLVM_DEBUG(CurConstraint.dump(dbgs()));
if (CurConstraint.isAny())
; // use defaults
else if (CurConstraint.isDistance()) {
@@ -3246,14 +3253,14 @@ bool DependenceInfo::tryDelinearize(Instruction *Src, Instruction *Dst,
int size = SrcSubscripts.size();
- DEBUG({
- dbgs() << "\nSrcSubscripts: ";
+ LLVM_DEBUG({
+ dbgs() << "\nSrcSubscripts: ";
for (int i = 0; i < size; i++)
dbgs() << *SrcSubscripts[i];
dbgs() << "\nDstSubscripts: ";
for (int i = 0; i < size; i++)
dbgs() << *DstSubscripts[i];
- });
+ });
// The delinearization transforms a single-subscript MIV dependence test into
// a multi-subscript SIV dependence test that is easier to compute. So we
@@ -3315,7 +3322,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
if (!isLoadOrStore(Src) || !isLoadOrStore(Dst)) {
// can only analyze simple loads and stores, i.e., no calls, invokes, etc.
- DEBUG(dbgs() << "can only handle simple loads and stores\n");
+ LLVM_DEBUG(dbgs() << "can only handle simple loads and stores\n");
return make_unique<Dependence>(Src, Dst);
}
@@ -3330,11 +3337,11 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
case MayAlias:
case PartialAlias:
// cannot analyse objects if we don't understand their aliasing.
- DEBUG(dbgs() << "can't analyze may or partial alias\n");
+ LLVM_DEBUG(dbgs() << "can't analyze may or partial alias\n");
return make_unique<Dependence>(Src, Dst);
case NoAlias:
// If the objects noalias, they are distinct, accesses are independent.
- DEBUG(dbgs() << "no alias\n");
+ LLVM_DEBUG(dbgs() << "no alias\n");
return nullptr;
case MustAlias:
break; // The underlying objects alias; test accesses for dependence.
@@ -3342,8 +3349,8 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
// establish loop nesting levels
establishNestingLevels(Src, Dst);
- DEBUG(dbgs() << " common nesting levels = " << CommonLevels << "\n");
- DEBUG(dbgs() << " maximum nesting levels = " << MaxLevels << "\n");
+ LLVM_DEBUG(dbgs() << " common nesting levels = " << CommonLevels << "\n");
+ LLVM_DEBUG(dbgs() << " maximum nesting levels = " << MaxLevels << "\n");
FullDependence Result(Src, Dst, PossiblyLoopIndependent, CommonLevels);
++TotalArrayPairs;
@@ -3352,14 +3359,14 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
SmallVector<Subscript, 2> Pair(Pairs);
const SCEV *SrcSCEV = SE->getSCEV(SrcPtr);
const SCEV *DstSCEV = SE->getSCEV(DstPtr);
- DEBUG(dbgs() << " SrcSCEV = " << *SrcSCEV << "\n");
- DEBUG(dbgs() << " DstSCEV = " << *DstSCEV << "\n");
+ LLVM_DEBUG(dbgs() << " SrcSCEV = " << *SrcSCEV << "\n");
+ LLVM_DEBUG(dbgs() << " DstSCEV = " << *DstSCEV << "\n");
Pair[0].Src = SrcSCEV;
Pair[0].Dst = DstSCEV;
if (Delinearize) {
if (tryDelinearize(Src, Dst, Pair)) {
- DEBUG(dbgs() << " delinearized\n");
+ LLVM_DEBUG(dbgs() << " delinearized\n");
Pairs = Pair.size();
}
}
@@ -3375,12 +3382,12 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
Pair[P].Loops);
Pair[P].GroupLoops = Pair[P].Loops;
Pair[P].Group.set(P);
- DEBUG(dbgs() << " subscript " << P << "\n");
- DEBUG(dbgs() << "\tsrc = " << *Pair[P].Src << "\n");
- DEBUG(dbgs() << "\tdst = " << *Pair[P].Dst << "\n");
- DEBUG(dbgs() << "\tclass = " << Pair[P].Classification << "\n");
- DEBUG(dbgs() << "\tloops = ");
- DEBUG(dumpSmallBitVector(Pair[P].Loops));
+ LLVM_DEBUG(dbgs() << " subscript " << P << "\n");
+ LLVM_DEBUG(dbgs() << "\tsrc = " << *Pair[P].Src << "\n");
+ LLVM_DEBUG(dbgs() << "\tdst = " << *Pair[P].Dst << "\n");
+ LLVM_DEBUG(dbgs() << "\tclass = " << Pair[P].Classification << "\n");
+ LLVM_DEBUG(dbgs() << "\tloops = ");
+ LLVM_DEBUG(dumpSmallBitVector(Pair[P].Loops));
}
SmallBitVector Separable(Pairs);
@@ -3485,25 +3492,25 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
}
}
- DEBUG(dbgs() << " Separable = ");
- DEBUG(dumpSmallBitVector(Separable));
- DEBUG(dbgs() << " Coupled = ");
- DEBUG(dumpSmallBitVector(Coupled));
+ LLVM_DEBUG(dbgs() << " Separable = ");
+ LLVM_DEBUG(dumpSmallBitVector(Separable));
+ LLVM_DEBUG(dbgs() << " Coupled = ");
+ LLVM_DEBUG(dumpSmallBitVector(Coupled));
Constraint NewConstraint;
NewConstraint.setAny(SE);
// test separable subscripts
for (unsigned SI : Separable.set_bits()) {
- DEBUG(dbgs() << "testing subscript " << SI);
+ LLVM_DEBUG(dbgs() << "testing subscript " << SI);
switch (Pair[SI].Classification) {
case Subscript::ZIV:
- DEBUG(dbgs() << ", ZIV\n");
+ LLVM_DEBUG(dbgs() << ", ZIV\n");
if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result))
return nullptr;
break;
case Subscript::SIV: {
- DEBUG(dbgs() << ", SIV\n");
+ LLVM_DEBUG(dbgs() << ", SIV\n");
unsigned Level;
const SCEV *SplitIter = nullptr;
if (testSIV(Pair[SI].Src, Pair[SI].Dst, Level, Result, NewConstraint,
@@ -3512,12 +3519,12 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
break;
}
case Subscript::RDIV:
- DEBUG(dbgs() << ", RDIV\n");
+ LLVM_DEBUG(dbgs() << ", RDIV\n");
if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result))
return nullptr;
break;
case Subscript::MIV:
- DEBUG(dbgs() << ", MIV\n");
+ LLVM_DEBUG(dbgs() << ", MIV\n");
if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result))
return nullptr;
break;
@@ -3528,20 +3535,20 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
if (Coupled.count()) {
// test coupled subscript groups
- DEBUG(dbgs() << "starting on coupled subscripts\n");
- DEBUG(dbgs() << "MaxLevels + 1 = " << MaxLevels + 1 << "\n");
+ LLVM_DEBUG(dbgs() << "starting on coupled subscripts\n");
+ LLVM_DEBUG(dbgs() << "MaxLevels + 1 = " << MaxLevels + 1 << "\n");
SmallVector<Constraint, 4> Constraints(MaxLevels + 1);
for (unsigned II = 0; II <= MaxLevels; ++II)
Constraints[II].setAny(SE);
for (unsigned SI : Coupled.set_bits()) {
- DEBUG(dbgs() << "testing subscript group " << SI << " { ");
+ LLVM_DEBUG(dbgs() << "testing subscript group " << SI << " { ");
SmallBitVector Group(Pair[SI].Group);
SmallBitVector Sivs(Pairs);
SmallBitVector Mivs(Pairs);
SmallBitVector ConstrainedLevels(MaxLevels + 1);
SmallVector<Subscript *, 4> PairsInGroup;
for (unsigned SJ : Group.set_bits()) {
- DEBUG(dbgs() << SJ << " ");
+ LLVM_DEBUG(dbgs() << SJ << " ");
if (Pair[SJ].Classification == Subscript::SIV)
Sivs.set(SJ);
else
@@ -3549,15 +3556,15 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
PairsInGroup.push_back(&Pair[SJ]);
}
unifySubscriptType(PairsInGroup);
- DEBUG(dbgs() << "}\n");
+ LLVM_DEBUG(dbgs() << "}\n");
while (Sivs.any()) {
bool Changed = false;
for (unsigned SJ : Sivs.set_bits()) {
- DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n");
+ LLVM_DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n");
// SJ is an SIV subscript that's part of the current coupled group
unsigned Level;
const SCEV *SplitIter = nullptr;
- DEBUG(dbgs() << "SIV\n");
+ LLVM_DEBUG(dbgs() << "SIV\n");
if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level, Result, NewConstraint,
SplitIter))
return nullptr;
@@ -3573,15 +3580,15 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
}
if (Changed) {
// propagate, possibly creating new SIVs and ZIVs
- DEBUG(dbgs() << " propagating\n");
- DEBUG(dbgs() << "\tMivs = ");
- DEBUG(dumpSmallBitVector(Mivs));
+ LLVM_DEBUG(dbgs() << " propagating\n");
+ LLVM_DEBUG(dbgs() << "\tMivs = ");
+ LLVM_DEBUG(dumpSmallBitVector(Mivs));
for (unsigned SJ : Mivs.set_bits()) {
// SJ is an MIV subscript that's part of the current coupled group
- DEBUG(dbgs() << "\tSJ = " << SJ << "\n");
+ LLVM_DEBUG(dbgs() << "\tSJ = " << SJ << "\n");
if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops,
Constraints, Result.Consistent)) {
- DEBUG(dbgs() << "\t Changed\n");
+ LLVM_DEBUG(dbgs() << "\t Changed\n");
++DeltaPropagations;
Pair[SJ].Classification =
classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()),
@@ -3589,7 +3596,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
Pair[SJ].Loops);
switch (Pair[SJ].Classification) {
case Subscript::ZIV:
- DEBUG(dbgs() << "ZIV\n");
+ LLVM_DEBUG(dbgs() << "ZIV\n");
if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
return nullptr;
Mivs.reset(SJ);
@@ -3612,7 +3619,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
// test & propagate remaining RDIVs
for (unsigned SJ : Mivs.set_bits()) {
if (Pair[SJ].Classification == Subscript::RDIV) {
- DEBUG(dbgs() << "RDIV test\n");
+ LLVM_DEBUG(dbgs() << "RDIV test\n");
if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
return nullptr;
// I don't yet understand how to propagate RDIV results
@@ -3625,7 +3632,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
// Better to somehow test all remaining subscripts simultaneously.
for (unsigned SJ : Mivs.set_bits()) {
if (Pair[SJ].Classification == Subscript::MIV) {
- DEBUG(dbgs() << "MIV test\n");
+ LLVM_DEBUG(dbgs() << "MIV test\n");
if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result))
return nullptr;
}
@@ -3634,7 +3641,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst,
}
// update Result.DV from constraint vector
- DEBUG(dbgs() << " updating\n");
+ LLVM_DEBUG(dbgs() << " updating\n");
for (unsigned SJ : ConstrainedLevels.set_bits()) {
if (SJ > CommonLevels)
break;
@@ -3760,7 +3767,7 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep,
if (Delinearize) {
if (tryDelinearize(Src, Dst, Pair)) {
- DEBUG(dbgs() << " delinearized\n");
+ LLVM_DEBUG(dbgs() << " delinearized\n");
Pairs = Pair.size();
}
}
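Many of the DependenceAnalysis traces converted above print the intermediate values of a divisibility check: the strong SIV test computes Delta / Coeff with APInt::sdivrem and reports independence when the remainder is non-zero, since the dependence distance would then not be an integer. The stand-alone sketch below is hypothetical code illustrating that arithmetic, not code taken from DependenceAnalysis.cpp.

// Hypothetical illustration of the sdivrem-based check traced above.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// True if Coeff provably does not divide Delta (so no dependence is possible
// at this level); otherwise Distance receives the integer dependence distance.
static bool provesIndependence(const APInt &Delta, const APInt &Coeff,
                               APInt &Distance) {
  APInt Remainder = Delta; // outputs initialized before the call, as above
  Distance = Delta;
  APInt::sdivrem(Delta, Coeff, Distance, Remainder);
  return Remainder != 0;
}

int main() {
  APInt Delta(64, 6, /*isSigned=*/true);
  APInt Coeff(64, 2, /*isSigned=*/true);
  APInt Distance(64, 0, /*isSigned=*/true);
  if (!provesIndependence(Delta, Coeff, Distance))
    outs() << "Distance = " << Distance << "\n"; // prints "Distance = 3"
  return 0;
}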
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index f028dd0ef0d1..609e5e3a1448 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -235,13 +235,13 @@ bool IVUsers::AddUsersImpl(Instruction *I,
if (LI->getLoopFor(User->getParent()) != L) {
if (isa<PHINode>(User) || Processed.count(User) ||
!AddUsersImpl(User, SimpleLoopNests)) {
- DEBUG(dbgs() << "FOUND USER in other loop: " << *User << '\n'
- << " OF SCEV: " << *ISE << '\n');
+ LLVM_DEBUG(dbgs() << "FOUND USER in other loop: " << *User << '\n'
+ << " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
}
} else if (Processed.count(User) || !AddUsersImpl(User, SimpleLoopNests)) {
- DEBUG(dbgs() << "FOUND USER: " << *User << '\n'
- << " OF SCEV: " << *ISE << '\n');
+ LLVM_DEBUG(dbgs() << "FOUND USER: " << *User << '\n'
+ << " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
}
@@ -274,14 +274,15 @@ bool IVUsers::AddUsersImpl(Instruction *I,
// If we normalized the expression, but denormalization doesn't give the
// original one, discard this user.
if (OriginalISE != DenormalizedISE) {
- DEBUG(dbgs() << " DISCARDING (NORMALIZATION ISN'T INVERTIBLE): "
- << *ISE << '\n');
+ LLVM_DEBUG(dbgs()
+ << " DISCARDING (NORMALIZATION ISN'T INVERTIBLE): "
+ << *ISE << '\n');
IVUses.pop_back();
return false;
}
}
- DEBUG(if (SE->getSCEV(I) != ISE)
- dbgs() << " NORMALIZED TO: " << *ISE << '\n');
+ LLVM_DEBUG(if (SE->getSCEV(I) != ISE) dbgs()
+ << " NORMALIZED TO: " << *ISE << '\n');
}
}
return true;
diff --git a/lib/Analysis/IndirectCallPromotionAnalysis.cpp b/lib/Analysis/IndirectCallPromotionAnalysis.cpp
index c11176bbb9c8..4659c0a00629 100644
--- a/lib/Analysis/IndirectCallPromotionAnalysis.cpp
+++ b/lib/Analysis/IndirectCallPromotionAnalysis.cpp
@@ -71,19 +71,19 @@ uint32_t ICallPromotionAnalysis::getProfitablePromotionCandidates(
const Instruction *Inst, uint32_t NumVals, uint64_t TotalCount) {
ArrayRef<InstrProfValueData> ValueDataRef(ValueDataArray.get(), NumVals);
- DEBUG(dbgs() << " \nWork on callsite " << *Inst << " Num_targets: " << NumVals
- << "\n");
+ LLVM_DEBUG(dbgs() << " \nWork on callsite " << *Inst
+ << " Num_targets: " << NumVals << "\n");
uint32_t I = 0;
uint64_t RemainingCount = TotalCount;
for (; I < MaxNumPromotions && I < NumVals; I++) {
uint64_t Count = ValueDataRef[I].Count;
assert(Count <= RemainingCount);
- DEBUG(dbgs() << " Candidate " << I << " Count=" << Count
- << " Target_func: " << ValueDataRef[I].Value << "\n");
+ LLVM_DEBUG(dbgs() << " Candidate " << I << " Count=" << Count
+ << " Target_func: " << ValueDataRef[I].Value << "\n");
if (!isPromotionProfitable(Count, TotalCount, RemainingCount)) {
- DEBUG(dbgs() << " Not promote: Cold target.\n");
+ LLVM_DEBUG(dbgs() << " Not promote: Cold target.\n");
return I;
}
RemainingCount -= Count;
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index c81a66a051af..7a28ad431f67 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -921,14 +921,14 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr;
auto HotCallSiteThreshold = getHotCallSiteThreshold(CS, CallerBFI);
if (!Caller->optForSize() && HotCallSiteThreshold) {
- DEBUG(dbgs() << "Hot callsite.\n");
+ LLVM_DEBUG(dbgs() << "Hot callsite.\n");
// FIXME: This should update the threshold only if it exceeds the
// current threshold, but AutoFDO + ThinLTO currently relies on this
// behavior to prevent inlining of hot callsites during ThinLTO
// compile phase.
Threshold = HotCallSiteThreshold.getValue();
} else if (isColdCallSite(CS, CallerBFI)) {
- DEBUG(dbgs() << "Cold callsite.\n");
+ LLVM_DEBUG(dbgs() << "Cold callsite.\n");
// Do not apply bonuses for a cold callsite including the
// LastCallToStatic bonus. While this bonus might result in code size
// reduction, it can cause the size of a non-cold caller to increase
@@ -939,13 +939,13 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
// Use callee's global profile information only if we have no way of
// determining this via callsite information.
if (PSI->isFunctionEntryHot(&Callee)) {
- DEBUG(dbgs() << "Hot callee.\n");
+ LLVM_DEBUG(dbgs() << "Hot callee.\n");
// If callsite hotness can not be determined, we may still know
// that the callee is hot and treat it as a weaker hint for threshold
// increase.
Threshold = MaxIfValid(Threshold, Params.HintThreshold);
} else if (PSI->isFunctionEntryCold(&Callee)) {
- DEBUG(dbgs() << "Cold callee.\n");
+ LLVM_DEBUG(dbgs() << "Cold callee.\n");
// Do not apply bonuses for a cold callee including the
// LastCallToStatic bonus. While this bonus might result in code size
// reduction, it can cause the size of a non-cold caller to increase
@@ -2002,14 +2002,14 @@ InlineCost llvm::getInlineCost(
CS.isNoInline())
return llvm::InlineCost::getNever();
- DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
- << "... (caller:" << Caller->getName() << ")\n");
+ LLVM_DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
+ << "... (caller:" << Caller->getName() << ")\n");
CallAnalyzer CA(CalleeTTI, GetAssumptionCache, GetBFI, PSI, ORE, *Callee, CS,
Params);
bool ShouldInline = CA.analyzeCall(CS);
- DEBUG(CA.dump());
+ LLVM_DEBUG(CA.dump());
// Check if there was a reason to force inlining or no inlining.
if (!ShouldInline && CA.getCost() < CA.getThreshold())
diff --git a/lib/Analysis/LazyCallGraph.cpp b/lib/Analysis/LazyCallGraph.cpp
index 420a71c9ffe4..c001f55bb62e 100644
--- a/lib/Analysis/LazyCallGraph.cpp
+++ b/lib/Analysis/LazyCallGraph.cpp
@@ -66,15 +66,15 @@ static void addEdge(SmallVectorImpl<LazyCallGraph::Edge> &Edges,
if (!EdgeIndexMap.insert({&N, Edges.size()}).second)
return;
- DEBUG(dbgs() << " Added callable function: " << N.getName() << "\n");
+ LLVM_DEBUG(dbgs() << " Added callable function: " << N.getName() << "\n");
Edges.emplace_back(LazyCallGraph::Edge(N, EK));
}
LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
assert(!Edges && "Must not have already populated the edges for this node!");
- DEBUG(dbgs() << " Adding functions called by '" << getName()
- << "' to the graph.\n");
+ LLVM_DEBUG(dbgs() << " Adding functions called by '" << getName()
+ << "' to the graph.\n");
Edges = EdgeSequence();
@@ -152,8 +152,8 @@ static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
}
LazyCallGraph::LazyCallGraph(Module &M, TargetLibraryInfo &TLI) {
- DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
- << "\n");
+ LLVM_DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
+ << "\n");
for (Function &F : M) {
if (F.isDeclaration())
continue;
@@ -168,8 +168,8 @@ LazyCallGraph::LazyCallGraph(Module &M, TargetLibraryInfo &TLI) {
// External linkage defined functions have edges to them from other
// modules.
- DEBUG(dbgs() << " Adding '" << F.getName()
- << "' to entry set of the graph.\n");
+ LLVM_DEBUG(dbgs() << " Adding '" << F.getName()
+ << "' to entry set of the graph.\n");
addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
}
@@ -181,8 +181,9 @@ LazyCallGraph::LazyCallGraph(Module &M, TargetLibraryInfo &TLI) {
if (Visited.insert(GV.getInitializer()).second)
Worklist.push_back(GV.getInitializer());
- DEBUG(dbgs() << " Adding functions referenced by global initializers to the "
- "entry set.\n");
+ LLVM_DEBUG(
+ dbgs() << " Adding functions referenced by global initializers to the "
+ "entry set.\n");
visitReferences(Worklist, Visited, [&](Function &F) {
addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F),
LazyCallGraph::Edge::Ref);
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index 4bb1e56d086b..a133357979b6 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -392,8 +392,8 @@ namespace {
if (!BlockValueSet.insert(BV).second)
return false; // It's already in the stack.
- DEBUG(dbgs() << "PUSH: " << *BV.second << " in " << BV.first->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
+ << BV.first->getName() << "\n");
BlockValueStack.push_back(BV);
return true;
}
@@ -508,7 +508,8 @@ void LazyValueInfoImpl::solve() {
// PredicateInfo is used in LVI or CVP, we should be able to make the
// overdefined cache global, and remove this throttle.
if (processedCount > MaxProcessedPerValue) {
- DEBUG(dbgs() << "Giving up on stack because we are getting too deep\n");
+ LLVM_DEBUG(
+ dbgs() << "Giving up on stack because we are getting too deep\n");
// Fill in the original values
while (!StartingStack.empty()) {
std::pair<BasicBlock *, Value *> &e = StartingStack.back();
@@ -529,8 +530,9 @@ void LazyValueInfoImpl::solve() {
assert(TheCache.hasCachedValueInfo(e.second, e.first) &&
"Result should be in cache!");
- DEBUG(dbgs() << "POP " << *e.second << " in " << e.first->getName()
- << " = " << TheCache.getCachedValueInfo(e.second, e.first) << "\n");
+ LLVM_DEBUG(
+ dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
+ << TheCache.getCachedValueInfo(e.second, e.first) << "\n");
BlockValueStack.pop_back();
BlockValueSet.erase(e);
@@ -581,8 +583,8 @@ bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
if (TheCache.hasCachedValueInfo(Val, BB)) {
// If we have a cached value, use that.
- DEBUG(dbgs() << " reuse BB '" << BB->getName()
- << "' val=" << TheCache.getCachedValueInfo(Val, BB) << '\n');
+ LLVM_DEBUG(dbgs() << " reuse BB '" << BB->getName() << "' val="
+ << TheCache.getCachedValueInfo(Val, BB) << '\n');
// Since we're reusing a cached value, we don't need to update the
// OverDefinedCache. The cache will have been properly updated whenever the
@@ -637,8 +639,8 @@ bool LazyValueInfoImpl::solveBlockValueImpl(ValueLatticeElement &Res,
return solveBlockValueBinaryOp(Res, BO, BB);
}
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - unknown inst def found.\n");
+ LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - unknown inst def found.\n");
Res = getFromRangeMetadata(BBI);
return true;
}
@@ -733,8 +735,8 @@ bool LazyValueInfoImpl::solveBlockValueNonLocal(ValueLatticeElement &BBLV,
// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
if (Result.isOverdefined()) {
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined because of pred (non local).\n");
+ LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because of pred (non local).\n");
// Before giving up, see if we can prove the pointer non-null local to
// this particular block.
if (Val->getType()->isPointerTy() &&
@@ -777,8 +779,8 @@ bool LazyValueInfoImpl::solveBlockValuePHINode(ValueLatticeElement &BBLV,
// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
if (Result.isOverdefined()) {
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined because of pred (local).\n");
+ LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because of pred (local).\n");
BBLV = Result;
return true;
@@ -968,8 +970,8 @@ bool LazyValueInfoImpl::solveBlockValueCast(ValueLatticeElement &BBLV,
break;
default:
// Unhandled instructions are overdefined.
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined (unknown cast).\n");
+ LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined (unknown cast).\n");
BBLV = ValueLatticeElement::getOverdefined();
return true;
}
@@ -1027,8 +1029,8 @@ bool LazyValueInfoImpl::solveBlockValueBinaryOp(ValueLatticeElement &BBLV,
break;
default:
// Unhandled instructions are overdefined.
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined (unknown binary operator).\n");
+ LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined (unknown binary operator).\n");
BBLV = ValueLatticeElement::getOverdefined();
return true;
};
@@ -1399,8 +1401,8 @@ bool LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
Instruction *CxtI) {
- DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
- << BB->getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
+ << BB->getName() << "'\n");
assert(BlockValueStack.empty() && BlockValueSet.empty());
if (!hasBlockValue(V, BB)) {
@@ -1410,13 +1412,13 @@ ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
ValueLatticeElement Result = getBlockValue(V, BB);
intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
- DEBUG(dbgs() << " Result = " << Result << "\n");
+ LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
- DEBUG(dbgs() << "LVI Getting value " << *V << " at '"
- << CxtI->getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
+ << "'\n");
if (auto *C = dyn_cast<Constant>(V))
return ValueLatticeElement::get(C);
@@ -1426,15 +1428,16 @@ ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
Result = getFromRangeMetadata(I);
intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
- DEBUG(dbgs() << " Result = " << Result << "\n");
+ LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
ValueLatticeElement LazyValueInfoImpl::
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI) {
- DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
- << FromBB->getName() << "' to '" << ToBB->getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
+ << FromBB->getName() << "' to '" << ToBB->getName()
+ << "'\n");
ValueLatticeElement Result;
if (!getEdgeValue(V, FromBB, ToBB, Result, CxtI)) {
@@ -1444,7 +1447,7 @@ getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
assert(WasFastQuery && "More work to do after problem solved?");
}
- DEBUG(dbgs() << " Result = " << Result << "\n");
+ LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index 6f94d30855cb..17b13802e1d8 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -165,8 +165,8 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
PSE.addPredicate(*SE->getEqualPredicate(U, CT));
auto *Expr = PSE.getSCEV(Ptr);
- DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
- << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
+ << " by: " << *Expr << "\n");
return Expr;
}
@@ -684,7 +684,7 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
bool IsWrite = Access.getInt();
RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
- DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
+ LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
return true;
}
@@ -729,7 +729,7 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
RunningDepId, ASId, ShouldCheckWrap, false)) {
- DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
+ LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
Retries.push_back(Access);
CanDoAliasSetRT = false;
}
@@ -791,8 +791,9 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
unsigned ASi = PtrI->getType()->getPointerAddressSpace();
unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
if (ASi != ASj) {
- DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
- " different address spaces\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: Runtime check would require comparison between"
+ " different address spaces\n");
return false;
}
}
@@ -801,8 +802,8 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
if (NeedRTCheck && CanDoRT)
RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
- DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
- << " pointer comparisons.\n");
+ LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
+ << " pointer comparisons.\n");
RtCheck.Need = NeedRTCheck;
@@ -817,10 +818,10 @@ void AccessAnalysis::processMemAccesses() {
// process read-only pointers. This allows us to skip dependence tests for
// read-only pointers.
- DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
- DEBUG(dbgs() << " AST: "; AST.dump());
- DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
- DEBUG({
+ LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
+ LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
+ LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
+ LLVM_DEBUG({
for (auto A : Accesses)
dbgs() << "\t" << *A.getPointer() << " (" <<
(A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
@@ -904,7 +905,8 @@ void AccessAnalysis::processMemAccesses() {
ValueVector TempObjects;
GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
- DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Underlying objects for pointer " << *Ptr << "\n");
for (Value *UnderlyingObj : TempObjects) {
// nullptr never alias, don't join sets for pointer that have "null"
// in their UnderlyingObjects list.
@@ -917,7 +919,7 @@ void AccessAnalysis::processMemAccesses() {
DepCands.unionSets(Access, Prev->second);
ObjToLastAccess[UnderlyingObj] = Access;
- DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
+ LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
}
}
}
@@ -989,8 +991,8 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
// Make sure that the pointer does not point to aggregate types.
auto *PtrTy = cast<PointerType>(Ty);
if (PtrTy->getElementType()->isAggregateType()) {
- DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type" << *Ptr
- << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
+ << *Ptr << "\n");
return 0;
}
@@ -1001,15 +1003,15 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
AR = PSE.getAsAddRec(Ptr);
if (!AR) {
- DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
- << " SCEV: " << *PtrScev << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
+ << " SCEV: " << *PtrScev << "\n");
return 0;
}
// The accesss function must stride over the innermost loop.
if (Lp != AR->getLoop()) {
- DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
- *Ptr << " SCEV: " << *AR << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
+ << *Ptr << " SCEV: " << *AR << "\n");
return 0;
}
@@ -1029,13 +1031,14 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
if (Assume) {
PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
IsNoWrapAddRec = true;
- DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
- << "LAA: Pointer: " << *Ptr << "\n"
- << "LAA: SCEV: " << *AR << "\n"
- << "LAA: Added an overflow assumption\n");
+ LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
+ << "LAA: Pointer: " << *Ptr << "\n"
+ << "LAA: SCEV: " << *AR << "\n"
+ << "LAA: Added an overflow assumption\n");
} else {
- DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
- << *Ptr << " SCEV: " << *AR << "\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
+ << *Ptr << " SCEV: " << *AR << "\n");
return 0;
}
}
@@ -1046,8 +1049,8 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
// Calculate the pointer stride and check if it is constant.
const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
if (!C) {
- DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
- " SCEV: " << *AR << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
+ << " SCEV: " << *AR << "\n");
return 0;
}
@@ -1074,11 +1077,11 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
Stride != 1 && Stride != -1) {
if (Assume) {
// We can avoid this case by adding a run-time check.
- DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
- << "inbouds or in address space 0 may wrap:\n"
- << "LAA: Pointer: " << *Ptr << "\n"
- << "LAA: SCEV: " << *AR << "\n"
- << "LAA: Added an overflow assumption\n");
+ LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
+ << "inbouds or in address space 0 may wrap:\n"
+ << "LAA: Pointer: " << *Ptr << "\n"
+ << "LAA: SCEV: " << *AR << "\n"
+ << "LAA: Added an overflow assumption\n");
PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
} else
return 0;
@@ -1293,8 +1296,9 @@ bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
}
if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
- DEBUG(dbgs() << "LAA: Distance " << Distance
- << " that could cause a store-load forwarding conflict\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: Distance " << Distance
+ << " that could cause a store-load forwarding conflict\n");
return true;
}
@@ -1446,16 +1450,16 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
- DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
- << "(Induction step: " << StrideAPtr << ")\n");
- DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
- << *InstMap[BIdx] << ": " << *Dist << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
+ << "(Induction step: " << StrideAPtr << ")\n");
+ LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
+ << *InstMap[BIdx] << ": " << *Dist << "\n");
// Need accesses with constant stride. We don't want to vectorize
// "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
// the address space.
if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
- DEBUG(dbgs() << "Pointer access with non-constant stride\n");
+ LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
return Dependence::Unknown;
}
@@ -1472,7 +1476,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
TypeByteSize))
return Dependence::NoDep;
- DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
+ LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
ShouldRetryWithRuntimeCheck = true;
return Dependence::Unknown;
}
@@ -1483,7 +1487,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
// Attempt to prove strided accesses independent.
if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
- DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
+ LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
return Dependence::NoDep;
}
@@ -1493,11 +1497,11 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
if (IsTrueDataDependence && EnableForwardingConflictDetection &&
(couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
ATy != BTy)) {
- DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
+ LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
return Dependence::ForwardButPreventsForwarding;
}
- DEBUG(dbgs() << "LAA: Dependence is negative\n");
+ LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
return Dependence::Forward;
}
@@ -1506,15 +1510,17 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
if (Val == 0) {
if (ATy == BTy)
return Dependence::Forward;
- DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: Zero dependence difference but different types\n");
return Dependence::Unknown;
}
assert(Val.isStrictlyPositive() && "Expect a positive value");
if (ATy != BTy) {
- DEBUG(dbgs() <<
- "LAA: ReadWrite-Write positive dependency with different types\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "LAA: ReadWrite-Write positive dependency with different types\n");
return Dependence::Unknown;
}
@@ -1555,15 +1561,15 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
uint64_t MinDistanceNeeded =
TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
- DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
- << '\n');
+ LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
+ << Distance << '\n');
return Dependence::Backward;
}
// Unsafe if the minimum distance needed is greater than max safe distance.
if (MinDistanceNeeded > MaxSafeDepDistBytes) {
- DEBUG(dbgs() << "LAA: Failure because it needs at least "
- << MinDistanceNeeded << " size in bytes");
+ LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
+ << MinDistanceNeeded << " size in bytes");
return Dependence::Backward;
}
@@ -1592,8 +1598,8 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
return Dependence::BackwardVectorizableButPreventsForwarding;
uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
- DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
- << " with max VF = " << MaxVF << '\n');
+ LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
+ << " with max VF = " << MaxVF << '\n');
uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
MaxSafeRegisterWidth = std::min(MaxSafeRegisterWidth, MaxVFInBits);
return Dependence::BackwardVectorizable;
@@ -1651,7 +1657,8 @@ bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
if (Dependences.size() >= MaxDependences) {
RecordDependences = false;
Dependences.clear();
- DEBUG(dbgs() << "Too many dependences, stopped recording\n");
+ LLVM_DEBUG(dbgs()
+ << "Too many dependences, stopped recording\n");
}
}
if (!RecordDependences && !SafeForVectorization)
@@ -1663,7 +1670,7 @@ bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
}
}
- DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
+ LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
return SafeForVectorization;
}
@@ -1693,20 +1700,21 @@ void MemoryDepChecker::Dependence::print(
bool LoopAccessInfo::canAnalyzeLoop() {
// We need to have a loop header.
- DEBUG(dbgs() << "LAA: Found a loop in "
- << TheLoop->getHeader()->getParent()->getName() << ": "
- << TheLoop->getHeader()->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
+ << TheLoop->getHeader()->getParent()->getName() << ": "
+ << TheLoop->getHeader()->getName() << '\n');
// We can only analyze innermost loops.
if (!TheLoop->empty()) {
- DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
+ LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
return false;
}
// We must have a single backedge.
if (TheLoop->getNumBackEdges() != 1) {
- DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: loop control flow is not understood by analyzer\n");
recordAnalysis("CFGNotUnderstood")
<< "loop control flow is not understood by analyzer";
return false;
@@ -1714,7 +1722,8 @@ bool LoopAccessInfo::canAnalyzeLoop() {
// We must have a single exiting block.
if (!TheLoop->getExitingBlock()) {
- DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: loop control flow is not understood by analyzer\n");
recordAnalysis("CFGNotUnderstood")
<< "loop control flow is not understood by analyzer";
return false;
@@ -1724,7 +1733,8 @@ bool LoopAccessInfo::canAnalyzeLoop() {
// checked at the end of each iteration. With that we can assume that all
// instructions in the loop are executed the same number of times.
if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
- DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: loop control flow is not understood by analyzer\n");
recordAnalysis("CFGNotUnderstood")
<< "loop control flow is not understood by analyzer";
return false;
@@ -1735,7 +1745,7 @@ bool LoopAccessInfo::canAnalyzeLoop() {
if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
recordAnalysis("CantComputeNumberOfIterations")
<< "could not determine number of loop iterations";
- DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
+ LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
return false;
}
@@ -1785,7 +1795,7 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
recordAnalysis("NonSimpleLoad", Ld)
<< "read with atomic ordering or volatile read";
- DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
+ LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
CanVecMem = false;
return;
}
@@ -1809,7 +1819,7 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
if (!St->isSimple() && !IsAnnotatedParallel) {
recordAnalysis("NonSimpleStore", St)
<< "write with atomic ordering or volatile write";
- DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
+ LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
CanVecMem = false;
return;
}
@@ -1828,7 +1838,7 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
// Check if we see any stores. If there are no stores, then we don't
// care if the pointers are *restrict*.
if (!Stores.size()) {
- DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
+ LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
CanVecMem = true;
return;
}
@@ -1865,9 +1875,9 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
}
if (IsAnnotatedParallel) {
- DEBUG(dbgs()
- << "LAA: A loop annotated parallel, ignore memory dependency "
- << "checks.\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
+ << "checks.\n");
CanVecMem = true;
return;
}
@@ -1902,7 +1912,7 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
// If we write (or read-write) to a single destination and there are no
// other reads in this loop then is it safe to vectorize.
if (NumReadWrites == 1 && NumReads == 0) {
- DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
+ LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
CanVecMem = true;
return;
}
@@ -1917,23 +1927,24 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
TheLoop, SymbolicStrides);
if (!CanDoRTIfNeeded) {
recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
- DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
- << "the array bounds.\n");
+ LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
+ << "the array bounds.\n");
CanVecMem = false;
return;
}
- DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: We can perform a memory runtime check if needed.\n");
CanVecMem = true;
if (Accesses.isDependencyCheckNeeded()) {
- DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
+ LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
CanVecMem = DepChecker->areDepsSafe(
DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
- DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
+ LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
// Clear the dependency checks. We assume they are not needed.
Accesses.resetDepChecks(*DepChecker);
@@ -1949,7 +1960,7 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
if (!CanDoRTIfNeeded) {
recordAnalysis("CantCheckMemDepsAtRunTime")
<< "cannot check memory dependencies at runtime";
- DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
+ LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
CanVecMem = false;
return;
}
@@ -1959,16 +1970,17 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
}
if (CanVecMem)
- DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
- << (PtrRtChecking->Need ? "" : " don't")
- << " need runtime memory checks.\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
+ << (PtrRtChecking->Need ? "" : " don't")
+ << " need runtime memory checks.\n");
else {
recordAnalysis("UnsafeMemDep")
<< "unsafe dependent memory operations in loop. Use "
"#pragma loop distribute(enable) to allow loop distribution "
"to attempt to isolate the offending operations into a separate "
"loop";
- DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
+ LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
}
}
@@ -2052,8 +2064,8 @@ expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
if (SE->isLoopInvariant(Sc, TheLoop)) {
- DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr
- << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:"
+ << *Ptr << "\n");
// Ptr could be in the loop body. If so, expand a new one at the correct
// location.
Instruction *Inst = dyn_cast<Instruction>(Ptr);
@@ -2066,10 +2078,11 @@ expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
return {NewPtr, NewPtrPlusOne};
} else {
Value *Start = nullptr, *End = nullptr;
- DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
+ LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
- DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
+ LLVM_DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High
+ << "\n");
return {Start, End};
}
}
@@ -2187,9 +2200,9 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
if (!Stride)
return;
- DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
- "versioning:");
- DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
+ LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
+ "versioning:");
+ LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
// Avoid adding the "Stride == 1" predicate when we know that
// Stride >= Trip-Count. Such a predicate will effectively optimize a single
@@ -2225,12 +2238,13 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
// "Stride >= TripCount" is equivalent to checking:
// Stride - BETakenCount > 0
if (SE->isKnownPositive(StrideMinusBETaken)) {
- DEBUG(dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
- "Stride==1 predicate will imply that the loop executes "
- "at most once.\n");
+ LLVM_DEBUG(
+ dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
+ "Stride==1 predicate will imply that the loop executes "
+ "at most once.\n");
return;
- }
- DEBUG(dbgs() << "LAA: Found a strided access that we can version.");
+ }
+ LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.");
SymbolicStrides[Ptr] = Stride;
StrideSet.insert(Stride);
diff --git a/lib/Analysis/LoopPass.cpp b/lib/Analysis/LoopPass.cpp
index e39dc2070a35..208bd53a3a8d 100644
--- a/lib/Analysis/LoopPass.cpp
+++ b/lib/Analysis/LoopPass.cpp
@@ -362,8 +362,8 @@ bool LoopPass::skipLoop(const Loop *L) const {
// Check for the OptimizeNone attribute.
if (F->hasFnAttribute(Attribute::OptimizeNone)) {
// FIXME: Report this to dbgs() only once per function.
- DEBUG(dbgs() << "Skipping pass '" << getPassName()
- << "' in function " << F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' in function "
+ << F->getName() << "\n");
// FIXME: Delete loop from pass manager's queue?
return true;
}
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 186fda18886e..deacdb9e3241 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -528,8 +528,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
return visitGEPOperator(cast<GEPOperator>(*CE));
}
- DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V
- << '\n');
+ LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
+ << *V << '\n');
return unknown();
}
@@ -729,7 +729,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
}
SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
- DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I << '\n');
+ LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
+ << '\n');
return unknown();
}
@@ -808,8 +809,9 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
// Ignore values where we cannot do more than ObjectSizeVisitor.
Result = unknown();
} else {
- DEBUG(dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: "
- << *V << '\n');
+ LLVM_DEBUG(
+ dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
+ << '\n');
Result = unknown();
}
@@ -946,6 +948,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
- DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I <<'\n');
+ LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
+ << '\n');
return unknown();
}
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 9b080e19f195..64ceae72f71c 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -824,7 +824,7 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
SmallPtrSet<BasicBlock *, 32> Visited;
unsigned NumSortedEntries = Cache.size();
- DEBUG(AssertSorted(Cache));
+ LLVM_DEBUG(AssertSorted(Cache));
// Iterate while we still have blocks to update.
while (!DirtyBlocks.empty()) {
@@ -837,7 +837,7 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
// Do a binary search to see if we already have an entry for this block in
// the cache set. If so, find it.
- DEBUG(AssertSorted(Cache, NumSortedEntries));
+ LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
NonLocalDepEntry(DirtyBB));
@@ -1210,7 +1210,7 @@ bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
unsigned NumSortedEntries = Cache->size();
unsigned WorklistEntries = BlockNumberLimit;
bool GotWorklistLimit = false;
- DEBUG(AssertSorted(*Cache));
+ LLVM_DEBUG(AssertSorted(*Cache));
while (!Worklist.empty()) {
BasicBlock *BB = Worklist.pop_back_val();
@@ -1241,7 +1241,7 @@ bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
// Get the dependency info for Pointer in BB. If we have cached
// information, we will use it, otherwise we compute it.
- DEBUG(AssertSorted(*Cache, NumSortedEntries));
+ LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
Cache, NumSortedEntries);
@@ -1455,7 +1455,7 @@ bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
// Okay, we're done now. If we added new values to the cache, re-sort it.
SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
- DEBUG(AssertSorted(*Cache));
+ LLVM_DEBUG(AssertSorted(*Cache));
return true;
}
@@ -1651,7 +1651,7 @@ void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
}
assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
- DEBUG(verifyRemoved(RemInst));
+ LLVM_DEBUG(verifyRemoved(RemInst));
}
/// Verify that the specified instruction does not occur in our internal data
diff --git a/lib/Analysis/MemorySSA.cpp b/lib/Analysis/MemorySSA.cpp
index 661922df3595..f2fd2041a6b7 100644
--- a/lib/Analysis/MemorySSA.cpp
+++ b/lib/Analysis/MemorySSA.cpp
@@ -1240,10 +1240,11 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
unsigned long UpperBound = VersionStack.size() - 1;
if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
- DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
- << *(MU->getMemoryInst()) << ")"
- << " because there are " << UpperBound - LocInfo.LowerBound
- << " stores to disambiguate\n");
+ LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
+ << *(MU->getMemoryInst()) << ")"
+ << " because there are "
+ << UpperBound - LocInfo.LowerBound
+ << " stores to disambiguate\n");
// Because we did not walk, LastKill is no longer valid, as this may
// have been a kill.
LocInfo.LastKillValid = false;
@@ -2036,10 +2037,10 @@ MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
: StartingUseOrDef;
MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
- DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *StartingUseOrDef << "\n");
- DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *Clobber << "\n");
+ LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
+ LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
+ LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
+ LLVM_DEBUG(dbgs() << *Clobber << "\n");
return Clobber;
}
@@ -2083,10 +2084,10 @@ MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
}
MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
- DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *DefiningAccess << "\n");
- DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
- DEBUG(dbgs() << *Result << "\n");
+ LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
+ LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
+ LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
+ LLVM_DEBUG(dbgs() << *Result << "\n");
StartingAccess->setOptimized(Result);
if (MSSA->isLiveOnEntryDef(Result))
diff --git a/lib/Analysis/RegionPass.cpp b/lib/Analysis/RegionPass.cpp
index 00282db8431e..ed17df2e7e93 100644
--- a/lib/Analysis/RegionPass.cpp
+++ b/lib/Analysis/RegionPass.cpp
@@ -158,12 +158,9 @@ bool RGPassManager::runOnFunction(Function &F) {
}
// Print the region tree after all pass.
- DEBUG(
- dbgs() << "\nRegion tree of function " << F.getName()
- << " after all region Pass:\n";
- RI->dump();
- dbgs() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "\nRegion tree of function " << F.getName()
+ << " after all region Pass:\n";
+ RI->dump(); dbgs() << "\n";);
return Changed;
}
@@ -289,8 +286,8 @@ bool RegionPass::skipRegion(Region &R) const {
if (F.hasFnAttribute(Attribute::OptimizeNone)) {
// Report this only once per function.
if (R.getEntry() == &F.getEntryBlock())
- DEBUG(dbgs() << "Skipping pass '" << getPassName()
- << "' on function " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName()
+ << "' on function " << F.getName() << "\n");
return true;
}
return false;
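
A renamed call site then reads as in the sketch below, which assumes an LLVM source tree with assertions enabled; the pass name "my-pass" and the helper are invented for illustration and are not part of this commit. The output appears when the hosting tool is run with -debug, or with -debug-only=my-pass to restrict it to this DEBUG_TYPE.

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "my-pass"

// Hypothetical helper: the stream statement vanishes in NDEBUG builds, so it
// must not carry side effects the surrounding code depends on.
static void reportVisited(unsigned NumBlocks) {
  LLVM_DEBUG(llvm::dbgs() << "my-pass: visited " << NumBlocks << " blocks\n");
}
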
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 47de68c1a359..7cc72c6c1eab 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -4723,7 +4723,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
if (PredIsKnownFalse(StartVal, StartExtended)) {
- DEBUG(dbgs() << "P2 is compile-time false\n";);
+ LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
return None;
}
@@ -4731,7 +4731,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
// NSSW or NUSW)
const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
if (PredIsKnownFalse(Accum, AccumExtended)) {
- DEBUG(dbgs() << "P3 is compile-time false\n";);
+ LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
return None;
}
@@ -4740,7 +4740,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI
if (Expr != ExtendedExpr &&
!isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
- DEBUG (dbgs() << "Added Predicate: " << *Pred);
+ LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
Predicates.push_back(Pred);
}
};
@@ -10633,22 +10633,22 @@ void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
SCEVCollectStrides StrideCollector(*this, Strides);
visitAll(Expr, StrideCollector);
- DEBUG({
- dbgs() << "Strides:\n";
- for (const SCEV *S : Strides)
- dbgs() << *S << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Strides:\n";
+ for (const SCEV *S : Strides)
+ dbgs() << *S << "\n";
+ });
for (const SCEV *S : Strides) {
SCEVCollectTerms TermCollector(Terms);
visitAll(S, TermCollector);
}
- DEBUG({
- dbgs() << "Terms:\n";
- for (const SCEV *T : Terms)
- dbgs() << *T << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Terms:\n";
+ for (const SCEV *T : Terms)
+ dbgs() << *T << "\n";
+ });
SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
visitAll(Expr, MulCollector);
@@ -10759,11 +10759,11 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
if (!containsParameters(Terms))
return;
- DEBUG({
- dbgs() << "Terms:\n";
- for (const SCEV *T : Terms)
- dbgs() << *T << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Terms:\n";
+ for (const SCEV *T : Terms)
+ dbgs() << *T << "\n";
+ });
// Remove duplicates.
array_pod_sort(Terms.begin(), Terms.end());
@@ -10790,11 +10790,11 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
if (const SCEV *NewT = removeConstantFactors(*this, T))
NewTerms.push_back(NewT);
- DEBUG({
- dbgs() << "Terms after sorting:\n";
- for (const SCEV *T : NewTerms)
- dbgs() << *T << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Terms after sorting:\n";
+ for (const SCEV *T : NewTerms)
+ dbgs() << *T << "\n";
+ });
if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
Sizes.clear();
@@ -10804,11 +10804,11 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
// The last element to be pushed into Sizes is the size of an element.
Sizes.push_back(ElementSize);
- DEBUG({
- dbgs() << "Sizes:\n";
- for (const SCEV *S : Sizes)
- dbgs() << *S << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Sizes:\n";
+ for (const SCEV *S : Sizes)
+ dbgs() << *S << "\n";
+ });
}
void ScalarEvolution::computeAccessFunctions(
@@ -10828,13 +10828,13 @@ void ScalarEvolution::computeAccessFunctions(
const SCEV *Q, *R;
SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
- DEBUG({
- dbgs() << "Res: " << *Res << "\n";
- dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
- dbgs() << "Res divided by Sizes[i]:\n";
- dbgs() << "Quotient: " << *Q << "\n";
- dbgs() << "Remainder: " << *R << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Res: " << *Res << "\n";
+ dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
+ dbgs() << "Res divided by Sizes[i]:\n";
+ dbgs() << "Quotient: " << *Q << "\n";
+ dbgs() << "Remainder: " << *R << "\n";
+ });
Res = Q;
@@ -10862,11 +10862,11 @@ void ScalarEvolution::computeAccessFunctions(
std::reverse(Subscripts.begin(), Subscripts.end());
- DEBUG({
- dbgs() << "Subscripts:\n";
- for (const SCEV *S : Subscripts)
- dbgs() << *S << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Subscripts:\n";
+ for (const SCEV *S : Subscripts)
+ dbgs() << *S << "\n";
+ });
}
/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
@@ -10940,17 +10940,17 @@ void ScalarEvolution::delinearize(const SCEV *Expr,
if (Subscripts.empty())
return;
- DEBUG({
- dbgs() << "succeeded to delinearize " << *Expr << "\n";
- dbgs() << "ArrayDecl[UnknownSize]";
- for (const SCEV *S : Sizes)
- dbgs() << "[" << *S << "]";
+ LLVM_DEBUG({
+ dbgs() << "succeeded to delinearize " << *Expr << "\n";
+ dbgs() << "ArrayDecl[UnknownSize]";
+ for (const SCEV *S : Sizes)
+ dbgs() << "[" << *S << "]";
- dbgs() << "\nArrayRef";
- for (const SCEV *S : Subscripts)
- dbgs() << "[" << *S << "]";
- dbgs() << "\n";
- });
+ dbgs() << "\nArrayRef";
+ for (const SCEV *S : Subscripts)
+ dbgs() << "[" << *S << "]";
+ dbgs() << "\n";
+ });
}
//===----------------------------------------------------------------------===//
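
As the ScalarEvolution hunks above show, the macro argument need not be a single stream expression; a braced block of statements is accepted and, like the single-expression form, drops out of release builds. A short sketch under the same illustrative assumptions as before:

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

#define DEBUG_TYPE "my-pass"

static void dumpSizes(const std::vector<unsigned> &Sizes) {
  // The whole braced block is one macro argument and is debug-only output.
  LLVM_DEBUG({
    llvm::dbgs() << "Sizes:\n";
    for (unsigned S : Sizes)
      llvm::dbgs() << "  " << S << "\n";
  });
}
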
diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 1f76bdda0792..632ea8e9cdc4 100644
--- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -139,10 +139,11 @@ AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
CriticalPathSet |= CPSet;
}
- DEBUG(dbgs() << "AntiDep Critical-Path Registers:");
- DEBUG(for (unsigned r : CriticalPathSet.set_bits())
- dbgs() << " " << printReg(r, TRI));
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "AntiDep Critical-Path Registers:");
+ LLVM_DEBUG(for (unsigned r
+ : CriticalPathSet.set_bits()) dbgs()
+ << " " << printReg(r, TRI));
+ LLVM_DEBUG(dbgs() << '\n');
}
AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() {
@@ -202,9 +203,9 @@ void AggressiveAntiDepBreaker::Observe(MachineInstr &MI, unsigned Count,
PrescanInstruction(MI, Count, PassthruRegs);
ScanInstruction(MI, Count);
- DEBUG(dbgs() << "Observe: ");
- DEBUG(MI.dump());
- DEBUG(dbgs() << "\tRegs:");
+ LLVM_DEBUG(dbgs() << "Observe: ");
+ LLVM_DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "\tRegs:");
std::vector<unsigned> &DefIndices = State->GetDefIndices();
for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
@@ -215,16 +216,16 @@ void AggressiveAntiDepBreaker::Observe(MachineInstr &MI, unsigned Count,
// conservative location (i.e. the beginning of the previous
// schedule region).
if (State->IsLive(Reg)) {
- DEBUG(if (State->GetGroup(Reg) != 0)
- dbgs() << " " << printReg(Reg, TRI) << "=g" <<
- State->GetGroup(Reg) << "->g0(region live-out)");
+ LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs()
+ << " " << printReg(Reg, TRI) << "=g" << State->GetGroup(Reg)
+ << "->g0(region live-out)");
State->UnionGroups(Reg, 0);
} else if ((DefIndices[Reg] < InsertPosIndex)
&& (DefIndices[Reg] >= Count)) {
DefIndices[Reg] = Count;
}
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr &MI,
@@ -313,7 +314,7 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
// subregister definitions).
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
if (TRI->isSuperRegister(Reg, *AI) && State->IsLive(*AI)) {
- DEBUG(if (!header && footer) dbgs() << footer);
+ LLVM_DEBUG(if (!header && footer) dbgs() << footer);
return;
}
@@ -322,9 +323,11 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DefIndices[Reg] = ~0u;
RegRefs.erase(Reg);
State->LeaveGroup(Reg);
- DEBUG(if (header) {
- dbgs() << header << printReg(Reg, TRI); header = nullptr; });
- DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
+ LLVM_DEBUG(if (header) {
+ dbgs() << header << printReg(Reg, TRI);
+ header = nullptr;
+ });
+ LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
// Repeat for subregisters. Note that we only do this if the superregister
// was not live because otherwise, regardless whether we have an explicit
// use of the subregister, the subregister's contents are needed for the
@@ -336,15 +339,17 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DefIndices[SubregReg] = ~0u;
RegRefs.erase(SubregReg);
State->LeaveGroup(SubregReg);
- DEBUG(if (header) {
- dbgs() << header << printReg(Reg, TRI); header = nullptr; });
- DEBUG(dbgs() << " " << printReg(SubregReg, TRI) << "->g" <<
- State->GetGroup(SubregReg) << tag);
+ LLVM_DEBUG(if (header) {
+ dbgs() << header << printReg(Reg, TRI);
+ header = nullptr;
+ });
+ LLVM_DEBUG(dbgs() << " " << printReg(SubregReg, TRI) << "->g"
+ << State->GetGroup(SubregReg) << tag);
}
}
}
- DEBUG(if (!header && footer) dbgs() << footer);
+ LLVM_DEBUG(if (!header && footer) dbgs() << footer);
}
void AggressiveAntiDepBreaker::PrescanInstruction(
@@ -367,14 +372,15 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
HandleLastUse(Reg, Count + 1, "", "\tDead Def: ", "\n");
}
- DEBUG(dbgs() << "\tDef Groups:");
+ LLVM_DEBUG(dbgs() << "\tDef Groups:");
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g" << State->GetGroup(Reg));
+ LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"
+ << State->GetGroup(Reg));
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
@@ -383,7 +389,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
// can tell user specified registers from compiler-specified.
if (MI.isCall() || MI.hasExtraDefRegAllocReq() || TII->isPredicated(MI) ||
MI.isInlineAsm()) {
- DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
+ LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
@@ -393,8 +399,8 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
unsigned AliasReg = *AI;
if (State->IsLive(AliasReg)) {
State->UnionGroups(Reg, AliasReg);
- DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via "
- << printReg(AliasReg, TRI) << ")");
+ LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via "
+ << printReg(AliasReg, TRI) << ")");
}
}
@@ -406,7 +412,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
RegRefs.insert(std::make_pair(Reg, RR));
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
// Scan the register defs for this instruction and update
// live-ranges.
@@ -437,7 +443,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(
void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
unsigned Count) {
- DEBUG(dbgs() << "\tUse Groups:");
+ LLVM_DEBUG(dbgs() << "\tUse Groups:");
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
@@ -469,7 +475,8 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g" << State->GetGroup(Reg));
+ LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI) << "=g"
+ << State->GetGroup(Reg));
// It wasn't previously live but now it is, this is a kill. Forget
// the previous live-range information and start a new live-range
@@ -477,7 +484,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
HandleLastUse(Reg, Count, "(last-use)");
if (Special) {
- DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
+ LLVM_DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
@@ -489,12 +496,12 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
RegRefs.insert(std::make_pair(Reg, RR));
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
// Form a group of all defs and uses of a KILL instruction to ensure
// that all registers are renamed as a group.
if (MI.isKill()) {
- DEBUG(dbgs() << "\tKill Group:");
+ LLVM_DEBUG(dbgs() << "\tKill Group:");
unsigned FirstReg = 0;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -504,15 +511,15 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
if (Reg == 0) continue;
if (FirstReg != 0) {
- DEBUG(dbgs() << "=" << printReg(Reg, TRI));
+ LLVM_DEBUG(dbgs() << "=" << printReg(Reg, TRI));
State->UnionGroups(FirstReg, Reg);
} else {
- DEBUG(dbgs() << " " << printReg(Reg, TRI));
+ LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI));
FirstReg = Reg;
}
}
- DEBUG(dbgs() << "->g" << State->GetGroup(FirstReg) << '\n');
+ LLVM_DEBUG(dbgs() << "->g" << State->GetGroup(FirstReg) << '\n');
}
}
@@ -535,7 +542,7 @@ BitVector AggressiveAntiDepBreaker::GetRenameRegisters(unsigned Reg) {
BV &= RCBV;
}
- DEBUG(dbgs() << " " << TRI->getRegClassName(RC));
+ LLVM_DEBUG(dbgs() << " " << TRI->getRegClassName(RC));
}
return BV;
@@ -562,8 +569,8 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// Find the "superest" register in the group. At the same time,
// collect the BitVector of registers that can be used to rename
// each register.
- DEBUG(dbgs() << "\tRename Candidates for Group g" << AntiDepGroupIndex
- << ":\n");
+ LLVM_DEBUG(dbgs() << "\tRename Candidates for Group g" << AntiDepGroupIndex
+ << ":\n");
std::map<unsigned, BitVector> RenameRegisterMap;
unsigned SuperReg = 0;
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
@@ -573,13 +580,13 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// If Reg has any references, then collect possible rename regs
if (RegRefs.count(Reg) > 0) {
- DEBUG(dbgs() << "\t\t" << printReg(Reg, TRI) << ":");
+ LLVM_DEBUG(dbgs() << "\t\t" << printReg(Reg, TRI) << ":");
BitVector &BV = RenameRegisterMap[Reg];
assert(BV.empty());
BV = GetRenameRegisters(Reg);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << " ::";
for (unsigned r : BV.set_bits())
dbgs() << " " << printReg(r, TRI);
@@ -625,11 +632,11 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
ArrayRef<MCPhysReg> Order = RegClassInfo.getOrder(SuperRC);
if (Order.empty()) {
- DEBUG(dbgs() << "\tEmpty Super Regclass!!\n");
+ LLVM_DEBUG(dbgs() << "\tEmpty Super Regclass!!\n");
return false;
}
- DEBUG(dbgs() << "\tFind Registers:");
+ LLVM_DEBUG(dbgs() << "\tFind Registers:");
RenameOrder.insert(RenameOrderType::value_type(SuperRC, Order.size()));
@@ -645,7 +652,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// Don't replace a register with itself.
if (NewSuperReg == SuperReg) continue;
- DEBUG(dbgs() << " [" << printReg(NewSuperReg, TRI) << ':');
+ LLVM_DEBUG(dbgs() << " [" << printReg(NewSuperReg, TRI) << ':');
RenameMap.clear();
// For each referenced group register (which must be a SuperReg or
@@ -662,11 +669,11 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
NewReg = TRI->getSubReg(NewSuperReg, NewSubRegIdx);
}
- DEBUG(dbgs() << " " << printReg(NewReg, TRI));
+ LLVM_DEBUG(dbgs() << " " << printReg(NewReg, TRI));
// Check if Reg can be renamed to NewReg.
if (!RenameRegisterMap[Reg].test(NewReg)) {
- DEBUG(dbgs() << "(no rename)");
+ LLVM_DEBUG(dbgs() << "(no rename)");
goto next_super_reg;
}
@@ -675,7 +682,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// must also check all aliases of NewReg, because we can't define a
// register when any sub or super is already live.
if (State->IsLive(NewReg) || (KillIndices[Reg] > DefIndices[NewReg])) {
- DEBUG(dbgs() << "(live)");
+ LLVM_DEBUG(dbgs() << "(live)");
goto next_super_reg;
} else {
bool found = false;
@@ -683,7 +690,8 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
unsigned AliasReg = *AI;
if (State->IsLive(AliasReg) ||
(KillIndices[Reg] > DefIndices[AliasReg])) {
- DEBUG(dbgs() << "(alias " << printReg(AliasReg, TRI) << " live)");
+ LLVM_DEBUG(dbgs()
+ << "(alias " << printReg(AliasReg, TRI) << " live)");
found = true;
break;
}
@@ -701,7 +709,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
continue;
if (UseMI->getOperand(Idx).isEarlyClobber()) {
- DEBUG(dbgs() << "(ec)");
+ LLVM_DEBUG(dbgs() << "(ec)");
goto next_super_reg;
}
}
@@ -715,7 +723,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
MachineInstr *DefMI = Q.second.Operand->getParent();
if (DefMI->readsRegister(NewReg, TRI)) {
- DEBUG(dbgs() << "(ec)");
+ LLVM_DEBUG(dbgs() << "(ec)");
goto next_super_reg;
}
}
@@ -728,14 +736,14 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// renamed, as recorded in RenameMap.
RenameOrder.erase(SuperRC);
RenameOrder.insert(RenameOrderType::value_type(SuperRC, R));
- DEBUG(dbgs() << "]\n");
+ LLVM_DEBUG(dbgs() << "]\n");
return true;
next_super_reg:
- DEBUG(dbgs() << ']');
+ LLVM_DEBUG(dbgs() << ']');
} while (R != EndR);
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
// No registers are free and available!
return false;
@@ -788,13 +796,13 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
}
#ifndef NDEBUG
- DEBUG(dbgs() << "\n===== Aggressive anti-dependency breaking\n");
- DEBUG(dbgs() << "Available regs:");
+ LLVM_DEBUG(dbgs() << "\n===== Aggressive anti-dependency breaking\n");
+ LLVM_DEBUG(dbgs() << "Available regs:");
for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
if (!State->IsLive(Reg))
- DEBUG(dbgs() << " " << printReg(Reg, TRI));
+ LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI));
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
#endif
BitVector RegAliases(TRI->getNumRegs());
@@ -811,8 +819,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
if (MI.isDebugInstr())
continue;
- DEBUG(dbgs() << "Anti: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Anti: ");
+ LLVM_DEBUG(MI.dump());
std::set<unsigned> PassthruRegs;
GetPassthruRegs(MI, PassthruRegs);
@@ -848,30 +856,30 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
(Edge->getKind() != SDep::Output)) continue;
unsigned AntiDepReg = Edge->getReg();
- DEBUG(dbgs() << "\tAntidep reg: " << printReg(AntiDepReg, TRI));
+ LLVM_DEBUG(dbgs() << "\tAntidep reg: " << printReg(AntiDepReg, TRI));
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
if (!MRI.isAllocatable(AntiDepReg)) {
// Don't break anti-dependencies on non-allocatable registers.
- DEBUG(dbgs() << " (non-allocatable)\n");
+ LLVM_DEBUG(dbgs() << " (non-allocatable)\n");
continue;
} else if (ExcludeRegs && ExcludeRegs->test(AntiDepReg)) {
// Don't break anti-dependencies for critical path registers
// if not on the critical path
- DEBUG(dbgs() << " (not critical-path)\n");
+ LLVM_DEBUG(dbgs() << " (not critical-path)\n");
continue;
} else if (PassthruRegs.count(AntiDepReg) != 0) {
// If the anti-dep register liveness "passes-thru", then
// don't try to change it. It will be changed along with
// the use if required to break an earlier antidep.
- DEBUG(dbgs() << " (passthru)\n");
+ LLVM_DEBUG(dbgs() << " (passthru)\n");
continue;
} else {
// No anti-dep breaking for implicit deps
MachineOperand *AntiDepOp = MI.findRegisterDefOperand(AntiDepReg);
assert(AntiDepOp && "Can't find index for defined register operand");
if (!AntiDepOp || AntiDepOp->isImplicit()) {
- DEBUG(dbgs() << " (implicit)\n");
+ LLVM_DEBUG(dbgs() << " (implicit)\n");
continue;
}
@@ -897,13 +905,13 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
PE = PathSU->Preds.end(); P != PE; ++P) {
if ((P->getSUnit() == NextSU) && (P->getKind() != SDep::Anti) &&
(P->getKind() != SDep::Output)) {
- DEBUG(dbgs() << " (real dependency)\n");
+ LLVM_DEBUG(dbgs() << " (real dependency)\n");
AntiDepReg = 0;
break;
} else if ((P->getSUnit() != NextSU) &&
(P->getKind() == SDep::Data) &&
(P->getReg() == AntiDepReg)) {
- DEBUG(dbgs() << " (other dependency)\n");
+ LLVM_DEBUG(dbgs() << " (other dependency)\n");
AntiDepReg = 0;
break;
}
@@ -941,17 +949,17 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// Determine AntiDepReg's register group.
const unsigned GroupIndex = State->GetGroup(AntiDepReg);
if (GroupIndex == 0) {
- DEBUG(dbgs() << " (zero group)\n");
+ LLVM_DEBUG(dbgs() << " (zero group)\n");
continue;
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
// Look for a suitable register to use to break the anti-dependence.
std::map<unsigned, unsigned> RenameMap;
if (FindSuitableFreeRegisters(GroupIndex, RenameOrder, RenameMap)) {
- DEBUG(dbgs() << "\tBreaking anti-dependence edge on "
- << printReg(AntiDepReg, TRI) << ":");
+ LLVM_DEBUG(dbgs() << "\tBreaking anti-dependence edge on "
+ << printReg(AntiDepReg, TRI) << ":");
// Handle each group register...
for (std::map<unsigned, unsigned>::iterator
@@ -959,9 +967,9 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
unsigned CurrReg = S->first;
unsigned NewReg = S->second;
- DEBUG(dbgs() << " " << printReg(CurrReg, TRI) << "->"
- << printReg(NewReg, TRI) << "("
- << RegRefs.count(CurrReg) << " refs)");
+ LLVM_DEBUG(dbgs() << " " << printReg(CurrReg, TRI) << "->"
+ << printReg(NewReg, TRI) << "("
+ << RegRefs.count(CurrReg) << " refs)");
// Update the references to the old register CurrReg to
// refer to the new register NewReg.
@@ -994,7 +1002,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
}
++Broken;
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
}
}
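The hunks above (and throughout the rest of the diff) replace DEBUG(...) with LLVM_DEBUG(...). As a reading aid, here is a minimal sketch of how the renamed macro is typically used in a pass; the pass name "my-pass" and the helper function are hypothetical, while LLVM_DEBUG, DEBUG_TYPE and dbgs() come from llvm/Support/Debug.h. The output is emitted only in asserts builds (NDEBUG not defined) and only when the tool is run with -debug or -debug-only=my-pass.

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "my-pass"

    using namespace llvm;

    static void dumpBlockCount(unsigned NumBlocks) {
      // Single-statement form: compiles away entirely in release (NDEBUG) builds.
      LLVM_DEBUG(dbgs() << "my-pass: saw " << NumBlocks << " blocks\n");
      // Braced form for multi-statement output, as in several hunks above.
      LLVM_DEBUG({
        for (unsigned I = 0; I != NumBlocks; ++I)
          dbgs() << "  block #" << I << '\n';
      });
    }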
diff --git a/lib/CodeGen/AllocationOrder.cpp b/lib/CodeGen/AllocationOrder.cpp
index 8e8c1d8e08d1..37dcb0be824e 100644
--- a/lib/CodeGen/AllocationOrder.cpp
+++ b/lib/CodeGen/AllocationOrder.cpp
@@ -39,7 +39,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg,
HardHints = true;
rewind();
- DEBUG({
+ LLVM_DEBUG({
if (!Hints.empty()) {
dbgs() << "hints:";
for (unsigned I = 0, E = Hints.size(); I != E; ++I)
diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp
index 15bbc15021af..570424a79c81 100644
--- a/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -87,8 +87,9 @@ void DIEAbbrev::Emit(const AsmPrinter *AP) const {
// easily, which helps track down where it came from.
if (!dwarf::isValidFormForVersion(AttrData.getForm(),
AP->getDwarfVersion())) {
- DEBUG(dbgs() << "Invalid form " << format("0x%x", AttrData.getForm())
- << " for DWARF version " << AP->getDwarfVersion() << "\n");
+ LLVM_DEBUG(dbgs() << "Invalid form " << format("0x%x", AttrData.getForm())
+ << " for DWARF version " << AP->getDwarfVersion()
+ << "\n");
llvm_unreachable("Invalid form for specified DWARF version");
}
#endif
diff --git a/lib/CodeGen/AsmPrinter/DIEHash.cpp b/lib/CodeGen/AsmPrinter/DIEHash.cpp
index 5392b7150a72..b8f1202494d7 100644
--- a/lib/CodeGen/AsmPrinter/DIEHash.cpp
+++ b/lib/CodeGen/AsmPrinter/DIEHash.cpp
@@ -43,7 +43,7 @@ static StringRef getDIEStringAttr(const DIE &Die, uint16_t Attr) {
/// Adds the string in \p Str to the hash. This also hashes
/// a trailing NULL with the string.
void DIEHash::addString(StringRef Str) {
- DEBUG(dbgs() << "Adding string " << Str << " to hash.\n");
+ LLVM_DEBUG(dbgs() << "Adding string " << Str << " to hash.\n");
Hash.update(Str);
Hash.update(makeArrayRef((uint8_t)'\0'));
}
@@ -53,7 +53,7 @@ void DIEHash::addString(StringRef Str) {
/// Adds the unsigned in \p Value to the hash encoded as a ULEB128.
void DIEHash::addULEB128(uint64_t Value) {
- DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n");
+ LLVM_DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n");
do {
uint8_t Byte = Value & 0x7f;
Value >>= 7;
@@ -64,7 +64,7 @@ void DIEHash::addULEB128(uint64_t Value) {
}
void DIEHash::addSLEB128(int64_t Value) {
- DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n");
+ LLVM_DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n");
bool More;
do {
uint8_t Byte = Value & 0x7f;
@@ -80,7 +80,7 @@ void DIEHash::addSLEB128(int64_t Value) {
/// Including \p Parent adds the context of Parent to the hash..
void DIEHash::addParentContext(const DIE &Parent) {
- DEBUG(dbgs() << "Adding parent context to hash...\n");
+ LLVM_DEBUG(dbgs() << "Adding parent context to hash...\n");
// [7.27.2] For each surrounding type or namespace beginning with the
// outermost such construct...
@@ -108,7 +108,7 @@ void DIEHash::addParentContext(const DIE &Parent) {
// ... Then the name, taken from the DW_AT_name attribute.
StringRef Name = getDIEStringAttr(Die, dwarf::DW_AT_name);
- DEBUG(dbgs() << "... adding context: " << Name << "\n");
+ LLVM_DEBUG(dbgs() << "... adding context: " << Name << "\n");
if (!Name.empty())
addString(Name);
}
@@ -118,9 +118,9 @@ void DIEHash::addParentContext(const DIE &Parent) {
void DIEHash::collectAttributes(const DIE &Die, DIEAttrs &Attrs) {
for (const auto &V : Die.values()) {
- DEBUG(dbgs() << "Attribute: "
- << dwarf::AttributeString(V.getAttribute())
- << " added.\n");
+ LLVM_DEBUG(dbgs() << "Attribute: "
+ << dwarf::AttributeString(V.getAttribute())
+ << " added.\n");
switch (V.getAttribute()) {
#define HANDLE_DIE_HASH_ATTR(NAME) \
case dwarf::NAME: \
diff --git a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
index daa045939ce5..7e7b1c1d0c15 100644
--- a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
+++ b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
@@ -50,8 +50,8 @@ void DbgValueHistoryMap::startInstrRange(InlinedVariable Var,
auto &Ranges = VarInstrRanges[Var];
if (!Ranges.empty() && Ranges.back().second == nullptr &&
Ranges.back().first->isIdenticalTo(MI)) {
- DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n"
- << "\t" << Ranges.back().first << "\t" << MI << "\n");
+ LLVM_DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n"
+ << "\t" << Ranges.back().first << "\t" << MI << "\n");
return;
}
Ranges.push_back(std::make_pair(&MI, nullptr));
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index b16dc3c17594..19aeb60630c1 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1034,7 +1034,7 @@ DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
EndLabel = getLabelBeforeInsn(std::next(I)->first);
assert(EndLabel && "Forgot label after instruction ending a range!");
- DEBUG(dbgs() << "DotDebugLoc: " << *Begin << "\n");
+ LLVM_DEBUG(dbgs() << "DotDebugLoc: " << *Begin << "\n");
auto Value = getDebugLocValue(Begin);
DebugLocEntry Loc(StartLabel, EndLabel, Value);
@@ -1063,7 +1063,7 @@ DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
// Attempt to coalesce the ranges of two otherwise identical
// DebugLocEntries.
auto CurEntry = DebugLoc.rbegin();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << CurEntry->getValues().size() << " Values:\n";
for (auto &Value : CurEntry->getValues())
Value.dump();
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index 7042bc997223..460e7055d9b8 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -379,8 +379,8 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
NewLI->setAlignment(LI->getAlignment());
NewLI->setVolatile(LI->isVolatile());
NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
- DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
-
+ LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
+
Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
LI->replaceAllUsesWith(NewVal);
LI->eraseFromParent();
@@ -462,7 +462,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
NewSI->setAlignment(SI->getAlignment());
NewSI->setVolatile(SI->isVolatile());
NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
- DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
+ LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
SI->eraseFromParent();
return NewSI;
}
@@ -943,7 +943,7 @@ AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *
CI->getSyncScopeID());
NewCI->setVolatile(CI->isVolatile());
NewCI->setWeak(CI->isWeak());
- DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
+ LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
Value *Succ = Builder.CreateExtractValue(NewCI, 1);
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index 8db711305882..190dfc0e4730 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -152,7 +152,7 @@ BranchFolder::BranchFolder(bool defaultEnableTailMerge, bool CommonHoist,
void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
assert(MBB->pred_empty() && "MBB must be dead!");
- DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
+ LLVM_DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
MachineFunction *MF = MBB->getParent();
// drop all successors.
@@ -650,9 +650,9 @@ ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2);
if (CommonTailLen == 0)
return false;
- DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1)
- << " and " << printMBBReference(*MBB2) << " is " << CommonTailLen
- << '\n');
+ LLVM_DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1)
+ << " and " << printMBBReference(*MBB2) << " is "
+ << CommonTailLen << '\n');
// It's almost always profitable to merge any number of non-terminator
// instructions with the block that falls through into the common successor.
@@ -807,8 +807,8 @@ bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
SameTails[commonTailIndex].getTailStartPos();
MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
- DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size "
- << maxCommonTailLength);
+ LLVM_DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size "
+ << maxCommonTailLength);
// If the split block unconditionally falls-thru to SuccBB, it will be
// merged. In control flow terms it should then take SuccBB's name. e.g. If
@@ -817,7 +817,7 @@ bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
SuccBB->getBasicBlock() : MBB->getBasicBlock();
MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI, BB);
if (!newMBB) {
- DEBUG(dbgs() << "... failed!");
+ LLVM_DEBUG(dbgs() << "... failed!");
return false;
}
@@ -956,18 +956,19 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
unsigned MinCommonTailLength) {
bool MadeChange = false;
- DEBUG(dbgs() << "\nTryTailMergeBlocks: ";
- for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs()
- << printMBBReference(*MergePotentials[i].getBlock())
- << (i == e - 1 ? "" : ", ");
- dbgs() << "\n"; if (SuccBB) {
- dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n';
- if (PredBB)
- dbgs() << " which has fall-through from "
- << printMBBReference(*PredBB) << "\n";
- } dbgs() << "Looking for common tails of at least "
- << MinCommonTailLength << " instruction"
- << (MinCommonTailLength == 1 ? "" : "s") << '\n';);
+ LLVM_DEBUG(
+ dbgs() << "\nTryTailMergeBlocks: ";
+ for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs()
+ << printMBBReference(*MergePotentials[i].getBlock())
+ << (i == e - 1 ? "" : ", ");
+ dbgs() << "\n"; if (SuccBB) {
+ dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n';
+ if (PredBB)
+ dbgs() << " which has fall-through from "
+ << printMBBReference(*PredBB) << "\n";
+ } dbgs() << "Looking for common tails of at least "
+ << MinCommonTailLength << " instruction"
+ << (MinCommonTailLength == 1 ? "" : "s") << '\n';);
// Sort by hash value so that blocks with identical end sequences sort
// together.
@@ -1047,19 +1048,19 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
// MBB is common tail. Adjust all other BB's to jump to this one.
// Traversal must be forwards so erases work.
- DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB)
- << " for ");
+ LLVM_DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB)
+ << " for ");
for (unsigned int i=0, e = SameTails.size(); i != e; ++i) {
if (commonTailIndex == i)
continue;
- DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock())
- << (i == e - 1 ? "" : ", "));
+ LLVM_DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock())
+ << (i == e - 1 ? "" : ", "));
// Hack the end off BB i, making it jump to BB commonTailIndex instead.
replaceTailWithBranchTo(SameTails[i].getTailStartPos(), *MBB);
// BB i is no longer a predecessor of SuccBB; remove it from the worklist.
MergePotentials.erase(SameTails[i].getMPIter());
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
// We leave commonTailIndex in the worklist in case there are other blocks
// that match it with a smaller number of instructions.
MadeChange = true;
@@ -1363,7 +1364,8 @@ static void copyDebugInfoToPredecessor(const TargetInstrInfo *TII,
for (MachineInstr &MI : MBB.instrs())
if (MI.isDebugValue()) {
TII->duplicate(PredMBB, InsertBefore, MI);
- DEBUG(dbgs() << "Copied debug value from empty block to pred: " << MI);
+ LLVM_DEBUG(dbgs() << "Copied debug value from empty block to pred: "
+ << MI);
}
}
@@ -1374,7 +1376,8 @@ static void copyDebugInfoToSuccessor(const TargetInstrInfo *TII,
for (MachineInstr &MI : MBB.instrs())
if (MI.isDebugValue()) {
TII->duplicate(SuccMBB, InsertBefore, MI);
- DEBUG(dbgs() << "Copied debug value from empty block to succ: " << MI);
+ LLVM_DEBUG(dbgs() << "Copied debug value from empty block to succ: "
+ << MI);
}
}
@@ -1489,8 +1492,8 @@ ReoptimizeBlock:
if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 &&
PrevBB.succ_size() == 1 &&
!MBB->hasAddressTaken() && !MBB->isEHPad()) {
- DEBUG(dbgs() << "\nMerging into block: " << PrevBB
- << "From MBB: " << *MBB);
+ LLVM_DEBUG(dbgs() << "\nMerging into block: " << PrevBB
+ << "From MBB: " << *MBB);
// Remove redundant DBG_VALUEs first.
if (PrevBB.begin() != PrevBB.end()) {
MachineBasicBlock::iterator PrevBBIter = PrevBB.end();
@@ -1576,8 +1579,8 @@ ReoptimizeBlock:
// Reverse the branch so we will fall through on the previous true cond.
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
if (!TII->reverseBranchCondition(NewPriorCond)) {
- DEBUG(dbgs() << "\nMoving MBB: " << *MBB
- << "To make fallthrough to: " << *PriorTBB << "\n");
+ LLVM_DEBUG(dbgs() << "\nMoving MBB: " << *MBB
+ << "To make fallthrough to: " << *PriorTBB << "\n");
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->removeBranch(PrevBB);
diff --git a/lib/CodeGen/BranchRelaxation.cpp b/lib/CodeGen/BranchRelaxation.cpp
index 744612261c88..3866f51e93c6 100644
--- a/lib/CodeGen/BranchRelaxation.cpp
+++ b/lib/CodeGen/BranchRelaxation.cpp
@@ -288,10 +288,11 @@ bool BranchRelaxation::isBlockInRange(
if (TII->isBranchOffsetInRange(MI.getOpcode(), DestOffset - BrOffset))
return true;
- DEBUG(dbgs() << "Out of range branch to destination "
- << printMBBReference(DestBB) << " from "
- << printMBBReference(*MI.getParent()) << " to " << DestOffset
- << " offset " << DestOffset - BrOffset << '\t' << MI);
+ LLVM_DEBUG(dbgs() << "Out of range branch to destination "
+ << printMBBReference(DestBB) << " from "
+ << printMBBReference(*MI.getParent()) << " to "
+ << DestOffset << " offset " << DestOffset - BrOffset << '\t'
+ << MI);
return false;
}
@@ -360,8 +361,9 @@ bool BranchRelaxation::fixupConditionalBranch(MachineInstr &MI) {
// =>
// bne L2
// b L1
- DEBUG(dbgs() << " Invert condition and swap "
- "its destination with " << MBB->back());
+ LLVM_DEBUG(dbgs() << " Invert condition and swap "
+ "its destination with "
+ << MBB->back());
removeBranch(MBB);
insertBranch(MBB, FBB, TBB, Cond);
@@ -384,9 +386,9 @@ bool BranchRelaxation::fixupConditionalBranch(MachineInstr &MI) {
// just created), so we can use the inverted condition.
MachineBasicBlock &NextBB = *std::next(MachineFunction::iterator(MBB));
- DEBUG(dbgs() << " Insert B to " << printMBBReference(*TBB)
- << ", invert condition and change dest. to "
- << printMBBReference(NextBB) << '\n');
+ LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*TBB)
+ << ", invert condition and change dest. to "
+ << printMBBReference(NextBB) << '\n');
removeBranch(MBB);
// Insert a new conditional branch and a new unconditional branch.
@@ -397,8 +399,8 @@ bool BranchRelaxation::fixupConditionalBranch(MachineInstr &MI) {
}
// Branch cond can't be inverted.
// In this case we always add a block after the MBB.
- DEBUG(dbgs() << " The branch condition can't be inverted. "
- << " Insert a new BB after " << MBB->back());
+ LLVM_DEBUG(dbgs() << " The branch condition can't be inverted. "
+ << " Insert a new BB after " << MBB->back());
if (!FBB)
FBB = &(*std::next(MachineFunction::iterator(MBB)));
@@ -417,11 +419,12 @@ bool BranchRelaxation::fixupConditionalBranch(MachineInstr &MI) {
NewBB = createNewBlockAfter(*MBB);
insertUncondBranch(NewBB, TBB);
- DEBUG(dbgs() << " Insert cond B to the new BB " << printMBBReference(*NewBB)
- << " Keep the exiting condition.\n"
- << " Insert B to " << printMBBReference(*FBB) << ".\n"
- << " In the new BB: Insert B to "
- << printMBBReference(*TBB) << ".\n");
+ LLVM_DEBUG(dbgs() << " Insert cond B to the new BB "
+ << printMBBReference(*NewBB)
+ << " Keep the exiting condition.\n"
+ << " Insert B to " << printMBBReference(*FBB) << ".\n"
+ << " In the new BB: Insert B to "
+ << printMBBReference(*TBB) << ".\n");
// Update the successor lists according to the transformation to follow.
MBB->replaceSuccessor(TBB, NewBB);
@@ -541,7 +544,7 @@ bool BranchRelaxation::relaxBranchInstructions() {
bool BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
- DEBUG(dbgs() << "***** BranchRelaxation *****\n");
+ LLVM_DEBUG(dbgs() << "***** BranchRelaxation *****\n");
const TargetSubtargetInfo &ST = MF->getSubtarget();
TII = ST.getInstrInfo();
@@ -558,7 +561,7 @@ bool BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
// sizes of each block.
scanFunction();
- DEBUG(dbgs() << " Basic blocks before relaxation\n"; dumpBBs(););
+ LLVM_DEBUG(dbgs() << " Basic blocks before relaxation\n"; dumpBBs(););
bool MadeChange = false;
while (relaxBranchInstructions())
@@ -567,7 +570,7 @@ bool BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
// After a while, this might be made debug-only, but it is not expensive.
verify();
- DEBUG(dbgs() << " Basic blocks after relaxation\n\n"; dumpBBs());
+ LLVM_DEBUG(dbgs() << " Basic blocks after relaxation\n\n"; dumpBBs());
BlockInfo.clear();
diff --git a/lib/CodeGen/BreakFalseDeps.cpp b/lib/CodeGen/BreakFalseDeps.cpp
index 2e6d7275c7ef..7f098cb71657 100644
--- a/lib/CodeGen/BreakFalseDeps.cpp
+++ b/lib/CodeGen/BreakFalseDeps.cpp
@@ -165,13 +165,13 @@ bool BreakFalseDeps::shouldBreakDependence(MachineInstr *MI, unsigned OpIdx,
unsigned Pref) {
unsigned reg = MI->getOperand(OpIdx).getReg();
unsigned Clearance = RDA->getClearance(MI, reg);
- DEBUG(dbgs() << "Clearance: " << Clearance << ", want " << Pref);
+ LLVM_DEBUG(dbgs() << "Clearance: " << Clearance << ", want " << Pref);
if (Pref > Clearance) {
- DEBUG(dbgs() << ": Break dependency.\n");
+ LLVM_DEBUG(dbgs() << ": Break dependency.\n");
return true;
}
- DEBUG(dbgs() << ": OK .\n");
+ LLVM_DEBUG(dbgs() << ": OK .\n");
return false;
}
@@ -260,7 +260,7 @@ bool BreakFalseDeps::runOnMachineFunction(MachineFunction &mf) {
RegClassInfo.runOnMachineFunction(mf);
- DEBUG(dbgs() << "********** BREAK FALSE DEPENDENCIES **********\n");
+ LLVM_DEBUG(dbgs() << "********** BREAK FALSE DEPENDENCIES **********\n");
// Traverse the basic blocks.
for (MachineBasicBlock &MBB : mf) {
diff --git a/lib/CodeGen/CalcSpillWeights.cpp b/lib/CodeGen/CalcSpillWeights.cpp
index 5a9026926d68..57541182cab2 100644
--- a/lib/CodeGen/CalcSpillWeights.cpp
+++ b/lib/CodeGen/CalcSpillWeights.cpp
@@ -35,8 +35,8 @@ void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
const MachineLoopInfo &MLI,
const MachineBlockFrequencyInfo &MBFI,
VirtRegAuxInfo::NormalizingFn norm) {
- DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
+ << "********** Function: " << MF.getName() << '\n');
MachineRegisterInfo &MRI = MF.getRegInfo();
VirtRegAuxInfo VRAI(MF, LIS, VRM, MLI, MBFI, norm);
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index 87cb2f9a19c7..92996eb3fee5 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -528,7 +528,7 @@ bool CodeGenPrepare::eliminateFallThrough(Function &F) {
BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
if (Term && !Term->isConditional()) {
Changed = true;
- DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
+ LLVM_DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
// Remember if SinglePred was the entry block of the function.
// If so, we will need to move BB back to the entry position.
bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
@@ -755,7 +755,8 @@ void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
BranchInst *BI = cast<BranchInst>(BB->getTerminator());
BasicBlock *DestBB = BI->getSuccessor(0);
- DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
+ LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
+ << *BB << *DestBB);
// If the destination block has a single pred, then this is a trivial edge,
// just collapse it.
@@ -769,7 +770,7 @@ void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
if (isEntry && BB != &BB->getParent()->getEntryBlock())
BB->moveBefore(&BB->getParent()->getEntryBlock());
- DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
+ LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
return;
}
}
@@ -807,7 +808,7 @@ void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
BB->eraseFromParent();
++NumBlocksElim;
- DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
+ LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
// Computes a map of base pointer relocation instructions to corresponding
@@ -1272,8 +1273,8 @@ static bool sinkAndCmp0Expression(Instruction *AndI,
if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
return false;
- DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
- DEBUG(AndI->getParent()->dump());
+ LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
+ LLVM_DEBUG(AndI->getParent()->dump());
// Push the 'and' into the same block as the icmp 0. There should only be
// one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
@@ -1286,7 +1287,7 @@ static bool sinkAndCmp0Expression(Instruction *AndI,
// Preincrement use iterator so we don't invalidate it.
++UI;
- DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
+ LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
// Keep the 'and' in the same place if the use is already in the same block.
Instruction *InsertPt =
@@ -1300,7 +1301,7 @@ static bool sinkAndCmp0Expression(Instruction *AndI,
// Replace a use of the 'and' with a use of the new 'and'.
TheUse = InsertedAnd;
++NumAndUses;
- DEBUG(User->getParent()->dump());
+ LLVM_DEBUG(User->getParent()->dump());
}
// We removed all uses, nuke the and.
@@ -2130,13 +2131,14 @@ class TypePromotionTransaction {
/// Move \p Inst before \p Before.
InstructionMoveBefore(Instruction *Inst, Instruction *Before)
: TypePromotionAction(Inst), Position(Inst) {
- DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
+ LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
+ << "\n");
Inst->moveBefore(Before);
}
/// Move the instruction back to its original position.
void undo() override {
- DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
Position.insert(Inst);
}
};
@@ -2153,18 +2155,18 @@ class TypePromotionTransaction {
/// Set \p Idx operand of \p Inst with \p NewVal.
OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
: TypePromotionAction(Inst), Idx(Idx) {
- DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
- << "for:" << *Inst << "\n"
- << "with:" << *NewVal << "\n");
+ LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
+ << "for:" << *Inst << "\n"
+ << "with:" << *NewVal << "\n");
Origin = Inst->getOperand(Idx);
Inst->setOperand(Idx, NewVal);
}
/// Restore the original value of the instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
- << "for: " << *Inst << "\n"
- << "with: " << *Origin << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
+ << "for: " << *Inst << "\n"
+ << "with: " << *Origin << "\n");
Inst->setOperand(Idx, Origin);
}
};
@@ -2178,7 +2180,7 @@ class TypePromotionTransaction {
public:
/// Remove \p Inst from the uses of the operands of \p Inst.
OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
- DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
unsigned NumOpnds = Inst->getNumOperands();
OriginalValues.reserve(NumOpnds);
for (unsigned It = 0; It < NumOpnds; ++It) {
@@ -2194,7 +2196,7 @@ class TypePromotionTransaction {
/// Restore the original list of uses.
void undo() override {
- DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
Inst->setOperand(It, OriginalValues[It]);
}
@@ -2211,7 +2213,7 @@ class TypePromotionTransaction {
TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
IRBuilder<> Builder(Opnd);
Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
- DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
}
/// Get the built value.
@@ -2219,7 +2221,7 @@ class TypePromotionTransaction {
/// Remove the built instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
@@ -2237,7 +2239,7 @@ class TypePromotionTransaction {
: TypePromotionAction(InsertPt) {
IRBuilder<> Builder(InsertPt);
Val = Builder.CreateSExt(Opnd, Ty, "promoted");
- DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
}
/// Get the built value.
@@ -2245,7 +2247,7 @@ class TypePromotionTransaction {
/// Remove the built instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
@@ -2263,7 +2265,7 @@ class TypePromotionTransaction {
: TypePromotionAction(InsertPt) {
IRBuilder<> Builder(InsertPt);
Val = Builder.CreateZExt(Opnd, Ty, "promoted");
- DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
}
/// Get the built value.
@@ -2271,7 +2273,7 @@ class TypePromotionTransaction {
/// Remove the built instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
@@ -2286,15 +2288,15 @@ class TypePromotionTransaction {
/// Mutate the type of \p Inst into \p NewTy.
TypeMutator(Instruction *Inst, Type *NewTy)
: TypePromotionAction(Inst), OrigTy(Inst->getType()) {
- DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
- << "\n");
+ LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
+ << "\n");
Inst->mutateType(NewTy);
}
/// Mutate the instruction back to its original type.
void undo() override {
- DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
- << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
+ << "\n");
Inst->mutateType(OrigTy);
}
};
@@ -2321,8 +2323,8 @@ class TypePromotionTransaction {
public:
/// Replace all the use of \p Inst by \p New.
UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
- DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
- << "\n");
+ LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
+ << "\n");
// Record the original uses.
for (Use &U : Inst->uses()) {
Instruction *UserI = cast<Instruction>(U.getUser());
@@ -2334,7 +2336,7 @@ class TypePromotionTransaction {
/// Reassign the original uses of Inst to Inst.
void undo() override {
- DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
for (use_iterator UseIt = OriginalUses.begin(),
EndIt = OriginalUses.end();
UseIt != EndIt; ++UseIt) {
@@ -2369,7 +2371,7 @@ class TypePromotionTransaction {
RemovedInsts(RemovedInsts) {
if (New)
Replacer = new UsesReplacer(Inst, New);
- DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
RemovedInsts.insert(Inst);
/// The instructions removed here will be freed after completing
/// optimizeBlock() for all blocks as we need to keep track of the
@@ -2382,7 +2384,7 @@ class TypePromotionTransaction {
/// Resurrect the instruction and reassign it to the proper uses if
/// a new value was provided when building this action.
void undo() override {
- DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
Inserter.insert(Inst);
if (Replacer)
Replacer->undo();
@@ -3592,19 +3594,19 @@ Value *TypePromotionHelper::promoteOperandForOther(
// Step #3.
Instruction *ExtForOpnd = Ext;
- DEBUG(dbgs() << "Propagate Ext to operands\n");
+ LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
++OpIdx) {
- DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
+ LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
!shouldExtOperand(ExtOpnd, OpIdx)) {
- DEBUG(dbgs() << "No need to propagate\n");
+ LLVM_DEBUG(dbgs() << "No need to propagate\n");
continue;
}
// Check if we can statically extend the operand.
Value *Opnd = ExtOpnd->getOperand(OpIdx);
if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
- DEBUG(dbgs() << "Statically extend\n");
+ LLVM_DEBUG(dbgs() << "Statically extend\n");
unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
: Cst->getValue().zext(BitWidth);
@@ -3613,7 +3615,7 @@ Value *TypePromotionHelper::promoteOperandForOther(
}
// UndefValue are typed, so we have to statically sign extend them.
if (isa<UndefValue>(Opnd)) {
- DEBUG(dbgs() << "Statically extend\n");
+ LLVM_DEBUG(dbgs() << "Statically extend\n");
TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
continue;
}
@@ -3622,7 +3624,7 @@ Value *TypePromotionHelper::promoteOperandForOther(
// Check if Ext was reused to extend an operand.
if (!ExtForOpnd) {
// If yes, create a new one.
- DEBUG(dbgs() << "More operands to ext\n");
+ LLVM_DEBUG(dbgs() << "More operands to ext\n");
Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
: TPT.createZExt(Ext, Opnd, Ext->getType());
if (!isa<Instruction>(ValForExtOpnd)) {
@@ -3643,7 +3645,7 @@ Value *TypePromotionHelper::promoteOperandForOther(
ExtForOpnd = nullptr;
}
if (ExtForOpnd == Ext) {
- DEBUG(dbgs() << "Extension is useless now\n");
+ LLVM_DEBUG(dbgs() << "Extension is useless now\n");
TPT.eraseInstruction(Ext);
}
return ExtOpnd;
@@ -3659,7 +3661,8 @@ Value *TypePromotionHelper::promoteOperandForOther(
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
- DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
+ LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
+ << '\n');
// The cost of the new extensions is greater than the cost of the
// old extension plus what we folded.
// This is not profitable.
@@ -3930,7 +3933,7 @@ bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
PromotedOperand)) {
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
- DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
+ LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
TPT.rollback(LastKnownGood);
return false;
}
@@ -4393,7 +4396,8 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
return IsNonLocalValue(V, MemoryInst->getParent());
})) {
- DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
+ << "\n");
return false;
}
@@ -4412,16 +4416,16 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
if (SunkAddr) {
- DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
+ << " for " << *MemoryInst << "\n");
if (SunkAddr->getType() != Addr->getType())
SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
} else if (AddrSinkUsingGEPs ||
(!AddrSinkUsingGEPs.getNumOccurrences() && TM && TTI->useAA())) {
// By default, we use the GEP-based method when AA is used later. This
// prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
- DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
+ << " for " << *MemoryInst << "\n");
Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *ResultPtr = nullptr, *ResultIndex = nullptr;
@@ -4560,8 +4564,8 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
return false;
- DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
+ << " for " << *MemoryInst << "\n");
Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *Result = nullptr;
@@ -5927,8 +5931,9 @@ class VectorPromoteHelper {
VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
Arg0OVK, Arg1OVK);
}
- DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
- << ScalarCost << "\nVector: " << VectorCost << '\n');
+ LLVM_DEBUG(
+ dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
+ << ScalarCost << "\nVector: " << VectorCost << '\n');
return ScalarCost > VectorCost;
}
@@ -6133,35 +6138,36 @@ bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
// => we would need to check that we are moving it at a cheaper place and
// we do not do that for now.
BasicBlock *Parent = Inst->getParent();
- DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
// If the transition has more than one use, assume this is not going to be
// beneficial.
while (Inst->hasOneUse()) {
Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
- DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
+ LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
if (ToBePromoted->getParent() != Parent) {
- DEBUG(dbgs() << "Instruction to promote is in a different block ("
- << ToBePromoted->getParent()->getName()
- << ") than the transition (" << Parent->getName() << ").\n");
+ LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
+ << ToBePromoted->getParent()->getName()
+ << ") than the transition (" << Parent->getName()
+ << ").\n");
return false;
}
if (VPH.canCombine(ToBePromoted)) {
- DEBUG(dbgs() << "Assume " << *Inst << '\n'
- << "will be combined with: " << *ToBePromoted << '\n');
+ LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
+ << "will be combined with: " << *ToBePromoted << '\n');
VPH.recordCombineInstruction(ToBePromoted);
bool Changed = VPH.promote();
NumStoreExtractExposed += Changed;
return Changed;
}
- DEBUG(dbgs() << "Try promoting.\n");
+ LLVM_DEBUG(dbgs() << "Try promoting.\n");
if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
return false;
- DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
+ LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
VPH.enqueueForPromotion(ToBePromoted);
Inst = ToBePromoted;
@@ -6656,7 +6662,8 @@ bool CodeGenPrepare::placeDbgValues(Function &F) {
// after it.
if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
continue;
- DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
+ LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
+ << *DVI << ' ' << *VI);
DVI->removeFromParent();
if (isa<PHINode>(VI))
DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
@@ -6735,7 +6742,7 @@ bool CodeGenPrepare::splitBranchCondition(Function &F) {
!match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) )
continue;
- DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
+ LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
// Create a new BB.
auto TmpBB =
@@ -6863,8 +6870,8 @@ bool CodeGenPrepare::splitBranchCondition(Function &F) {
MadeChange = true;
- DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
- TmpBB->dump());
+ LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
+ TmpBB->dump());
}
return MadeChange;
}
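Several of the hunks above wrap larger dumps in LLVM_DEBUG({ ... }); debug output can also be gated under its own type string, independent of the file-level DEBUG_TYPE, via DEBUG_WITH_TYPE from the same header (LLVM_DEBUG is that macro with DEBUG_TYPE filled in). A small sketch with a hypothetical type string "rename" and helper:

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    static void traceRename(unsigned OldReg, unsigned NewReg) {
      // Printed only in asserts builds run with -debug or -debug-only=rename.
      DEBUG_WITH_TYPE("rename", dbgs() << "renaming " << OldReg << " -> "
                                       << NewReg << '\n');
    }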
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 608fc4ba89b3..840e5ede6444 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -461,14 +461,14 @@ BreakAntiDependencies(const std::vector<SUnit> &SUnits,
#ifndef NDEBUG
{
- DEBUG(dbgs() << "Critical path has total latency "
- << (Max->getDepth() + Max->Latency) << "\n");
- DEBUG(dbgs() << "Available regs:");
+ LLVM_DEBUG(dbgs() << "Critical path has total latency "
+ << (Max->getDepth() + Max->Latency) << "\n");
+ LLVM_DEBUG(dbgs() << "Available regs:");
for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
if (KillIndices[Reg] == ~0u)
- DEBUG(dbgs() << " " << printReg(Reg, TRI));
+ LLVM_DEBUG(dbgs() << " " << printReg(Reg, TRI));
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
#endif
@@ -645,10 +645,10 @@ BreakAntiDependencies(const std::vector<SUnit> &SUnits,
AntiDepReg,
LastNewReg[AntiDepReg],
RC, ForbidRegs)) {
- DEBUG(dbgs() << "Breaking anti-dependence edge on "
- << printReg(AntiDepReg, TRI) << " with "
- << RegRefs.count(AntiDepReg) << " references"
- << " using " << printReg(NewReg, TRI) << "!\n");
+ LLVM_DEBUG(dbgs() << "Breaking anti-dependence edge on "
+ << printReg(AntiDepReg, TRI) << " with "
+ << RegRefs.count(AntiDepReg) << " references"
+ << " using " << printReg(NewReg, TRI) << "!\n");
// Update the references to the old register to refer to the new
// register.
diff --git a/lib/CodeGen/DFAPacketizer.cpp b/lib/CodeGen/DFAPacketizer.cpp
index 848db444270d..cd302e78cc3e 100644
--- a/lib/CodeGen/DFAPacketizer.cpp
+++ b/lib/CodeGen/DFAPacketizer.cpp
@@ -222,7 +222,7 @@ VLIWPacketizerList::~VLIWPacketizerList() {
// End the current packet, bundle packet instructions and reset DFA state.
void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MI) {
- DEBUG({
+ LLVM_DEBUG({
if (!CurrentPacketMIs.empty()) {
dbgs() << "Finalizing packet:\n";
for (MachineInstr *MI : CurrentPacketMIs)
@@ -235,7 +235,7 @@ void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
}
CurrentPacketMIs.clear();
ResourceTracker->clearResources();
- DEBUG(dbgs() << "End packet\n");
+ LLVM_DEBUG(dbgs() << "End packet\n");
}
// Bundle machine instructions into packets.
@@ -248,7 +248,7 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
std::distance(BeginItr, EndItr));
VLIWScheduler->schedule();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Scheduling DAG of the packetize region\n";
for (SUnit &SU : VLIWScheduler->SUnits)
SU.dumpAll(VLIWScheduler);
@@ -287,10 +287,10 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
assert(SUI && "Missing SUnit Info!");
// Ask DFA if machine resource is available for MI.
- DEBUG(dbgs() << "Checking resources for adding MI to packet " << MI);
+ LLVM_DEBUG(dbgs() << "Checking resources for adding MI to packet " << MI);
bool ResourceAvail = ResourceTracker->canReserveResources(MI);
- DEBUG({
+ LLVM_DEBUG({
if (ResourceAvail)
dbgs() << " Resources are available for adding MI to packet\n";
else
@@ -302,31 +302,33 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
SUnit *SUJ = MIToSUnit[MJ];
assert(SUJ && "Missing SUnit Info!");
- DEBUG(dbgs() << " Checking against MJ " << *MJ);
+ LLVM_DEBUG(dbgs() << " Checking against MJ " << *MJ);
// Is it legal to packetize SUI and SUJ together.
if (!isLegalToPacketizeTogether(SUI, SUJ)) {
- DEBUG(dbgs() << " Not legal to add MI, try to prune\n");
+ LLVM_DEBUG(dbgs() << " Not legal to add MI, try to prune\n");
// Allow packetization if dependency can be pruned.
if (!isLegalToPruneDependencies(SUI, SUJ)) {
// End the packet if dependency cannot be pruned.
- DEBUG(dbgs() << " Could not prune dependencies for adding MI\n");
+ LLVM_DEBUG(dbgs()
+ << " Could not prune dependencies for adding MI\n");
endPacket(MBB, MI);
break;
}
- DEBUG(dbgs() << " Pruned dependence for adding MI\n");
+ LLVM_DEBUG(dbgs() << " Pruned dependence for adding MI\n");
}
}
} else {
- DEBUG(if (ResourceAvail)
- dbgs() << "Resources are available, but instruction should not be "
- "added to packet\n " << MI);
+ LLVM_DEBUG(if (ResourceAvail) dbgs()
+ << "Resources are available, but instruction should not be "
+ "added to packet\n "
+ << MI);
// End the packet if resource is not available, or if the instruction
// should not be added to the current packet.
endPacket(MBB, MI);
}
// Add MI to the current packet.
- DEBUG(dbgs() << "* Adding MI to packet " << MI << '\n');
+ LLVM_DEBUG(dbgs() << "* Adding MI to packet " << MI << '\n');
BeginItr = addToPacket(MI);
} // For all instructions in the packetization range.
diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp
index e6a54bb300f2..ff44c5660bad 100644
--- a/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -125,7 +125,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
// If the instruction is dead, delete it!
if (isDead(MI)) {
- DEBUG(dbgs() << "DeadMachineInstructionElim: DELETING: " << *MI);
+ LLVM_DEBUG(dbgs() << "DeadMachineInstructionElim: DELETING: " << *MI);
// It is possible that some DBG_VALUE instructions refer to this
// instruction. They get marked as undef and will be deleted
// in the live debug variable analysis.
diff --git a/lib/CodeGen/DetectDeadLanes.cpp b/lib/CodeGen/DetectDeadLanes.cpp
index 7d7eb57352a2..c83db476a4de 100644
--- a/lib/CodeGen/DetectDeadLanes.cpp
+++ b/lib/CodeGen/DetectDeadLanes.cpp
@@ -439,7 +439,7 @@ LaneBitmask DetectDeadLanes::determineInitialUsedLanes(unsigned Reg) {
const TargetRegisterClass *DstRC = MRI->getRegClass(DefReg);
CrossCopy = isCrossCopy(*MRI, UseMI, DstRC, MO);
if (CrossCopy)
- DEBUG(dbgs() << "Copy across incompatible classes: " << UseMI);
+ LLVM_DEBUG(dbgs() << "Copy across incompatible classes: " << UseMI);
}
if (!CrossCopy)
@@ -520,17 +520,15 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) {
transferDefinedLanesStep(MO, Info.DefinedLanes);
}
- DEBUG(
- dbgs() << "Defined/Used lanes:\n";
- for (unsigned RegIdx = 0; RegIdx < NumVirtRegs; ++RegIdx) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(RegIdx);
- const VRegInfo &Info = VRegInfos[RegIdx];
- dbgs() << printReg(Reg, nullptr)
- << " Used: " << PrintLaneMask(Info.UsedLanes)
- << " Def: " << PrintLaneMask(Info.DefinedLanes) << '\n';
- }
- dbgs() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "Defined/Used lanes:\n"; for (unsigned RegIdx = 0;
+ RegIdx < NumVirtRegs;
+ ++RegIdx) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(RegIdx);
+ const VRegInfo &Info = VRegInfos[RegIdx];
+ dbgs() << printReg(Reg, nullptr)
+ << " Used: " << PrintLaneMask(Info.UsedLanes)
+ << " Def: " << PrintLaneMask(Info.DefinedLanes) << '\n';
+ } dbgs() << "\n";);
bool Again = false;
// Mark operands as dead/unused.
@@ -545,18 +543,19 @@ bool DetectDeadLanes::runOnce(MachineFunction &MF) {
unsigned RegIdx = TargetRegisterInfo::virtReg2Index(Reg);
const VRegInfo &RegInfo = VRegInfos[RegIdx];
if (MO.isDef() && !MO.isDead() && RegInfo.UsedLanes.none()) {
- DEBUG(dbgs() << "Marking operand '" << MO << "' as dead in " << MI);
+ LLVM_DEBUG(dbgs()
+ << "Marking operand '" << MO << "' as dead in " << MI);
MO.setIsDead();
}
if (MO.readsReg()) {
bool CrossCopy = false;
if (isUndefRegAtInput(MO, RegInfo)) {
- DEBUG(dbgs() << "Marking operand '" << MO << "' as undef in "
- << MI);
+ LLVM_DEBUG(dbgs()
+ << "Marking operand '" << MO << "' as undef in " << MI);
MO.setIsUndef();
} else if (isUndefInput(MO, &CrossCopy)) {
- DEBUG(dbgs() << "Marking operand '" << MO << "' as undef in "
- << MI);
+ LLVM_DEBUG(dbgs()
+ << "Marking operand '" << MO << "' as undef in " << MI);
MO.setIsUndef();
if (CrossCopy)
Again = true;
@@ -577,7 +576,7 @@ bool DetectDeadLanes::runOnMachineFunction(MachineFunction &MF) {
// so we save compile time.
MRI = &MF.getRegInfo();
if (!MRI->subRegLivenessEnabled()) {
- DEBUG(dbgs() << "Skipping Detect dead lanes pass\n");
+ LLVM_DEBUG(dbgs() << "Skipping Detect dead lanes pass\n");
return false;
}
diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp
index c0c67c40376f..098afd885f2f 100644
--- a/lib/CodeGen/EarlyIfConversion.cpp
+++ b/lib/CodeGen/EarlyIfConversion.cpp
@@ -185,7 +185,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
// Reject any live-in physregs. It's probably CPSR/EFLAGS, and very hard to
// get right.
if (!MBB->livein_empty()) {
- DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
return false;
}
@@ -199,14 +199,14 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
continue;
if (++InstrCount > BlockInstrLimit && !Stress) {
- DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
- << BlockInstrLimit << " instructions.\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
+ << BlockInstrLimit << " instructions.\n");
return false;
}
// There shouldn't normally be any phis in a single-predecessor block.
if (I->isPHI()) {
- DEBUG(dbgs() << "Can't hoist: " << *I);
+ LLVM_DEBUG(dbgs() << "Can't hoist: " << *I);
return false;
}
@@ -214,21 +214,21 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
// speculate GOT or constant pool loads that are guaranteed not to trap,
// but we don't support that for now.
if (I->mayLoad()) {
- DEBUG(dbgs() << "Won't speculate load: " << *I);
+ LLVM_DEBUG(dbgs() << "Won't speculate load: " << *I);
return false;
}
// We never speculate stores, so an AA pointer isn't necessary.
bool DontMoveAcrossStore = true;
if (!I->isSafeToMove(nullptr, DontMoveAcrossStore)) {
- DEBUG(dbgs() << "Can't speculate: " << *I);
+ LLVM_DEBUG(dbgs() << "Can't speculate: " << *I);
return false;
}
// Check for any dependencies on Head instructions.
for (const MachineOperand &MO : I->operands()) {
if (MO.isRegMask()) {
- DEBUG(dbgs() << "Won't speculate regmask: " << *I);
+ LLVM_DEBUG(dbgs() << "Won't speculate regmask: " << *I);
return false;
}
if (!MO.isReg())
@@ -246,9 +246,10 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
if (!DefMI || DefMI->getParent() != Head)
continue;
if (InsertAfter.insert(DefMI).second)
- DEBUG(dbgs() << printMBBReference(*MBB) << " depends on " << *DefMI);
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " depends on "
+ << *DefMI);
if (DefMI->isTerminator()) {
- DEBUG(dbgs() << "Can't insert instructions below terminator.\n");
+ LLVM_DEBUG(dbgs() << "Can't insert instructions below terminator.\n");
return false;
}
}
@@ -279,7 +280,7 @@ bool SSAIfConv::findInsertionPoint() {
--I;
// Some of the conditional code depends on I.
if (InsertAfter.count(&*I)) {
- DEBUG(dbgs() << "Can't insert code after " << *I);
+ LLVM_DEBUG(dbgs() << "Can't insert code after " << *I);
return false;
}
@@ -313,7 +314,7 @@ bool SSAIfConv::findInsertionPoint() {
// Some of the clobbered registers are live before I, not a valid insertion
// point.
if (!LiveRegUnits.empty()) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Would clobber";
for (SparseSet<unsigned>::const_iterator
i = LiveRegUnits.begin(), e = LiveRegUnits.end(); i != e; ++i)
@@ -325,10 +326,10 @@ bool SSAIfConv::findInsertionPoint() {
// This is a valid insertion point.
InsertionPoint = I;
- DEBUG(dbgs() << "Can insert before " << *I);
+ LLVM_DEBUG(dbgs() << "Can insert before " << *I);
return true;
}
- DEBUG(dbgs() << "No legal insertion point found.\n");
+ LLVM_DEBUG(dbgs() << "No legal insertion point found.\n");
return false;
}
@@ -361,39 +362,39 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
if (Succ1->pred_size() != 1 || Succ1->succ_size() != 1 ||
Succ1->succ_begin()[0] != Tail)
return false;
- DEBUG(dbgs() << "\nDiamond: " << printMBBReference(*Head) << " -> "
- << printMBBReference(*Succ0) << "/"
- << printMBBReference(*Succ1) << " -> "
- << printMBBReference(*Tail) << '\n');
+ LLVM_DEBUG(dbgs() << "\nDiamond: " << printMBBReference(*Head) << " -> "
+ << printMBBReference(*Succ0) << "/"
+ << printMBBReference(*Succ1) << " -> "
+ << printMBBReference(*Tail) << '\n');
// Live-in physregs are tricky to get right when speculating code.
if (!Tail->livein_empty()) {
- DEBUG(dbgs() << "Tail has live-ins.\n");
+ LLVM_DEBUG(dbgs() << "Tail has live-ins.\n");
return false;
}
} else {
- DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> "
- << printMBBReference(*Succ0) << " -> "
- << printMBBReference(*Tail) << '\n');
+ LLVM_DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> "
+ << printMBBReference(*Succ0) << " -> "
+ << printMBBReference(*Tail) << '\n');
}
// This is a triangle or a diamond.
// If Tail doesn't have any phis, there must be side effects.
if (Tail->empty() || !Tail->front().isPHI()) {
- DEBUG(dbgs() << "No phis in tail.\n");
+ LLVM_DEBUG(dbgs() << "No phis in tail.\n");
return false;
}
// The branch we're looking to eliminate must be analyzable.
Cond.clear();
if (TII->analyzeBranch(*Head, TBB, FBB, Cond)) {
- DEBUG(dbgs() << "Branch not analyzable.\n");
+ LLVM_DEBUG(dbgs() << "Branch not analyzable.\n");
return false;
}
// This is weird, probably some sort of degenerate CFG.
if (!TBB) {
- DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch.\n");
+ LLVM_DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch.\n");
return false;
}
@@ -422,7 +423,7 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
// Get target information.
if (!TII->canInsertSelect(*Head, Cond, PI.TReg, PI.FReg,
PI.CondCycles, PI.TCycles, PI.FCycles)) {
- DEBUG(dbgs() << "Can't convert: " << *PI.PHI);
+ LLVM_DEBUG(dbgs() << "Can't convert: " << *PI.PHI);
return false;
}
}
@@ -459,10 +460,10 @@ void SSAIfConv::replacePHIInstrs() {
// Convert all PHIs to select instructions inserted before FirstTerm.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
PHIInfo &PI = PHIs[i];
- DEBUG(dbgs() << "If-converting " << *PI.PHI);
+ LLVM_DEBUG(dbgs() << "If-converting " << *PI.PHI);
unsigned DstReg = PI.PHI->getOperand(0).getReg();
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
- DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
+ LLVM_DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
PI.PHI->eraseFromParent();
PI.PHI = nullptr;
}
@@ -481,7 +482,7 @@ void SSAIfConv::rewritePHIOperands() {
PHIInfo &PI = PHIs[i];
unsigned DstReg = 0;
- DEBUG(dbgs() << "If-converting " << *PI.PHI);
+ LLVM_DEBUG(dbgs() << "If-converting " << *PI.PHI);
if (PI.TReg == PI.FReg) {
// We do not need the select instruction if both incoming values are
// equal.
@@ -491,7 +492,7 @@ void SSAIfConv::rewritePHIOperands() {
DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
TII->insertSelect(*Head, FirstTerm, HeadDL,
DstReg, Cond, PI.TReg, PI.FReg);
- DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
+ LLVM_DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
}
// Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
@@ -505,7 +506,7 @@ void SSAIfConv::rewritePHIOperands() {
PI.PHI->RemoveOperand(i-2);
}
}
- DEBUG(dbgs() << " --> " << *PI.PHI);
+ LLVM_DEBUG(dbgs() << " --> " << *PI.PHI);
}
}
@@ -563,8 +564,8 @@ void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
assert(Head->succ_empty() && "Additional head successors?");
if (!ExtraPreds && Head->isLayoutSuccessor(Tail)) {
// Splice Tail onto the end of Head.
- DEBUG(dbgs() << "Joining tail " << printMBBReference(*Tail) << " into head "
- << printMBBReference(*Head) << '\n');
+ LLVM_DEBUG(dbgs() << "Joining tail " << printMBBReference(*Tail)
+ << " into head " << printMBBReference(*Head) << '\n');
Head->splice(Head->end(), Tail,
Tail->begin(), Tail->end());
Head->transferSuccessorsAndUpdatePHIs(Tail);
@@ -572,12 +573,12 @@ void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
Tail->eraseFromParent();
} else {
// We need a branch to Tail, let code placement work it out later.
- DEBUG(dbgs() << "Converting to unconditional branch.\n");
+ LLVM_DEBUG(dbgs() << "Converting to unconditional branch.\n");
SmallVector<MachineOperand, 0> EmptyCond;
TII->insertBranch(*Head, Tail, nullptr, EmptyCond, HeadDL);
Head->addSuccessor(Tail);
}
- DEBUG(dbgs() << *Head);
+ LLVM_DEBUG(dbgs() << *Head);
}
@@ -692,7 +693,7 @@ bool EarlyIfConverter::shouldConvertIf() {
MachineTraceMetrics::Trace TBBTrace = MinInstr->getTrace(IfConv.getTPred());
MachineTraceMetrics::Trace FBBTrace = MinInstr->getTrace(IfConv.getFPred());
- DEBUG(dbgs() << "TBB: " << TBBTrace << "FBB: " << FBBTrace);
+ LLVM_DEBUG(dbgs() << "TBB: " << TBBTrace << "FBB: " << FBBTrace);
unsigned MinCrit = std::min(TBBTrace.getCriticalPath(),
FBBTrace.getCriticalPath());
@@ -706,10 +707,10 @@ bool EarlyIfConverter::shouldConvertIf() {
if (IfConv.TBB != IfConv.Tail)
ExtraBlocks.push_back(IfConv.TBB);
unsigned ResLength = FBBTrace.getResourceLength(ExtraBlocks);
- DEBUG(dbgs() << "Resource length " << ResLength
- << ", minimal critical path " << MinCrit << '\n');
+ LLVM_DEBUG(dbgs() << "Resource length " << ResLength
+ << ", minimal critical path " << MinCrit << '\n');
if (ResLength > MinCrit + CritLimit) {
- DEBUG(dbgs() << "Not enough available ILP.\n");
+ LLVM_DEBUG(dbgs() << "Not enough available ILP.\n");
return false;
}
@@ -719,7 +720,7 @@ bool EarlyIfConverter::shouldConvertIf() {
MachineTraceMetrics::Trace HeadTrace = MinInstr->getTrace(IfConv.Head);
unsigned BranchDepth =
HeadTrace.getInstrCycles(*IfConv.Head->getFirstTerminator()).Depth;
- DEBUG(dbgs() << "Branch depth: " << BranchDepth << '\n');
+ LLVM_DEBUG(dbgs() << "Branch depth: " << BranchDepth << '\n');
// Look at all the tail phis, and compute the critical path extension caused
// by inserting select instructions.
@@ -728,15 +729,15 @@ bool EarlyIfConverter::shouldConvertIf() {
SSAIfConv::PHIInfo &PI = IfConv.PHIs[i];
unsigned Slack = TailTrace.getInstrSlack(*PI.PHI);
unsigned MaxDepth = Slack + TailTrace.getInstrCycles(*PI.PHI).Depth;
- DEBUG(dbgs() << "Slack " << Slack << ":\t" << *PI.PHI);
+ LLVM_DEBUG(dbgs() << "Slack " << Slack << ":\t" << *PI.PHI);
// The condition is pulled into the critical path.
unsigned CondDepth = adjCycles(BranchDepth, PI.CondCycles);
if (CondDepth > MaxDepth) {
unsigned Extra = CondDepth - MaxDepth;
- DEBUG(dbgs() << "Condition adds " << Extra << " cycles.\n");
+ LLVM_DEBUG(dbgs() << "Condition adds " << Extra << " cycles.\n");
if (Extra > CritLimit) {
- DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ LLVM_DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
return false;
}
}
@@ -745,9 +746,9 @@ bool EarlyIfConverter::shouldConvertIf() {
unsigned TDepth = adjCycles(TBBTrace.getPHIDepth(*PI.PHI), PI.TCycles);
if (TDepth > MaxDepth) {
unsigned Extra = TDepth - MaxDepth;
- DEBUG(dbgs() << "TBB data adds " << Extra << " cycles.\n");
+ LLVM_DEBUG(dbgs() << "TBB data adds " << Extra << " cycles.\n");
if (Extra > CritLimit) {
- DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ LLVM_DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
return false;
}
}
@@ -756,9 +757,9 @@ bool EarlyIfConverter::shouldConvertIf() {
unsigned FDepth = adjCycles(FBBTrace.getPHIDepth(*PI.PHI), PI.FCycles);
if (FDepth > MaxDepth) {
unsigned Extra = FDepth - MaxDepth;
- DEBUG(dbgs() << "FBB data adds " << Extra << " cycles.\n");
+ LLVM_DEBUG(dbgs() << "FBB data adds " << Extra << " cycles.\n");
if (Extra > CritLimit) {
- DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ LLVM_DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
return false;
}
}
@@ -783,8 +784,8 @@ bool EarlyIfConverter::tryConvertIf(MachineBasicBlock *MBB) {
}
bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
+ << "********** Function: " << MF.getName() << '\n');
if (skipFunction(MF.getFunction()))
return false;
diff --git a/lib/CodeGen/ExecutionDomainFix.cpp b/lib/CodeGen/ExecutionDomainFix.cpp
index 6413489e9d94..458dcf2b0e26 100644
--- a/lib/CodeGen/ExecutionDomainFix.cpp
+++ b/lib/CodeGen/ExecutionDomainFix.cpp
@@ -161,7 +161,7 @@ void ExecutionDomainFix::enterBasicBlock(
// This is the entry block.
if (MBB->pred_empty()) {
- DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n");
return;
}
@@ -200,9 +200,9 @@ void ExecutionDomainFix::enterBasicBlock(
force(rx, pdv->getFirstDomain());
}
}
- DEBUG(dbgs() << printMBBReference(*MBB)
- << (!TraversedMBB.IsDone ? ": incomplete\n"
- : ": all preds known\n"));
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB)
+ << (!TraversedMBB.IsDone ? ": incomplete\n"
+ : ": all preds known\n"));
}
void ExecutionDomainFix::leaveBasicBlock(
@@ -245,7 +245,7 @@ void ExecutionDomainFix::processDefs(MachineInstr *MI, bool Kill) {
continue;
for (int rx : regIndices(MO.getReg())) {
// This instruction explicitly defines rx.
- DEBUG(dbgs() << printReg(RC->getRegister(rx), TRI) << ":\t" << *MI);
+ LLVM_DEBUG(dbgs() << printReg(RC->getRegister(rx), TRI) << ":\t" << *MI);
// Kill off domains redefined by generic instructions.
if (Kill)
@@ -420,8 +420,8 @@ bool ExecutionDomainFix::runOnMachineFunction(MachineFunction &mf) {
LiveRegs.clear();
assert(NumRegs == RC->getNumRegs() && "Bad regclass");
- DEBUG(dbgs() << "********** FIX EXECUTION DOMAIN: "
- << TRI->getRegClassName(RC) << " **********\n");
+ LLVM_DEBUG(dbgs() << "********** FIX EXECUTION DOMAIN: "
+ << TRI->getRegClassName(RC) << " **********\n");
// If no relevant registers are used in the function, we can skip it
// completely.
diff --git a/lib/CodeGen/ExpandPostRAPseudos.cpp b/lib/CodeGen/ExpandPostRAPseudos.cpp
index 6ef97d6dd5ec..bc747fc610f8 100644
--- a/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -93,11 +93,11 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
assert(TargetRegisterInfo::isPhysicalRegister(InsReg) &&
"Inserted value must be in a physical register");
- DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
+ LLVM_DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
if (MI->allDefsAreDead()) {
MI->setDesc(TII->get(TargetOpcode::KILL));
- DEBUG(dbgs() << "subreg: replaced by: " << *MI);
+ LLVM_DEBUG(dbgs() << "subreg: replaced by: " << *MI);
return true;
}
@@ -110,10 +110,10 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
MI->setDesc(TII->get(TargetOpcode::KILL));
MI->RemoveOperand(3); // SubIdx
MI->RemoveOperand(1); // Imm
- DEBUG(dbgs() << "subreg: replace by: " << *MI);
+ LLVM_DEBUG(dbgs() << "subreg: replace by: " << *MI);
return true;
}
- DEBUG(dbgs() << "subreg: eliminated!");
+ LLVM_DEBUG(dbgs() << "subreg: eliminated!");
} else {
TII->copyPhysReg(*MBB, MI, MI->getDebugLoc(), DstSubReg, InsReg,
MI->getOperand(2).isKill());
@@ -122,10 +122,10 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
MachineBasicBlock::iterator CopyMI = MI;
--CopyMI;
CopyMI->addRegisterDefined(DstReg);
- DEBUG(dbgs() << "subreg: " << *CopyMI);
+ LLVM_DEBUG(dbgs() << "subreg: " << *CopyMI);
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
MBB->erase(MI);
return true;
}
@@ -133,9 +133,9 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
bool ExpandPostRA::LowerCopy(MachineInstr *MI) {
if (MI->allDefsAreDead()) {
- DEBUG(dbgs() << "dead copy: " << *MI);
+ LLVM_DEBUG(dbgs() << "dead copy: " << *MI);
MI->setDesc(TII->get(TargetOpcode::KILL));
- DEBUG(dbgs() << "replaced by: " << *MI);
+ LLVM_DEBUG(dbgs() << "replaced by: " << *MI);
return true;
}
@@ -144,14 +144,15 @@ bool ExpandPostRA::LowerCopy(MachineInstr *MI) {
bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
if (IdentityCopy || SrcMO.isUndef()) {
- DEBUG(dbgs() << (IdentityCopy ? "identity copy: " : "undef copy: ") << *MI);
+ LLVM_DEBUG(dbgs() << (IdentityCopy ? "identity copy: " : "undef copy: ")
+ << *MI);
// No need to insert an identity copy instruction, but replace with a KILL
// if liveness is changed.
if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
// We must make sure the super-register gets killed. Replace the
// instruction with KILL.
MI->setDesc(TII->get(TargetOpcode::KILL));
- DEBUG(dbgs() << "replaced by: " << *MI);
+ LLVM_DEBUG(dbgs() << "replaced by: " << *MI);
return true;
}
// Vanilla identity copy.
@@ -159,13 +160,13 @@ bool ExpandPostRA::LowerCopy(MachineInstr *MI) {
return true;
}
- DEBUG(dbgs() << "real copy: " << *MI);
+ LLVM_DEBUG(dbgs() << "real copy: " << *MI);
TII->copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(),
DstMO.getReg(), SrcMO.getReg(), SrcMO.isKill());
if (MI->getNumOperands() > 2)
TransferImplicitOperands(MI);
- DEBUG({
+ LLVM_DEBUG({
MachineBasicBlock::iterator dMI = MI;
dbgs() << "replaced by: " << *(--dMI);
});
@@ -177,9 +178,9 @@ bool ExpandPostRA::LowerCopy(MachineInstr *MI) {
/// copies.
///
bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "Machine Function\n"
- << "********** EXPANDING POST-RA PSEUDO INSTRS **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Machine Function\n"
+ << "********** EXPANDING POST-RA PSEUDO INSTRS **********\n"
+ << "********** Function: " << MF.getName() << '\n');
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/CodeGen/FaultMaps.cpp b/lib/CodeGen/FaultMaps.cpp
index 2924b011e0c1..361558a0e562 100644
--- a/lib/CodeGen/FaultMaps.cpp
+++ b/lib/CodeGen/FaultMaps.cpp
@@ -62,17 +62,17 @@ void FaultMaps::serializeToFaultMapSection() {
// Emit a dummy symbol to force section inclusion.
OS.EmitLabel(OutContext.getOrCreateSymbol(Twine("__LLVM_FaultMaps")));
- DEBUG(dbgs() << "********** Fault Map Output **********\n");
+ LLVM_DEBUG(dbgs() << "********** Fault Map Output **********\n");
// Header
OS.EmitIntValue(FaultMapVersion, 1); // Version.
OS.EmitIntValue(0, 1); // Reserved.
OS.EmitIntValue(0, 2); // Reserved.
- DEBUG(dbgs() << WFMP << "#functions = " << FunctionInfos.size() << "\n");
+ LLVM_DEBUG(dbgs() << WFMP << "#functions = " << FunctionInfos.size() << "\n");
OS.EmitIntValue(FunctionInfos.size(), 4);
- DEBUG(dbgs() << WFMP << "functions:\n");
+ LLVM_DEBUG(dbgs() << WFMP << "functions:\n");
for (const auto &FFI : FunctionInfos)
emitFunctionInfo(FFI.first, FFI.second);
@@ -82,25 +82,25 @@ void FaultMaps::emitFunctionInfo(const MCSymbol *FnLabel,
const FunctionFaultInfos &FFI) {
MCStreamer &OS = *AP.OutStreamer;
- DEBUG(dbgs() << WFMP << " function addr: " << *FnLabel << "\n");
+ LLVM_DEBUG(dbgs() << WFMP << " function addr: " << *FnLabel << "\n");
OS.EmitSymbolValue(FnLabel, 8);
- DEBUG(dbgs() << WFMP << " #faulting PCs: " << FFI.size() << "\n");
+ LLVM_DEBUG(dbgs() << WFMP << " #faulting PCs: " << FFI.size() << "\n");
OS.EmitIntValue(FFI.size(), 4);
OS.EmitIntValue(0, 4); // Reserved
for (auto &Fault : FFI) {
- DEBUG(dbgs() << WFMP << " fault type: "
- << faultTypeToString(Fault.Kind) << "\n");
+ LLVM_DEBUG(dbgs() << WFMP << " fault type: "
+ << faultTypeToString(Fault.Kind) << "\n");
OS.EmitIntValue(Fault.Kind, 4);
- DEBUG(dbgs() << WFMP << " faulting PC offset: "
- << *Fault.FaultingOffsetExpr << "\n");
+ LLVM_DEBUG(dbgs() << WFMP << " faulting PC offset: "
+ << *Fault.FaultingOffsetExpr << "\n");
OS.EmitValue(Fault.FaultingOffsetExpr, 4);
- DEBUG(dbgs() << WFMP << " fault handler PC offset: "
- << *Fault.HandlerOffsetExpr << "\n");
+ LLVM_DEBUG(dbgs() << WFMP << " fault handler PC offset: "
+ << *Fault.HandlerOffsetExpr << "\n");
OS.EmitValue(Fault.HandlerOffsetExpr, 4);
}
}
diff --git a/lib/CodeGen/GlobalISel/Combiner.cpp b/lib/CodeGen/GlobalISel/Combiner.cpp
index dbc9dbc53de6..0bc5b87de150 100644
--- a/lib/CodeGen/GlobalISel/Combiner.cpp
+++ b/lib/CodeGen/GlobalISel/Combiner.cpp
@@ -40,7 +40,7 @@ bool Combiner::combineMachineInstrs(MachineFunction &MF) {
MRI = &MF.getRegInfo();
Builder.setMF(MF);
- DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n');
MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
@@ -61,7 +61,7 @@ bool Combiner::combineMachineInstrs(MachineFunction &MF) {
++MII;
// Erase dead insts before even adding to the list.
if (isTriviallyDead(*CurMI, *MRI)) {
- DEBUG(dbgs() << *CurMI << "Is dead; erasing.\n");
+ LLVM_DEBUG(dbgs() << *CurMI << "Is dead; erasing.\n");
CurMI->eraseFromParentAndMarkDBGValuesForRemoval();
continue;
}
@@ -71,7 +71,7 @@ bool Combiner::combineMachineInstrs(MachineFunction &MF) {
// Main Loop. Process the instructions here.
while (!WorkList.empty()) {
MachineInstr *CurrInst = WorkList.pop_back_val();
- DEBUG(dbgs() << "Try combining " << *CurrInst << "\n";);
+ LLVM_DEBUG(dbgs() << "Try combining " << *CurrInst << "\n";);
Changed |= CInfo.combine(*CurrInst, Builder);
}
MFChanged |= Changed;
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index b0a40f82f9b6..18572fea181b 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -652,7 +652,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
const Value *Address = DI.getAddress();
if (!Address || isa<UndefValue>(Address)) {
- DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return true;
}
diff --git a/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 7923a888118a..7d37a6ce4604 100644
--- a/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -65,7 +65,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n');
const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
const InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector();
@@ -116,12 +116,12 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
else
--MII;
- DEBUG(dbgs() << "Selecting: \n " << MI);
+ LLVM_DEBUG(dbgs() << "Selecting: \n " << MI);
// We could have folded this instruction away already, making it dead.
// If so, erase it.
if (isTriviallyDead(MI, MRI)) {
- DEBUG(dbgs() << "Is dead; erasing.\n");
+ LLVM_DEBUG(dbgs() << "Is dead; erasing.\n");
MI.eraseFromParentAndMarkDBGValuesForRemoval();
continue;
}
@@ -134,7 +134,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
}
// Dump the range of instructions that MI expanded into.
- DEBUG({
+ LLVM_DEBUG({
auto InsertedBegin = ReachedBegin ? MBB->begin() : std::next(MII);
dbgs() << "Into:\n";
for (auto &InsertedMI : make_range(InsertedBegin, AfterIt))
@@ -218,7 +218,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
auto &TLI = *MF.getSubtarget().getTargetLowering();
TLI.finalizeLowering(MF);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Rules covered by selecting function: " << MF.getName() << ":";
for (auto RuleID : CoverageInfo.covered())
dbgs() << " id" << RuleID;
diff --git a/lib/CodeGen/GlobalISel/Legalizer.cpp b/lib/CodeGen/GlobalISel/Legalizer.cpp
index f09b0d9f11e7..6ce5851d436f 100644
--- a/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -72,7 +72,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
if (MF.getProperties().hasProperty(
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n');
init(MF);
const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
@@ -112,7 +112,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
else
InstList.insert(MI);
}
- DEBUG(dbgs() << ".. .. New MI: " << *MI;);
+ LLVM_DEBUG(dbgs() << ".. .. New MI: " << *MI;);
});
const LegalizerInfo &LInfo(Helper.getLegalizerInfo());
LegalizationArtifactCombiner ArtCombiner(Helper.MIRBuilder, MF.getRegInfo(), LInfo);
@@ -127,7 +127,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
MachineInstr &MI = *InstList.pop_back_val();
assert(isPreISelGenericOpcode(MI.getOpcode()) && "Expecting generic opcode");
if (isTriviallyDead(MI, MRI)) {
- DEBUG(dbgs() << MI << "Is dead; erasing.\n");
+ LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
MI.eraseFromParentAndMarkDBGValuesForRemoval();
continue;
}
@@ -148,7 +148,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
MachineInstr &MI = *ArtifactList.pop_back_val();
assert(isPreISelGenericOpcode(MI.getOpcode()) && "Expecting generic opcode");
if (isTriviallyDead(MI, MRI)) {
- DEBUG(dbgs() << MI << "Is dead; erasing.\n");
+ LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
RemoveDeadInstFromLists(&MI);
MI.eraseFromParentAndMarkDBGValuesForRemoval();
continue;
@@ -156,7 +156,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
SmallVector<MachineInstr *, 4> DeadInstructions;
if (ArtCombiner.tryCombineInstruction(MI, DeadInstructions)) {
for (auto *DeadMI : DeadInstructions) {
- DEBUG(dbgs() << ".. Erasing Dead Instruction " << *DeadMI);
+ LLVM_DEBUG(dbgs() << ".. Erasing Dead Instruction " << *DeadMI);
RemoveDeadInstFromLists(DeadMI);
DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
}
diff --git a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 410c3b722705..b5cc7f7727f2 100644
--- a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -35,34 +35,34 @@ LegalizerHelper::LegalizerHelper(MachineFunction &MF)
LegalizerHelper::LegalizeResult
LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
- DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
auto Step = LI.getAction(MI, MRI);
switch (Step.Action) {
case Legal:
- DEBUG(dbgs() << ".. Already legal\n");
+ LLVM_DEBUG(dbgs() << ".. Already legal\n");
return AlreadyLegal;
case Libcall:
- DEBUG(dbgs() << ".. Convert to libcall\n");
+ LLVM_DEBUG(dbgs() << ".. Convert to libcall\n");
return libcall(MI);
case NarrowScalar:
- DEBUG(dbgs() << ".. Narrow scalar\n");
+ LLVM_DEBUG(dbgs() << ".. Narrow scalar\n");
return narrowScalar(MI, Step.TypeIdx, Step.NewType);
case WidenScalar:
- DEBUG(dbgs() << ".. Widen scalar\n");
+ LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
return widenScalar(MI, Step.TypeIdx, Step.NewType);
case Lower:
- DEBUG(dbgs() << ".. Lower\n");
+ LLVM_DEBUG(dbgs() << ".. Lower\n");
return lower(MI, Step.TypeIdx, Step.NewType);
case FewerElements:
- DEBUG(dbgs() << ".. Reduce number of elements\n");
+ LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n");
return fewerElementsVector(MI, Step.TypeIdx, Step.NewType);
case Custom:
- DEBUG(dbgs() << ".. Custom legalization\n");
+ LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
return LI.legalizeCustom(MI, MRI, MIRBuilder) ? Legalized
: UnableToLegalize;
default:
- DEBUG(dbgs() << ".. Unable to legalize\n");
+ LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
return UnableToLegalize;
}
}
diff --git a/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index a681e9f7b2dc..3ff5e97e8839 100644
--- a/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -58,18 +58,18 @@ raw_ostream &LegalityQuery::print(raw_ostream &OS) const {
}
LegalizeActionStep LegalizeRuleSet::apply(const LegalityQuery &Query) const {
- DEBUG(dbgs() << "Applying legalizer ruleset to: "; Query.print(dbgs());
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Applying legalizer ruleset to: "; Query.print(dbgs());
+ dbgs() << "\n");
if (Rules.empty()) {
- DEBUG(dbgs() << ".. fallback to legacy rules (no rules defined)\n");
+ LLVM_DEBUG(dbgs() << ".. fallback to legacy rules (no rules defined)\n");
return {LegalizeAction::UseLegacyRules, 0, LLT{}};
}
for (const auto &Rule : Rules) {
if (Rule.match(Query)) {
- DEBUG(dbgs() << ".. match\n");
+ LLVM_DEBUG(dbgs() << ".. match\n");
std::pair<unsigned, LLT> Mutation = Rule.determineMutation(Query);
- DEBUG(dbgs() << ".. .. " << (unsigned)Rule.getAction() << ", "
- << Mutation.first << ", " << Mutation.second << "\n");
+ LLVM_DEBUG(dbgs() << ".. .. " << (unsigned)Rule.getAction() << ", "
+ << Mutation.first << ", " << Mutation.second << "\n");
assert((Query.Types[Mutation.first] != Mutation.second ||
Rule.getAction() == Lower ||
Rule.getAction() == MoreElements ||
@@ -77,9 +77,9 @@ LegalizeActionStep LegalizeRuleSet::apply(const LegalityQuery &Query) const {
"Simple loop detected");
return {Rule.getAction(), Mutation.first, Mutation.second};
} else
- DEBUG(dbgs() << ".. no match\n");
+ LLVM_DEBUG(dbgs() << ".. no match\n");
}
- DEBUG(dbgs() << ".. unsupported\n");
+ LLVM_DEBUG(dbgs() << ".. unsupported\n");
return {LegalizeAction::Unsupported, 0, LLT{}};
}
@@ -247,11 +247,11 @@ unsigned LegalizerInfo::getOpcodeIdxForOpcode(unsigned Opcode) const {
unsigned LegalizerInfo::getActionDefinitionsIdx(unsigned Opcode) const {
unsigned OpcodeIdx = getOpcodeIdxForOpcode(Opcode);
if (unsigned Alias = RulesForOpcode[OpcodeIdx].getAlias()) {
- DEBUG(dbgs() << ".. opcode " << Opcode << " is aliased to " << Alias
- << "\n");
+ LLVM_DEBUG(dbgs() << ".. opcode " << Opcode << " is aliased to " << Alias
+ << "\n");
OpcodeIdx = getOpcodeIdxForOpcode(Alias);
- DEBUG(dbgs() << ".. opcode " << Alias << " is aliased to "
- << RulesForOpcode[OpcodeIdx].getAlias() << "\n");
+ LLVM_DEBUG(dbgs() << ".. opcode " << Alias << " is aliased to "
+ << RulesForOpcode[OpcodeIdx].getAlias() << "\n");
assert(RulesForOpcode[OpcodeIdx].getAlias() == 0 && "Cannot chain aliases");
}
@@ -305,13 +305,14 @@ LegalizerInfo::getAction(const LegalityQuery &Query) const {
for (unsigned i = 0; i < Query.Types.size(); ++i) {
auto Action = getAspectAction({Query.Opcode, i, Query.Types[i]});
if (Action.first != Legal) {
- DEBUG(dbgs() << ".. (legacy) Type " << i << " Action="
- << (unsigned)Action.first << ", " << Action.second << "\n");
+ LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i
+ << " Action=" << (unsigned)Action.first << ", "
+ << Action.second << "\n");
return {Action.first, i, Action.second};
} else
- DEBUG(dbgs() << ".. (legacy) Type " << i << " Legal\n");
+ LLVM_DEBUG(dbgs() << ".. (legacy) Type " << i << " Legal\n");
}
- DEBUG(dbgs() << ".. (legacy) Legal\n");
+ LLVM_DEBUG(dbgs() << ".. (legacy) Legal\n");
return {Legal, 0, LLT{}};
}
diff --git a/lib/CodeGen/GlobalISel/Localizer.cpp b/lib/CodeGen/GlobalISel/Localizer.cpp
index 8e16470b6f90..d7dc2c2c18c5 100644
--- a/lib/CodeGen/GlobalISel/Localizer.cpp
+++ b/lib/CodeGen/GlobalISel/Localizer.cpp
@@ -59,7 +59,7 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Localize instructions for: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Localize instructions for: " << MF.getName() << '\n');
init(MF);
@@ -73,7 +73,7 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
for (MachineInstr &MI : MBB) {
if (LocalizedInstrs.count(&MI) || !shouldLocalize(MI))
continue;
- DEBUG(dbgs() << "Should localize: " << MI);
+ LLVM_DEBUG(dbgs() << "Should localize: " << MI);
assert(MI.getDesc().getNumDefs() == 1 &&
"More than one definition not supported yet");
unsigned Reg = MI.getOperand(0).getReg();
@@ -85,12 +85,12 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
MachineOperand &MOUse = *MOIt++;
// Check if the use is already local.
MachineBasicBlock *InsertMBB;
- DEBUG(MachineInstr &MIUse = *MOUse.getParent();
- dbgs() << "Checking use: " << MIUse
- << " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n');
+ LLVM_DEBUG(MachineInstr &MIUse = *MOUse.getParent();
+ dbgs() << "Checking use: " << MIUse
+ << " #Opd: " << MIUse.getOperandNo(&MOUse) << '\n');
if (isLocalUse(MOUse, MI, InsertMBB))
continue;
- DEBUG(dbgs() << "Fixing non-local use\n");
+ LLVM_DEBUG(dbgs() << "Fixing non-local use\n");
Changed = true;
auto MBBAndReg = std::make_pair(InsertMBB, Reg);
auto NewVRegIt = MBBWithLocalDef.find(MBBAndReg);
@@ -111,10 +111,10 @@ bool Localizer::runOnMachineFunction(MachineFunction &MF) {
LocalizedMI->getOperand(0).setReg(NewReg);
NewVRegIt =
MBBWithLocalDef.insert(std::make_pair(MBBAndReg, NewReg)).first;
- DEBUG(dbgs() << "Inserted: " << *LocalizedMI);
+ LLVM_DEBUG(dbgs() << "Inserted: " << *LocalizedMI);
}
- DEBUG(dbgs() << "Update use with: " << printReg(NewVRegIt->second)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Update use with: " << printReg(NewVRegIt->second)
+ << '\n');
// Update the user reg.
MOUse.setReg(NewVRegIt->second);
}
diff --git a/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index ba59b148e7f9..03bfae7c4653 100644
--- a/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -76,7 +76,7 @@ RegBankSelect::RegBankSelect(Mode RunningMode)
if (RegBankSelectMode.getNumOccurrences() != 0) {
OptMode = RegBankSelectMode;
if (RegBankSelectMode != RunningMode)
- DEBUG(dbgs() << "RegBankSelect mode overrided by command line\n");
+ LLVM_DEBUG(dbgs() << "RegBankSelect mode overrided by command line\n");
}
}
@@ -123,11 +123,11 @@ bool RegBankSelect::assignmentMatch(
// Reg is free of assignment, a simple assignment will make the
// register bank to match.
OnlyAssign = CurRegBank == nullptr;
- DEBUG(dbgs() << "Does assignment already match: ";
- if (CurRegBank) dbgs() << *CurRegBank; else dbgs() << "none";
- dbgs() << " against ";
- assert(DesiredRegBrank && "The mapping must be valid");
- dbgs() << *DesiredRegBrank << '\n';);
+ LLVM_DEBUG(dbgs() << "Does assignment already match: ";
+ if (CurRegBank) dbgs() << *CurRegBank; else dbgs() << "none";
+ dbgs() << " against ";
+ assert(DesiredRegBrank && "The mapping must be valid");
+ dbgs() << *DesiredRegBrank << '\n';);
return CurRegBank == DesiredRegBrank;
}
@@ -160,8 +160,8 @@ bool RegBankSelect::repairReg(
// same types because the type is a placeholder when this function is called.
MachineInstr *MI =
MIRBuilder.buildInstrNoInsert(TargetOpcode::COPY).addDef(Dst).addUse(Src);
- DEBUG(dbgs() << "Copy: " << printReg(Src) << " to: " << printReg(Dst)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Copy: " << printReg(Src) << " to: " << printReg(Dst)
+ << '\n');
// TODO:
// Check if MI is legal. if not, we need to legalize all the
// instructions we are going to insert.
@@ -246,7 +246,7 @@ const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
MappingCost CurCost =
computeMapping(MI, *CurMapping, LocalRepairPts, &Cost);
if (CurCost < Cost) {
- DEBUG(dbgs() << "New best: " << CurCost << '\n');
+ LLVM_DEBUG(dbgs() << "New best: " << CurCost << '\n');
Cost = CurCost;
BestMapping = CurMapping;
RepairPts.clear();
@@ -398,11 +398,11 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
MappingCost Cost(MBFI ? MBFI->getBlockFreq(MI.getParent()) : 1);
bool Saturated = Cost.addLocalCost(InstrMapping.getCost());
assert(!Saturated && "Possible mapping saturated the cost");
- DEBUG(dbgs() << "Evaluating mapping cost for: " << MI);
- DEBUG(dbgs() << "With: " << InstrMapping << '\n');
+ LLVM_DEBUG(dbgs() << "Evaluating mapping cost for: " << MI);
+ LLVM_DEBUG(dbgs() << "With: " << InstrMapping << '\n');
RepairPts.clear();
if (BestCost && Cost > *BestCost) {
- DEBUG(dbgs() << "Mapping is too expensive from the start\n");
+ LLVM_DEBUG(dbgs() << "Mapping is too expensive from the start\n");
return Cost;
}
@@ -418,17 +418,17 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
unsigned Reg = MO.getReg();
if (!Reg)
continue;
- DEBUG(dbgs() << "Opd" << OpIdx << '\n');
+ LLVM_DEBUG(dbgs() << "Opd" << OpIdx << '\n');
const RegisterBankInfo::ValueMapping &ValMapping =
InstrMapping.getOperandMapping(OpIdx);
// If Reg is already properly mapped, this is free.
bool Assign;
if (assignmentMatch(Reg, ValMapping, Assign)) {
- DEBUG(dbgs() << "=> is free (match).\n");
+ LLVM_DEBUG(dbgs() << "=> is free (match).\n");
continue;
}
if (Assign) {
- DEBUG(dbgs() << "=> is free (simple assignment).\n");
+ LLVM_DEBUG(dbgs() << "=> is free (simple assignment).\n");
RepairPts.emplace_back(RepairingPlacement(MI, OpIdx, *TRI, *this,
RepairingPlacement::Reassign));
continue;
@@ -447,7 +447,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
// Check that the materialization of the repairing is possible.
if (!RepairPt.canMaterialize()) {
- DEBUG(dbgs() << "Mapping involves impossible repairing\n");
+ LLVM_DEBUG(dbgs() << "Mapping involves impossible repairing\n");
return MappingCost::ImpossibleCost();
}
@@ -510,7 +510,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
// Stop looking into what it takes to repair, this is already
// too expensive.
if (BestCost && Cost > *BestCost) {
- DEBUG(dbgs() << "Mapping is too expensive, stop processing\n");
+ LLVM_DEBUG(dbgs() << "Mapping is too expensive, stop processing\n");
return Cost;
}
@@ -520,7 +520,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
break;
}
}
- DEBUG(dbgs() << "Total cost is: " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "Total cost is: " << Cost << "\n");
return Cost;
}
@@ -560,14 +560,14 @@ bool RegBankSelect::applyMapping(
}
// Second, rewrite the instruction.
- DEBUG(dbgs() << "Actual mapping of the operands: " << OpdMapper << '\n');
+ LLVM_DEBUG(dbgs() << "Actual mapping of the operands: " << OpdMapper << '\n');
RBI->applyMapping(OpdMapper);
return true;
}
bool RegBankSelect::assignInstr(MachineInstr &MI) {
- DEBUG(dbgs() << "Assign: " << MI);
+ LLVM_DEBUG(dbgs() << "Assign: " << MI);
// Remember the repairing placement for all the operands.
SmallVector<RepairingPlacement, 4> RepairPts;
@@ -588,7 +588,7 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
// Make sure the mapping is valid for MI.
assert(BestMapping->verify(MI) && "Invalid instruction mapping");
- DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
+ LLVM_DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
// After this call, MI may not be valid anymore.
// Do not use it.
@@ -601,7 +601,7 @@ bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
MachineFunctionProperties::Property::FailedISel))
return false;
- DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
const Function &F = MF.getFunction();
Mode SaveOptMode = OptMode;
if (F.hasFnAttribute(Attribute::OptimizeNone))
diff --git a/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
index 8977c7d81aa8..dd15567ef1c1 100644
--- a/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
+++ b/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -73,7 +73,7 @@ bool RegisterBankInfo::verify(const TargetRegisterInfo &TRI) const {
const RegisterBank &RegBank = getRegBank(Idx);
assert(Idx == RegBank.getID() &&
"ID does not match the index in the array");
- DEBUG(dbgs() << "Verify " << RegBank << '\n');
+ LLVM_DEBUG(dbgs() << "Verify " << RegBank << '\n');
assert(RegBank.verify(TRI) && "RegBank is invalid");
}
#endif // NDEBUG
@@ -404,18 +404,18 @@ RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
MachineInstr &MI = OpdMapper.getMI();
MachineRegisterInfo &MRI = OpdMapper.getMRI();
- DEBUG(dbgs() << "Applying default-like mapping\n");
+ LLVM_DEBUG(dbgs() << "Applying default-like mapping\n");
for (unsigned OpIdx = 0,
EndIdx = OpdMapper.getInstrMapping().getNumOperands();
OpIdx != EndIdx; ++OpIdx) {
- DEBUG(dbgs() << "OpIdx " << OpIdx);
+ LLVM_DEBUG(dbgs() << "OpIdx " << OpIdx);
MachineOperand &MO = MI.getOperand(OpIdx);
if (!MO.isReg()) {
- DEBUG(dbgs() << " is not a register, nothing to be done\n");
+ LLVM_DEBUG(dbgs() << " is not a register, nothing to be done\n");
continue;
}
if (!MO.getReg()) {
- DEBUG(dbgs() << " is %%noreg, nothing to be done\n");
+ LLVM_DEBUG(dbgs() << " is %%noreg, nothing to be done\n");
continue;
}
assert(OpdMapper.getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns !=
@@ -427,14 +427,14 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
iterator_range<SmallVectorImpl<unsigned>::const_iterator> NewRegs =
OpdMapper.getVRegs(OpIdx);
if (NewRegs.begin() == NewRegs.end()) {
- DEBUG(dbgs() << " has not been repaired, nothing to be done\n");
+ LLVM_DEBUG(dbgs() << " has not been repaired, nothing to be done\n");
continue;
}
unsigned OrigReg = MO.getReg();
unsigned NewReg = *NewRegs.begin();
- DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr));
+ LLVM_DEBUG(dbgs() << " changed, replace " << printReg(OrigReg, nullptr));
MO.setReg(NewReg);
- DEBUG(dbgs() << " with " << printReg(NewReg, nullptr));
+ LLVM_DEBUG(dbgs() << " with " << printReg(NewReg, nullptr));
// The OperandsMapper creates plain scalar, we may have to fix that.
// Check if the types match and if not, fix that.
@@ -448,11 +448,11 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() &&
"Types with difference size cannot be handled by the default "
"mapping");
- DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
- << OrigTy);
+ LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
+ << OrigTy);
MRI.setType(NewReg, OrigTy);
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
}
diff --git a/lib/CodeGen/GlobalISel/Utils.cpp b/lib/CodeGen/GlobalISel/Utils.cpp
index f5b97656c3ba..055f4cb64588 100644
--- a/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/lib/CodeGen/GlobalISel/Utils.cpp
@@ -101,7 +101,7 @@ bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
if (!MO.isReg())
continue;
- DEBUG(dbgs() << "Converting operand: " << MO << '\n');
+ LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
assert(MO.isReg() && "Unsupported non-reg operand");
unsigned Reg = MO.getReg();
diff --git a/lib/CodeGen/GlobalMerge.cpp b/lib/CodeGen/GlobalMerge.cpp
index be4ba4d75a5d..cda5861f6760 100644
--- a/lib/CodeGen/GlobalMerge.cpp
+++ b/lib/CodeGen/GlobalMerge.cpp
@@ -442,8 +442,8 @@ bool GlobalMerge::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
Type *Int32Ty = Type::getInt32Ty(M.getContext());
auto &DL = M.getDataLayout();
- DEBUG(dbgs() << " Trying to merge set, starts with #"
- << GlobalSet.find_first() << "\n");
+ LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
+ << GlobalSet.find_first() << "\n");
ssize_t i = GlobalSet.find_first();
while (i != -1) {
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index 0ae28abc7f13..83c168bf6866 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -361,14 +361,14 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
getAnalysisIfAvailable<MachineModuleInfo>());
}
- DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
- << MF.getName() << "\'");
+ LLVM_DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
+ << MF.getName() << "\'");
if (FnNum < IfCvtFnStart || (IfCvtFnStop != -1 && FnNum > IfCvtFnStop)) {
- DEBUG(dbgs() << " skipped\n");
+ LLVM_DEBUG(dbgs() << " skipped\n");
return false;
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
MF.RenumberBlocks();
BBAnalysis.resize(MF.getNumBlockIDs());
@@ -406,14 +406,14 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
case ICSimpleFalse: {
bool isFalse = Kind == ICSimpleFalse;
if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break;
- DEBUG(dbgs() << "Ifcvt (Simple"
- << (Kind == ICSimpleFalse ? " false" : "")
- << "): " << printMBBReference(*BBI.BB) << " ("
- << ((Kind == ICSimpleFalse) ? BBI.FalseBB->getNumber()
- : BBI.TrueBB->getNumber())
- << ") ");
+ LLVM_DEBUG(dbgs() << "Ifcvt (Simple"
+ << (Kind == ICSimpleFalse ? " false" : "")
+ << "): " << printMBBReference(*BBI.BB) << " ("
+ << ((Kind == ICSimpleFalse) ? BBI.FalseBB->getNumber()
+ : BBI.TrueBB->getNumber())
+ << ") ");
RetVal = IfConvertSimple(BBI, Kind);
- DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
+ LLVM_DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
if (isFalse) ++NumSimpleFalse;
else ++NumSimple;
@@ -430,16 +430,16 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
if (DisableTriangleR && !isFalse && isRev) break;
if (DisableTriangleF && isFalse && !isRev) break;
if (DisableTriangleFR && isFalse && isRev) break;
- DEBUG(dbgs() << "Ifcvt (Triangle");
+ LLVM_DEBUG(dbgs() << "Ifcvt (Triangle");
if (isFalse)
- DEBUG(dbgs() << " false");
+ LLVM_DEBUG(dbgs() << " false");
if (isRev)
- DEBUG(dbgs() << " rev");
- DEBUG(dbgs() << "): " << printMBBReference(*BBI.BB)
- << " (T:" << BBI.TrueBB->getNumber()
- << ",F:" << BBI.FalseBB->getNumber() << ") ");
+ LLVM_DEBUG(dbgs() << " rev");
+ LLVM_DEBUG(dbgs() << "): " << printMBBReference(*BBI.BB)
+ << " (T:" << BBI.TrueBB->getNumber()
+ << ",F:" << BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertTriangle(BBI, Kind);
- DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
+ LLVM_DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
if (isFalse) {
if (isRev) ++NumTriangleFRev;
@@ -453,24 +453,25 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
}
case ICDiamond:
if (DisableDiamond) break;
- DEBUG(dbgs() << "Ifcvt (Diamond): " << printMBBReference(*BBI.BB)
- << " (T:" << BBI.TrueBB->getNumber()
- << ",F:" << BBI.FalseBB->getNumber() << ") ");
+ LLVM_DEBUG(dbgs() << "Ifcvt (Diamond): " << printMBBReference(*BBI.BB)
+ << " (T:" << BBI.TrueBB->getNumber()
+ << ",F:" << BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2,
Token->TClobbersPred,
Token->FClobbersPred);
- DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
+ LLVM_DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) ++NumDiamonds;
break;
case ICForkedDiamond:
if (DisableForkedDiamond) break;
- DEBUG(dbgs() << "Ifcvt (Forked Diamond): " << printMBBReference(*BBI.BB)
- << " (T:" << BBI.TrueBB->getNumber()
- << ",F:" << BBI.FalseBB->getNumber() << ") ");
+ LLVM_DEBUG(dbgs() << "Ifcvt (Forked Diamond): "
+ << printMBBReference(*BBI.BB)
+ << " (T:" << BBI.TrueBB->getNumber()
+ << ",F:" << BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertForkedDiamond(BBI, Kind, NumDups, NumDups2,
Token->TClobbersPred,
Token->FClobbersPred);
- DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
+ LLVM_DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) ++NumForkedDiamonds;
break;
}
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 3939e2359951..478ea9b2bc55 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -336,7 +336,7 @@ void InlineSpiller::collectRegsToSpill() {
if (isRegToSpill(SnipReg))
continue;
RegsToSpill.push_back(SnipReg);
- DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
+ LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
++NumSnippets;
}
}
@@ -388,8 +388,8 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
LiveInterval &OrigLI = LIS.getInterval(Original);
VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
- DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
- << *StackInt << '\n');
+ LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
+ << *StackInt << '\n');
// We are going to spill SrcVNI immediately after its def, so clear out
// any later spills of the same value.
@@ -410,7 +410,7 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
MRI.getRegClass(SrcReg), &TRI);
--MII; // Point to store instruction.
LIS.InsertMachineInstrInMaps(*MII);
- DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
+ LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
++NumSpills;
@@ -429,8 +429,8 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
LiveInterval *LI;
std::tie(LI, VNI) = WorkList.pop_back_val();
unsigned Reg = LI->reg;
- DEBUG(dbgs() << "Checking redundant spills for "
- << VNI->id << '@' << VNI->def << " in " << *LI << '\n');
+ LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
+ << VNI->def << " in " << *LI << '\n');
// Regs to spill are taken care of.
if (isRegToSpill(Reg))
@@ -438,7 +438,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
// Add all of VNI's live range to StackInt.
StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
- DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
+ LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
// Find all spills and copies of VNI.
for (MachineRegisterInfo::use_instr_nodbg_iterator
@@ -466,7 +466,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
// Erase spills.
int FI;
if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
- DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
+ LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
// eliminateDeadDefs won't normally remove stores, so switch opcode.
MI.setDesc(TII.get(TargetOpcode::KILL));
DeadDefs.push_back(&MI);
@@ -528,13 +528,13 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
if (!ParentVNI) {
- DEBUG(dbgs() << "\tadding <undef> flags: ");
+ LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
MO.setIsUndef();
}
- DEBUG(dbgs() << UseIdx << '\t' << MI);
+ LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
return true;
}
@@ -548,7 +548,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
markValueUsed(&VirtReg, ParentVNI);
- DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
+ LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
return false;
}
@@ -556,7 +556,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
// same register for uses and defs.
if (RI.Tied) {
markValueUsed(&VirtReg, ParentVNI);
- DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
+ LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
return false;
}
@@ -582,8 +582,8 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
NewMI->setDebugLoc(MI.getDebugLoc());
(void)DefIdx;
- DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
- << *LIS.getInstructionFromIndex(DefIdx));
+ LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
+ << *LIS.getInstructionFromIndex(DefIdx));
// Replace operands
for (const auto &OpPair : Ops) {
@@ -593,7 +593,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
MO.setIsKill();
}
}
- DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
+ LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
++NumRemats;
return true;
@@ -638,7 +638,7 @@ void InlineSpiller::reMaterializeAll() {
MI->addRegisterDead(Reg, &TRI);
if (!MI->allDefsAreDead())
continue;
- DEBUG(dbgs() << "All defs dead: " << *MI);
+ LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
DeadDefs.push_back(MI);
}
}
@@ -647,7 +647,7 @@ void InlineSpiller::reMaterializeAll() {
// deleted here.
if (DeadDefs.empty())
return;
- DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
+ LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
// LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
@@ -670,7 +670,8 @@ void InlineSpiller::reMaterializeAll() {
RegsToSpill[ResultPos++] = Reg;
}
RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
- DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
+ LLVM_DEBUG(dbgs() << RegsToSpill.size()
+ << " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
@@ -692,7 +693,7 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
if (!IsLoad)
HSpiller.rmFromMergeableSpills(*MI, StackSlot);
- DEBUG(dbgs() << "Coalescing stack access: " << *MI);
+ LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
LIS.RemoveMachineInstrFromMaps(*MI);
MI->eraseFromParent();
@@ -849,8 +850,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
FoldMI->RemoveOperand(i - 1);
}
- DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
- "folded"));
+ LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
+ "folded"));
if (!WasCopy)
++NumFolded;
@@ -873,8 +874,8 @@ void InlineSpiller::insertReload(unsigned NewVReg,
LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
- DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
- NewVReg));
+ LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
+ NewVReg));
++NumReloads;
}
@@ -913,8 +914,8 @@ void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());
- DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
- "spill"));
+ LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
+ "spill"));
++NumSpills;
if (IsRealSpill)
HSpiller.addToMergeableSpills(*std::next(MI), StackSlot, Original);
@@ -922,7 +923,7 @@ void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
- DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
+ LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
LiveInterval &OldLI = LIS.getInterval(Reg);
// Iterate over instructions using Reg.
@@ -935,7 +936,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
if (MI->isDebugInstr()) {
// Modify DBG_VALUE now that the value is in a spill slot.
MachineBasicBlock *MBB = MI->getParent();
- DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
+ LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
MBB->erase(MI);
continue;
@@ -966,7 +967,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
if (SibReg && isSibling(SibReg)) {
// This may actually be a copy between snippets.
if (isRegToSpill(SibReg)) {
- DEBUG(dbgs() << "Found new snippet copy: " << *MI);
+ LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
SnippetCopies.insert(MI);
continue;
}
@@ -1009,7 +1010,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
hasLiveDef = true;
}
}
- DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
+ LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');
// FIXME: Use a second vreg if instruction has no tied ops.
if (RI.Writes)
@@ -1035,7 +1036,7 @@ void InlineSpiller::spillAll() {
for (unsigned Reg : RegsToSpill)
StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
StackInt->getValNumInfo(0));
- DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
+ LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');
// Spill around uses of all RegsToSpill.
for (unsigned Reg : RegsToSpill)
@@ -1043,7 +1044,7 @@ void InlineSpiller::spillAll() {
// Hoisted spills may cause dead code.
if (!DeadDefs.empty()) {
- DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
+ LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
}
@@ -1075,10 +1076,10 @@ void InlineSpiller::spill(LiveRangeEdit &edit) {
StackSlot = VRM.getStackSlot(Original);
StackInt = nullptr;
- DEBUG(dbgs() << "Inline spilling "
- << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
- << ':' << edit.getParent()
- << "\nFrom original " << printReg(Original) << '\n');
+ LLVM_DEBUG(dbgs() << "Inline spilling "
+ << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
+ << ':' << edit.getParent() << "\nFrom original "
+ << printReg(Original) << '\n');
assert(edit.getParent().isSpillable() &&
"Attempting to spill already spilled value.");
assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");
@@ -1262,11 +1263,11 @@ void HoistSpillHelper::getVisitOrders(
"Orders have different size with WorkSet");
#ifndef NDEBUG
- DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
+ LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
for (; RIt != Orders.rend(); RIt++)
- DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
+ LLVM_DEBUG(dbgs() << "\n");
#endif
}
@@ -1375,7 +1376,7 @@ void HoistSpillHelper::runHoistSpills(
// Current Block is the BB containing the new hoisted spill. Add it to
// SpillsToKeep. LiveReg is the source of the new spill.
SpillsToKeep[*RIt] = LiveReg;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "spills in BB: ";
for (const auto Rspill : SpillsInSubTree)
dbgs() << Rspill->getBlock()->getNumber() << " ";
@@ -1431,7 +1432,7 @@ void HoistSpillHelper::hoistAllSpills() {
if (Ent.second.empty())
continue;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
<< "Equal spills in BB: ";
for (const auto spill : EqValSpills)
@@ -1446,7 +1447,7 @@ void HoistSpillHelper::hoistAllSpills() {
runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Finally inserted spills in BB: ";
for (const auto Ispill : SpillsToIns)
dbgs() << Ispill.first->getNumber() << " ";
diff --git a/lib/CodeGen/InterleavedAccessPass.cpp b/lib/CodeGen/InterleavedAccessPass.cpp
index e3dc96494739..fd2ff162630a 100644
--- a/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/lib/CodeGen/InterleavedAccessPass.cpp
@@ -332,7 +332,7 @@ bool InterleavedAccess::lowerInterleavedLoad(
if (!tryReplaceExtracts(Extracts, Shuffles))
return false;
- DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");
// Try to create target specific intrinsics to replace the load and shuffles.
if (!TLI->lowerInterleavedLoad(LI, Shuffles, Indices, Factor))
@@ -424,7 +424,7 @@ bool InterleavedAccess::lowerInterleavedStore(
if (!isReInterleaveMask(SVI->getShuffleMask(), Factor, MaxFactor, OpNumElts))
return false;
- DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");
// Try to create target specific intrinsics to replace the store and shuffle.
if (!TLI->lowerInterleavedStore(SI, SVI, Factor))
@@ -441,7 +441,7 @@ bool InterleavedAccess::runOnFunction(Function &F) {
if (!TPC || !LowerInterleavedAccesses)
return false;
- DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto &TM = TPC->getTM<TargetMachine>();
diff --git a/lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp b/lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp
index 996d40ca6e1e..5b52cc66a297 100644
--- a/lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp
+++ b/lib/CodeGen/LazyMachineBlockFrequencyInfo.cpp
@@ -57,23 +57,23 @@ MachineBlockFrequencyInfo &
LazyMachineBlockFrequencyInfoPass::calculateIfNotAvailable() const {
auto *MBFI = getAnalysisIfAvailable<MachineBlockFrequencyInfo>();
if (MBFI) {
- DEBUG(dbgs() << "MachineBlockFrequencyInfo is available\n");
+ LLVM_DEBUG(dbgs() << "MachineBlockFrequencyInfo is available\n");
return *MBFI;
}
auto &MBPI = getAnalysis<MachineBranchProbabilityInfo>();
auto *MLI = getAnalysisIfAvailable<MachineLoopInfo>();
auto *MDT = getAnalysisIfAvailable<MachineDominatorTree>();
- DEBUG(dbgs() << "Building MachineBlockFrequencyInfo on the fly\n");
- DEBUG(if (MLI) dbgs() << "LoopInfo is available\n");
+ LLVM_DEBUG(dbgs() << "Building MachineBlockFrequencyInfo on the fly\n");
+ LLVM_DEBUG(if (MLI) dbgs() << "LoopInfo is available\n");
if (!MLI) {
- DEBUG(dbgs() << "Building LoopInfo on the fly\n");
+ LLVM_DEBUG(dbgs() << "Building LoopInfo on the fly\n");
// First create a dominator tree.
- DEBUG(if (MDT) dbgs() << "DominatorTree is available\n");
+ LLVM_DEBUG(if (MDT) dbgs() << "DominatorTree is available\n");
if (!MDT) {
- DEBUG(dbgs() << "Building DominatorTree on the fly\n");
+ LLVM_DEBUG(dbgs() << "Building DominatorTree on the fly\n");
OwnedMDT = make_unique<MachineDominatorTree>();
OwnedMDT->getBase().recalculate(*MF);
MDT = OwnedMDT.get();
diff --git a/lib/CodeGen/LiveDebugValues.cpp b/lib/CodeGen/LiveDebugValues.cpp
index 0554908584ee..82d8580ab750 100644
--- a/lib/CodeGen/LiveDebugValues.cpp
+++ b/lib/CodeGen/LiveDebugValues.cpp
@@ -480,8 +480,8 @@ void LiveDebugValues::transferSpillInst(MachineInstr &MI,
// Check if the register is the location of a debug value.
for (unsigned ID : OpenRanges.getVarLocs()) {
if (VarLocIDs[ID].isDescribedByReg() == Reg) {
- DEBUG(dbgs() << "Spilling Register " << printReg(Reg, TRI) << '('
- << VarLocIDs[ID].Var.getVar()->getName() << ")\n");
+ LLVM_DEBUG(dbgs() << "Spilling Register " << printReg(Reg, TRI) << '('
+ << VarLocIDs[ID].Var.getVar()->getName() << ")\n");
// Create a DBG_VALUE instruction to describe the Var in its spilled
// location, but don't insert it yet to avoid invalidating the
@@ -494,8 +494,8 @@ void LiveDebugValues::transferSpillInst(MachineInstr &MI,
MachineInstr *SpDMI =
BuildMI(*MF, DMI->getDebugLoc(), DMI->getDesc(), true, SpillBase,
DMI->getDebugVariable(), SpillExpr);
- DEBUG(dbgs() << "Creating DBG_VALUE inst for spill: ";
- SpDMI->print(dbgs(), false, TII));
+ LLVM_DEBUG(dbgs() << "Creating DBG_VALUE inst for spill: ";
+ SpDMI->print(dbgs(), false, TII));
// The newly created DBG_VALUE instruction SpDMI must be inserted after
// MI. Keep track of the pairing.
@@ -527,10 +527,12 @@ bool LiveDebugValues::transferTerminatorInst(MachineInstr &MI,
if (OpenRanges.empty())
return false;
- DEBUG(for (unsigned ID : OpenRanges.getVarLocs()) {
- // Copy OpenRanges to OutLocs, if not already present.
- dbgs() << "Add to OutLocs: "; VarLocIDs[ID].dump();
- });
+ LLVM_DEBUG(for (unsigned ID
+ : OpenRanges.getVarLocs()) {
+ // Copy OpenRanges to OutLocs, if not already present.
+ dbgs() << "Add to OutLocs: ";
+ VarLocIDs[ID].dump();
+ });
VarLocSet &VLS = OutLocs[CurMBB];
Changed = VLS |= OpenRanges.getVarLocs();
OpenRanges.clear();
@@ -556,7 +558,7 @@ bool LiveDebugValues::transfer(MachineInstr &MI, OpenRangesSet &OpenRanges,
bool LiveDebugValues::join(MachineBasicBlock &MBB, VarLocInMBB &OutLocs,
VarLocInMBB &InLocs, const VarLocMap &VarLocIDs,
SmallPtrSet<const MachineBasicBlock *, 16> &Visited) {
- DEBUG(dbgs() << "join MBB: " << MBB.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getName() << "\n");
bool Changed = false;
VarLocSet InLocsT; // Temporary incoming locations.
@@ -616,7 +618,7 @@ bool LiveDebugValues::join(MachineBasicBlock &MBB, VarLocInMBB &OutLocs,
DMI->getDebugVariable(), DMI->getDebugExpression());
if (DMI->isIndirectDebugValue())
MI->getOperand(1).setImm(DMI->getOperand(1).getImm());
- DEBUG(dbgs() << "Inserted: "; MI->dump(););
+ LLVM_DEBUG(dbgs() << "Inserted: "; MI->dump(););
ILS.set(ID);
++NumInserted;
Changed = true;
@@ -627,7 +629,7 @@ bool LiveDebugValues::join(MachineBasicBlock &MBB, VarLocInMBB &OutLocs,
/// Calculate the liveness information for the given machine function and
/// extend ranges across basic blocks.
bool LiveDebugValues::ExtendRanges(MachineFunction &MF) {
- DEBUG(dbgs() << "\nDebug Range Extension\n");
+ LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n");
bool Changed = false;
bool OLChanged = false;
@@ -658,8 +660,8 @@ bool LiveDebugValues::ExtendRanges(MachineFunction &MF) {
transfer(MI, OpenRanges, OutLocs, VarLocIDs, Spills,
/*transferSpills=*/false);
- DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs, "OutLocs after initialization",
- dbgs()));
+ LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs,
+ "OutLocs after initialization", dbgs()));
ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
unsigned int RPONumber = 0;
@@ -679,7 +681,7 @@ bool LiveDebugValues::ExtendRanges(MachineFunction &MF) {
// thing twice. We could avoid this with a custom priority queue, but this
// is probably not worth it.
SmallPtrSet<MachineBasicBlock *, 16> OnPending;
- DEBUG(dbgs() << "Processing Worklist\n");
+ LLVM_DEBUG(dbgs() << "Processing Worklist\n");
while (!Worklist.empty()) {
MachineBasicBlock *MBB = OrderToBB[Worklist.top()];
Worklist.pop();
@@ -701,10 +703,10 @@ bool LiveDebugValues::ExtendRanges(MachineFunction &MF) {
SP.DebugInst);
Spills.clear();
- DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs,
- "OutLocs after propagating", dbgs()));
- DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs,
- "InLocs after propagating", dbgs()));
+ LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs,
+ "OutLocs after propagating", dbgs()));
+ LLVM_DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs,
+ "InLocs after propagating", dbgs()));
if (OLChanged) {
OLChanged = false;
@@ -721,8 +723,8 @@ bool LiveDebugValues::ExtendRanges(MachineFunction &MF) {
assert(Pending.empty() && "Pending should be empty");
}
- DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs, "Final OutLocs", dbgs()));
- DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs, "Final InLocs", dbgs()));
+ LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs, "Final OutLocs", dbgs()));
+ LLVM_DEBUG(printVarLocInMBB(MF, InLocs, VarLocIDs, "Final InLocs", dbgs()));
return Changed;
}
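
Every hunk in this patch follows the same mechanical pattern: a DEBUG(...) call site becomes LLVM_DEBUG(...), with clang-format re-wrapping the streamed arguments. For context, a minimal sketch of how such a call site behaves in a +Asserts build is below; the pass name, DEBUG_TYPE string, and helper function are illustrative only and do not come from the files in this diff.

    // Roughly what llvm/Support/Debug.h provides (sketch, not the verbatim header):
    //   #define LLVM_DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
    //   #define DEBUG_WITH_TYPE(TYPE, X)                                        \
    //     do {                                                                  \
    //       if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(TYPE)) { X; }   \
    //     } while (false)
    // so the body runs only when -debug (or -debug-only=<DEBUG_TYPE>) is active,
    // and compiles away entirely in NDEBUG builds.
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "example-pass" // hypothetical pass name

    static void reportSpill(unsigned Reg) {
      // Guarded debug output; no cost in release builds.
      LLVM_DEBUG(llvm::dbgs() << "Spilling register " << Reg << '\n');
    }
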
diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp
index 9940b2949319..91c5e8565c07 100644
--- a/lib/CodeGen/LiveDebugVariables.cpp
+++ b/lib/CodeGen/LiveDebugVariables.cpp
@@ -511,7 +511,7 @@ bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) {
if (MI.getNumOperands() != 4 ||
!(MI.getOperand(1).isReg() || MI.getOperand(1).isImm()) ||
!MI.getOperand(2).isMetadata()) {
- DEBUG(dbgs() << "Can't handle " << MI);
+ LLVM_DEBUG(dbgs() << "Can't handle " << MI);
return false;
}
@@ -530,8 +530,8 @@ bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) {
// The DBG_VALUE is described by a virtual register that does not have a
// live interval. Discard the DBG_VALUE.
Discard = true;
- DEBUG(dbgs() << "Discarding debug info (no LIS interval): "
- << Idx << " " << MI);
+ LLVM_DEBUG(dbgs() << "Discarding debug info (no LIS interval): " << Idx
+ << " " << MI);
} else {
// The DBG_VALUE is only valid if either Reg is live out from Idx, or Reg
// is defined dead at Idx (where Idx is the slot index for the instruction
@@ -542,8 +542,8 @@ bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) {
// We have found a DBG_VALUE with the value in a virtual register that
// is not live. Discard the DBG_VALUE.
Discard = true;
- DEBUG(dbgs() << "Discarding debug info (reg not live): "
- << Idx << " " << MI);
+ LLVM_DEBUG(dbgs() << "Discarding debug info (reg not live): " << Idx
+ << " " << MI);
}
}
}
@@ -688,7 +688,8 @@ void UserValue::addDefsFromCopies(
if (CopyValues.empty())
return;
- DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI << '\n');
+ LLVM_DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI
+ << '\n');
// Try to add defs of the copied values for each kill point.
for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
@@ -702,8 +703,8 @@ void UserValue::addDefsFromCopies(
LocMap::iterator I = locInts.find(Idx);
if (I.valid() && I.start() <= Idx)
continue;
- DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #"
- << DstVNI->id << " in " << *DstLI << '\n');
+ LLVM_DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #"
+ << DstVNI->id << " in " << *DstLI << '\n');
MachineInstr *CopyMI = LIS.getInstructionFromIndex(DstVNI->def);
assert(CopyMI && CopyMI->isCopy() && "Bad copy value");
unsigned LocNo = getLocationNo(CopyMI->getOperand(0));
@@ -851,12 +852,12 @@ bool LDVImpl::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
LIS = &pass.getAnalysis<LiveIntervals>();
TRI = mf.getSubtarget().getRegisterInfo();
- DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
- << mf.getName() << " **********\n");
+ LLVM_DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
+ << mf.getName() << " **********\n");
bool Changed = collectDebugValues(mf);
computeIntervals();
- DEBUG(print(dbgs()));
+ LLVM_DEBUG(print(dbgs()));
ModifiedMF = Changed;
return Changed;
}
@@ -902,7 +903,7 @@ LiveDebugVariables::~LiveDebugVariables() {
bool
UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
LiveIntervals& LIS) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Splitting Loc" << OldLocNo << '\t';
print(dbgs(), nullptr);
});
@@ -985,8 +986,8 @@ UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
while (LocMapI.valid()) {
DbgValueLocation v = LocMapI.value();
if (v.locNo() == OldLocNo) {
- DEBUG(dbgs() << "Erasing [" << LocMapI.start() << ';'
- << LocMapI.stop() << ")\n");
+ LLVM_DEBUG(dbgs() << "Erasing [" << LocMapI.start() << ';'
+ << LocMapI.stop() << ")\n");
LocMapI.erase();
} else {
if (v.locNo() > OldLocNo)
@@ -995,7 +996,10 @@ UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
}
}
- DEBUG({dbgs() << "Split result: \t"; print(dbgs(), nullptr);});
+ LLVM_DEBUG({
+ dbgs() << "Split result: \t";
+ print(dbgs(), nullptr);
+ });
return DidChange;
}
@@ -1213,11 +1217,11 @@ void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
if (trimmedDefs.count(Start))
Start = Start.getPrevIndex();
- DEBUG(dbgs() << "\t[" << Start << ';' << Stop << "):" << Loc.locNo());
+ LLVM_DEBUG(dbgs() << "\t[" << Start << ';' << Stop << "):" << Loc.locNo());
MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start)->getIterator();
SlotIndex MBBEnd = LIS.getMBBEndIdx(&*MBB);
- DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd);
+ LLVM_DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd);
insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI);
// This interval may span multiple basic blocks.
// Insert a DBG_VALUE into each one.
@@ -1227,10 +1231,10 @@ void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
if (++MBB == MFEnd)
break;
MBBEnd = LIS.getMBBEndIdx(&*MBB);
- DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd);
+ LLVM_DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd);
insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI);
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
if (MBB == MFEnd)
break;
@@ -1239,13 +1243,13 @@ void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS,
}
void LDVImpl::emitDebugValues(VirtRegMap *VRM) {
- DEBUG(dbgs() << "********** EMITTING LIVE DEBUG VARIABLES **********\n");
+ LLVM_DEBUG(dbgs() << "********** EMITTING LIVE DEBUG VARIABLES **********\n");
if (!MF)
return;
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
BitVector SpilledLocations;
for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
- DEBUG(userValues[i]->print(dbgs(), TRI));
+ LLVM_DEBUG(userValues[i]->print(dbgs(), TRI));
userValues[i]->rewriteLocations(*VRM, *TRI, SpilledLocations);
userValues[i]->emitDebugValues(VRM, *LIS, *TII, *TRI, SpilledLocations);
}
diff --git a/lib/CodeGen/LiveIntervals.cpp b/lib/CodeGen/LiveIntervals.cpp
index ed0a62b99f24..f4a56a343a9d 100644
--- a/lib/CodeGen/LiveIntervals.cpp
+++ b/lib/CodeGen/LiveIntervals.cpp
@@ -148,7 +148,7 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
getRegUnit(i);
}
- DEBUG(dump());
+ LLVM_DEBUG(dump());
return true;
}
@@ -311,7 +311,7 @@ void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
/// entering the entry block or a landing pad.
void LiveIntervals::computeLiveInRegUnits() {
RegUnitRanges.resize(TRI->getNumRegUnits());
- DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");
+ LLVM_DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");
// Keep track of the live range sets allocated.
SmallVector<unsigned, 8> NewRanges;
@@ -324,7 +324,7 @@ void LiveIntervals::computeLiveInRegUnits() {
// Create phi-defs at Begin for all live-in registers.
SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
- DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
+ LLVM_DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
for (const auto &LI : MBB.liveins()) {
for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
unsigned Unit = *Units;
@@ -336,12 +336,12 @@ void LiveIntervals::computeLiveInRegUnits() {
}
VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
(void)VNI;
- DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << '#' << VNI->id);
+ LLVM_DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << '#' << VNI->id);
}
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
- DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");
+ LLVM_DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");
// Compute the 'normal' part of the ranges.
for (unsigned Unit : NewRanges)
@@ -397,7 +397,7 @@ static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
}
// VNI is live-in to MBB.
- DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
+ LLVM_DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));
// Make sure VNI is live-out from the predecessors.
@@ -414,7 +414,7 @@ static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
bool LiveIntervals::shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead) {
- DEBUG(dbgs() << "Shrink: " << *li << '\n');
+ LLVM_DEBUG(dbgs() << "Shrink: " << *li << '\n');
assert(TargetRegisterInfo::isVirtualRegister(li->reg)
&& "Can only shrink virtual registers");
@@ -443,9 +443,10 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
// This shouldn't happen: readsVirtualRegister returns true, but there is
// no live value. It is likely caused by a target getting <undef> flags
// wrong.
- DEBUG(dbgs() << Idx << '\t' << UseMI
- << "Warning: Instr claims to read non-existent value in "
- << *li << '\n');
+ LLVM_DEBUG(
+ dbgs() << Idx << '\t' << UseMI
+ << "Warning: Instr claims to read non-existent value in "
+ << *li << '\n');
continue;
}
// Special case: An early-clobber tied operand reads and writes the
@@ -466,7 +467,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
// Handle dead values.
bool CanSeparate = computeDeadValues(*li, dead);
- DEBUG(dbgs() << "Shrunk: " << *li << '\n');
+ LLVM_DEBUG(dbgs() << "Shrunk: " << *li << '\n');
return CanSeparate;
}
@@ -496,7 +497,7 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
// This is a dead PHI. Remove it.
VNI->markUnused();
LI.removeSegment(I);
- DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
+ LLVM_DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
MayHaveSplitComponents = true;
} else {
// This is a dead def. Make sure the instruction knows.
@@ -504,7 +505,7 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
assert(MI && "No instruction defining live value");
MI->addRegisterDead(LI.reg, TRI);
if (dead && MI->allDefsAreDead()) {
- DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
dead->push_back(MI);
}
}
@@ -513,7 +514,7 @@ bool LiveIntervals::computeDeadValues(LiveInterval &LI,
}
void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
- DEBUG(dbgs() << "Shrink: " << SR << '\n');
+ LLVM_DEBUG(dbgs() << "Shrink: " << SR << '\n');
assert(TargetRegisterInfo::isVirtualRegister(Reg)
&& "Can only shrink virtual registers");
// Find all the values used, including PHI kills.
@@ -572,13 +573,14 @@ void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
- DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
+ LLVM_DEBUG(dbgs() << "Dead PHI at " << VNI->def
+ << " may separate interval\n");
VNI->markUnused();
SR.removeSegment(*Segment);
}
}
- DEBUG(dbgs() << "Shrunk: " << SR << '\n');
+ LLVM_DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}
void LiveIntervals::extendToIndices(LiveRange &LR,
@@ -943,7 +945,8 @@ public:
/// Update all live ranges touched by MI, assuming a move from OldIdx to
/// NewIdx.
void updateAllRanges(MachineInstr *MI) {
- DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
+ LLVM_DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": "
+ << *MI);
bool hasRegMask = false;
for (MachineOperand &MO : MI->operands()) {
if (MO.isRegMask())
@@ -993,7 +996,7 @@ private:
void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
if (!Updated.insert(&LR).second)
return;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << " ";
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
dbgs() << printReg(Reg);
@@ -1008,7 +1011,7 @@ private:
handleMoveDown(LR);
else
handleMoveUp(LR, Reg, LaneMask);
- DEBUG(dbgs() << " -->\t" << LR << '\n');
+ LLVM_DEBUG(dbgs() << " -->\t" << LR << '\n');
LR.verify();
}
@@ -1611,7 +1614,7 @@ void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
unsigned NumComp = ConEQ.Classify(LI);
if (NumComp <= 1)
return;
- DEBUG(dbgs() << " Split " << NumComp << " components: " << LI << '\n');
+ LLVM_DEBUG(dbgs() << " Split " << NumComp << " components: " << LI << '\n');
unsigned Reg = LI.reg;
const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
for (unsigned I = 1; I < NumComp; ++I) {
diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp
index 22f6b3260f41..8dfe8b68c3af 100644
--- a/lib/CodeGen/LiveRangeEdit.cpp
+++ b/lib/CodeGen/LiveRangeEdit.cpp
@@ -220,8 +220,8 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
if (!DefMI->isSafeToMove(nullptr, SawStore))
return false;
- DEBUG(dbgs() << "Try to fold single def: " << *DefMI
- << " into single use: " << *UseMI);
+ LLVM_DEBUG(dbgs() << "Try to fold single def: " << *DefMI
+ << " into single use: " << *UseMI);
SmallVector<unsigned, 8> Ops;
if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
@@ -230,7 +230,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
MachineInstr *FoldMI = TII.foldMemoryOperand(*UseMI, Ops, *DefMI, &LIS);
if (!FoldMI)
return false;
- DEBUG(dbgs() << " folded: " << *FoldMI);
+ LLVM_DEBUG(dbgs() << " folded: " << *FoldMI);
LIS.ReplaceMachineInstrInMaps(*UseMI, *FoldMI);
UseMI->eraseFromParent();
DefMI->addRegisterDead(LI->reg, nullptr);
@@ -267,18 +267,18 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
}
// Never delete inline asm.
if (MI->isInlineAsm()) {
- DEBUG(dbgs() << "Won't delete: " << Idx << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << "Won't delete: " << Idx << '\t' << *MI);
return;
}
// Use the same criteria as DeadMachineInstructionElim.
bool SawStore = false;
if (!MI->isSafeToMove(nullptr, SawStore)) {
- DEBUG(dbgs() << "Can't delete: " << Idx << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << "Can't delete: " << Idx << '\t' << *MI);
return;
}
- DEBUG(dbgs() << "Deleting dead def " << Idx << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << "Deleting dead def " << Idx << '\t' << *MI);
// Collect virtual registers to be erased after MI is gone.
SmallVector<unsigned, 8> RegsToErase;
@@ -352,7 +352,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
continue;
MI->RemoveOperand(i-1);
}
- DEBUG(dbgs() << "Converted physregs to:\t" << *MI);
+ LLVM_DEBUG(dbgs() << "Converted physregs to:\t" << *MI);
} else {
// If the dest of MI is an original reg and MI is reMaterializable,
// don't delete the inst. Replace the dest with a new reg, and keep
@@ -465,7 +465,7 @@ LiveRangeEdit::calculateRegClassAndHint(MachineFunction &MF,
for (unsigned I = 0, Size = size(); I < Size; ++I) {
LiveInterval &LI = LIS.getInterval(get(I));
if (MRI.recomputeRegClass(LI.reg))
- DEBUG({
+ LLVM_DEBUG({
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
dbgs() << "Inflated " << printReg(LI.reg) << " to "
<< TRI->getRegClassName(MRI.getRegClass(LI.reg)) << '\n';
diff --git a/lib/CodeGen/LiveRangeShrink.cpp b/lib/CodeGen/LiveRangeShrink.cpp
index 1aa3bbd90a99..f75d513c89f5 100644
--- a/lib/CodeGen/LiveRangeShrink.cpp
+++ b/lib/CodeGen/LiveRangeShrink.cpp
@@ -111,7 +111,7 @@ bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
InstOrderMap IOM;
// Map from register to instruction order (value of IOM) where the
diff --git a/lib/CodeGen/LiveRegMatrix.cpp b/lib/CodeGen/LiveRegMatrix.cpp
index d8faf75466c1..e72977b02675 100644
--- a/lib/CodeGen/LiveRegMatrix.cpp
+++ b/lib/CodeGen/LiveRegMatrix.cpp
@@ -102,37 +102,37 @@ static bool foreachUnit(const TargetRegisterInfo *TRI,
}
void LiveRegMatrix::assign(LiveInterval &VirtReg, unsigned PhysReg) {
- DEBUG(dbgs() << "assigning " << printReg(VirtReg.reg, TRI)
- << " to " << printReg(PhysReg, TRI) << ':');
+ LLVM_DEBUG(dbgs() << "assigning " << printReg(VirtReg.reg, TRI) << " to "
+ << printReg(PhysReg, TRI) << ':');
assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
- foreachUnit(TRI, VirtReg, PhysReg, [&](unsigned Unit,
- const LiveRange &Range) {
- DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << ' ' << Range);
- Matrix[Unit].unify(VirtReg, Range);
- return false;
- });
+ foreachUnit(
+ TRI, VirtReg, PhysReg, [&](unsigned Unit, const LiveRange &Range) {
+ LLVM_DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << ' ' << Range);
+ Matrix[Unit].unify(VirtReg, Range);
+ return false;
+ });
++NumAssigned;
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
void LiveRegMatrix::unassign(LiveInterval &VirtReg) {
unsigned PhysReg = VRM->getPhys(VirtReg.reg);
- DEBUG(dbgs() << "unassigning " << printReg(VirtReg.reg, TRI)
- << " from " << printReg(PhysReg, TRI) << ':');
+ LLVM_DEBUG(dbgs() << "unassigning " << printReg(VirtReg.reg, TRI) << " from "
+ << printReg(PhysReg, TRI) << ':');
VRM->clearVirt(VirtReg.reg);
- foreachUnit(TRI, VirtReg, PhysReg, [&](unsigned Unit,
- const LiveRange &Range) {
- DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI));
- Matrix[Unit].extract(VirtReg, Range);
- return false;
- });
+ foreachUnit(TRI, VirtReg, PhysReg,
+ [&](unsigned Unit, const LiveRange &Range) {
+ LLVM_DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI));
+ Matrix[Unit].extract(VirtReg, Range);
+ return false;
+ });
++NumUnassigned;
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
bool LiveRegMatrix::isPhysRegUsed(unsigned PhysReg) const {
diff --git a/lib/CodeGen/LocalStackSlotAllocation.cpp b/lib/CodeGen/LocalStackSlotAllocation.cpp
index b2a0d21e023b..1dbf11ee96e7 100644
--- a/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -164,8 +164,8 @@ void LocalStackSlotPass::AdjustStackOffset(MachineFrameInfo &MFI,
Offset = (Offset + Align - 1) / Align * Align;
int64_t LocalOffset = StackGrowsDown ? -Offset : Offset;
- DEBUG(dbgs() << "Allocate FI(" << FrameIdx << ") to local offset "
- << LocalOffset << "\n");
+ LLVM_DEBUG(dbgs() << "Allocate FI(" << FrameIdx << ") to local offset "
+ << LocalOffset << "\n");
// Keep the offset available for base register allocation
LocalOffsets[FrameIdx] = LocalOffset;
// And tell MFI about it for PEI to use later
@@ -351,7 +351,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
assert(MFI.isObjectPreAllocated(FrameIdx) &&
"Only pre-allocated locals expected!");
- DEBUG(dbgs() << "Considering: " << MI);
+ LLVM_DEBUG(dbgs() << "Considering: " << MI);
unsigned idx = 0;
for (unsigned f = MI.getNumOperands(); idx != f; ++idx) {
@@ -367,7 +367,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
int64_t Offset = 0;
int64_t FrameSizeAdjust = StackGrowsDown ? MFI.getLocalFrameSize() : 0;
- DEBUG(dbgs() << " Replacing FI in: " << MI);
+ LLVM_DEBUG(dbgs() << " Replacing FI in: " << MI);
// If we have a suitable base register available, use it; otherwise
// create a new one. Note that any offset encoded in the
@@ -377,7 +377,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
if (UsedBaseReg &&
lookupCandidateBaseReg(BaseReg, BaseOffset, FrameSizeAdjust,
LocalOffset, MI, TRI)) {
- DEBUG(dbgs() << " Reusing base register " << BaseReg << "\n");
+ LLVM_DEBUG(dbgs() << " Reusing base register " << BaseReg << "\n");
// We found a register to reuse.
Offset = FrameSizeAdjust + LocalOffset - BaseOffset;
} else {
@@ -405,8 +405,9 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
const TargetRegisterClass *RC = TRI->getPointerRegClass(*MF);
BaseReg = Fn.getRegInfo().createVirtualRegister(RC);
- DEBUG(dbgs() << " Materializing base register " << BaseReg <<
- " at frame local offset " << LocalOffset + InstrOffset << "\n");
+ LLVM_DEBUG(dbgs() << " Materializing base register " << BaseReg
+ << " at frame local offset "
+ << LocalOffset + InstrOffset << "\n");
// Tell the target to insert the instruction to initialize
// the base register.
@@ -427,7 +428,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
// Modify the instruction to use the new base register rather
// than the frame index operand.
TRI->resolveFrameIndex(MI, BaseReg, Offset);
- DEBUG(dbgs() << "Resolved: " << MI);
+ LLVM_DEBUG(dbgs() << "Resolved: " << MI);
++NumReplacements;
}
diff --git a/lib/CodeGen/MIRCanonicalizerPass.cpp b/lib/CodeGen/MIRCanonicalizerPass.cpp
index a04e9942131f..8f02423d88aa 100644
--- a/lib/CodeGen/MIRCanonicalizerPass.cpp
+++ b/lib/CodeGen/MIRCanonicalizerPass.cpp
@@ -141,7 +141,7 @@ rescheduleLexographically(std::vector<MachineInstr *> instructions,
for (auto &II : StringInstrMap) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Splicing ";
II.second->dump();
dbgs() << " right before: ";
@@ -233,7 +233,7 @@ static bool rescheduleCanonically(unsigned &PseudoIdempotentInstCount,
continue;
}
- DEBUG(dbgs() << "Operand " << 0 << " of "; II->dump(); MO.dump(););
+ LLVM_DEBUG(dbgs() << "Operand " << 0 << " of "; II->dump(); MO.dump(););
MachineInstr *Def = II;
unsigned Distance = ~0U;
@@ -280,7 +280,7 @@ static bool rescheduleCanonically(unsigned &PseudoIdempotentInstCount,
if (DefI == BBE || UseI == BBE)
continue;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Splicing ";
DefI->dump();
dbgs() << " right before: ";
@@ -302,13 +302,15 @@ static bool rescheduleCanonically(unsigned &PseudoIdempotentInstCount,
if (UseI == MBB->instr_end())
continue;
- DEBUG(dbgs() << "Rescheduling Multi-Use Instructions Lexographically.";);
+ LLVM_DEBUG(
+ dbgs() << "Rescheduling Multi-Use Instructions Lexographically.";);
Changed |= rescheduleLexographically(
E.second, MBB, [&]() -> MachineBasicBlock::iterator { return UseI; });
}
PseudoIdempotentInstCount = PseudoIdempotentInstructions.size();
- DEBUG(dbgs() << "Rescheduling Idempotent Instructions Lexographically.";);
+ LLVM_DEBUG(
+ dbgs() << "Rescheduling Idempotent Instructions Lexographically.";);
Changed |= rescheduleLexographically(
PseudoIdempotentInstructions, MBB,
[&]() -> MachineBasicBlock::iterator { return MBB->begin(); });
@@ -384,7 +386,7 @@ static std::vector<MachineInstr *> populateCandidates(MachineBasicBlock *MBB) {
if (!MI->mayStore() && !MI->isBranch() && !DoesMISideEffect)
continue;
- DEBUG(dbgs() << "Found Candidate: "; MI->dump(););
+ LLVM_DEBUG(dbgs() << "Found Candidate: "; MI->dump(););
Candidates.push_back(MI);
}
@@ -405,7 +407,7 @@ static void doCandidateWalk(std::vector<TypedVReg> &VRegs,
RegQueue.pop();
if (TReg.isFrameIndex()) {
- DEBUG(dbgs() << "Popping frame index.\n";);
+ LLVM_DEBUG(dbgs() << "Popping frame index.\n";);
VRegs.push_back(TypedVReg(RSE_FrameIndex));
continue;
}
@@ -414,7 +416,7 @@ static void doCandidateWalk(std::vector<TypedVReg> &VRegs,
unsigned Reg = TReg.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Popping vreg ";
MRI.def_begin(Reg)->dump();
dbgs() << "\n";
@@ -426,7 +428,7 @@ static void doCandidateWalk(std::vector<TypedVReg> &VRegs,
VRegs.push_back(TypedVReg(Reg));
}
} else {
- DEBUG(dbgs() << "Popping physreg.\n";);
+ LLVM_DEBUG(dbgs() << "Popping physreg.\n";);
VRegs.push_back(TypedVReg(Reg));
continue;
}
@@ -442,7 +444,7 @@ static void doCandidateWalk(std::vector<TypedVReg> &VRegs,
break;
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\n========================\n";
dbgs() << "Visited MI: ";
Def->dump();
@@ -454,7 +456,7 @@ static void doCandidateWalk(std::vector<TypedVReg> &VRegs,
MachineOperand &MO = Def->getOperand(I);
if (MO.isFI()) {
- DEBUG(dbgs() << "Pushing frame index.\n";);
+ LLVM_DEBUG(dbgs() << "Pushing frame index.\n";);
RegQueue.push(TypedVReg(RSE_FrameIndex));
}
@@ -526,7 +528,7 @@ GetVRegRenameMap(const std::vector<TypedVReg> &VRegs,
// from a copy from a frame index. So it's safe to skip by one.
unsigned LastRenameReg = NVC.incrementVirtualVReg();
(void)LastRenameReg;
- DEBUG(dbgs() << "Skipping rename for FI " << LastRenameReg << "\n";);
+ LLVM_DEBUG(dbgs() << "Skipping rename for FI " << LastRenameReg << "\n";);
continue;
} else if (vreg.isCandidate()) {
@@ -543,7 +545,7 @@ GetVRegRenameMap(const std::vector<TypedVReg> &VRegs,
} else if (!TargetRegisterInfo::isVirtualRegister(vreg.getReg())) {
unsigned LastRenameReg = NVC.incrementVirtualVReg();
(void)LastRenameReg;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Skipping rename for Phys Reg " << LastRenameReg << "\n";
});
continue;
@@ -551,26 +553,27 @@ GetVRegRenameMap(const std::vector<TypedVReg> &VRegs,
auto Reg = vreg.getReg();
if (llvm::find(renamedInOtherBB, Reg) != renamedInOtherBB.end()) {
- DEBUG(dbgs() << "Vreg " << Reg << " already renamed in other BB.\n";);
+ LLVM_DEBUG(dbgs() << "Vreg " << Reg
+ << " already renamed in other BB.\n";);
continue;
}
auto Rename = NVC.createVirtualRegister(MRI.getRegClass(Reg));
if (VRegRenameMap.find(Reg) == VRegRenameMap.end()) {
- DEBUG(dbgs() << "Mapping vreg ";);
+ LLVM_DEBUG(dbgs() << "Mapping vreg ";);
if (MRI.reg_begin(Reg) != MRI.reg_end()) {
- DEBUG(auto foo = &*MRI.reg_begin(Reg); foo->dump(););
+ LLVM_DEBUG(auto foo = &*MRI.reg_begin(Reg); foo->dump(););
} else {
- DEBUG(dbgs() << Reg;);
+ LLVM_DEBUG(dbgs() << Reg;);
}
- DEBUG(dbgs() << " to ";);
+ LLVM_DEBUG(dbgs() << " to ";);
if (MRI.reg_begin(Rename) != MRI.reg_end()) {
- DEBUG(auto foo = &*MRI.reg_begin(Rename); foo->dump(););
+ LLVM_DEBUG(auto foo = &*MRI.reg_begin(Rename); foo->dump(););
} else {
- DEBUG(dbgs() << Rename;);
+ LLVM_DEBUG(dbgs() << Rename;);
}
- DEBUG(dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "\n";);
VRegRenameMap.insert(std::pair<unsigned, unsigned>(Reg, Rename));
}
@@ -638,18 +641,19 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
if (CanonicalizeBasicBlockNumber != ~0U) {
if (CanonicalizeBasicBlockNumber != basicBlockNum++)
return false;
- DEBUG(dbgs() << "\n Canonicalizing BasicBlock " << MBB->getName() << "\n";);
+ LLVM_DEBUG(dbgs() << "\n Canonicalizing BasicBlock " << MBB->getName()
+ << "\n";);
}
if (llvm::find(bbNames, MBB->getName()) != bbNames.end()) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Found potentially duplicate BasicBlocks: " << MBB->getName()
<< "\n";
});
return false;
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\n\n NEW BASIC BLOCK: " << MBB->getName() << " \n\n";
dbgs() << "\n\n================================================\n\n";
});
@@ -659,16 +663,17 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
MachineRegisterInfo &MRI = MF.getRegInfo();
bbNames.push_back(MBB->getName());
- DEBUG(dbgs() << "\n\n NEW BASIC BLOCK: " << MBB->getName() << "\n\n";);
+ LLVM_DEBUG(dbgs() << "\n\n NEW BASIC BLOCK: " << MBB->getName() << "\n\n";);
- DEBUG(dbgs() << "MBB Before Canonical Copy Propagation:\n"; MBB->dump(););
+ LLVM_DEBUG(dbgs() << "MBB Before Canonical Copy Propagation:\n";
+ MBB->dump(););
Changed |= propagateLocalCopies(MBB);
- DEBUG(dbgs() << "MBB After Canonical Copy Propagation:\n"; MBB->dump(););
+ LLVM_DEBUG(dbgs() << "MBB After Canonical Copy Propagation:\n"; MBB->dump(););
- DEBUG(dbgs() << "MBB Before Scheduling:\n"; MBB->dump(););
+ LLVM_DEBUG(dbgs() << "MBB Before Scheduling:\n"; MBB->dump(););
unsigned IdempotentInstCount = 0;
Changed |= rescheduleCanonically(IdempotentInstCount, MBB);
- DEBUG(dbgs() << "MBB After Scheduling:\n"; MBB->dump(););
+ LLVM_DEBUG(dbgs() << "MBB After Scheduling:\n"; MBB->dump(););
std::vector<MachineInstr *> Candidates = populateCandidates(MBB);
std::vector<MachineInstr *> VisitedMIs;
@@ -693,7 +698,7 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
continue;
- DEBUG(dbgs() << "Enqueue register"; MO.dump(); dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "Enqueue register"; MO.dump(); dbgs() << "\n";);
RegQueue.push(TypedVReg(MO.getReg()));
}
@@ -710,7 +715,7 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
if (!MO.isReg() && !MO.isFI())
continue;
- DEBUG(dbgs() << "Enqueue Reg/FI"; MO.dump(); dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "Enqueue Reg/FI"; MO.dump(); dbgs() << "\n";);
RegQueue.push(MO.isReg() ? TypedVReg(MO.getReg())
: TypedVReg(RSE_FrameIndex));
@@ -752,8 +757,10 @@ static bool runOnBasicBlock(MachineBasicBlock *MBB,
Changed |= doDefKillClear(MBB);
- DEBUG(dbgs() << "Updated MachineBasicBlock:\n"; MBB->dump(); dbgs() << "\n";);
- DEBUG(dbgs() << "\n\n================================================\n\n");
+ LLVM_DEBUG(dbgs() << "Updated MachineBasicBlock:\n"; MBB->dump();
+ dbgs() << "\n";);
+ LLVM_DEBUG(
+ dbgs() << "\n\n================================================\n\n");
return Changed;
}
@@ -763,19 +770,21 @@ bool MIRCanonicalizer::runOnMachineFunction(MachineFunction &MF) {
if (CanonicalizeFunctionNumber != ~0U) {
if (CanonicalizeFunctionNumber != functionNum++)
return false;
- DEBUG(dbgs() << "\n Canonicalizing Function " << MF.getName() << "\n";);
+ LLVM_DEBUG(dbgs() << "\n Canonicalizing Function " << MF.getName()
+ << "\n";);
}
// we need a valid vreg to create a vreg type for skipping all those
// stray vreg numbers so reach alignment/canonical vreg values.
std::vector<MachineBasicBlock *> RPOList = GetRPOList(MF);
- DEBUG(dbgs() << "\n\n NEW MACHINE FUNCTION: " << MF.getName() << " \n\n";
- dbgs() << "\n\n================================================\n\n";
- dbgs() << "Total Basic Blocks: " << RPOList.size() << "\n";
- for (auto MBB
- : RPOList) { dbgs() << MBB->getName() << "\n"; } dbgs()
- << "\n\n================================================\n\n";);
+ LLVM_DEBUG(
+ dbgs() << "\n\n NEW MACHINE FUNCTION: " << MF.getName() << " \n\n";
+ dbgs() << "\n\n================================================\n\n";
+ dbgs() << "Total Basic Blocks: " << RPOList.size() << "\n";
+ for (auto MBB
+ : RPOList) { dbgs() << MBB->getName() << "\n"; } dbgs()
+ << "\n\n================================================\n\n";);
std::vector<StringRef> BBNames;
std::vector<unsigned> RenamedInOtherBB;
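
Several hunks above use the block form, LLVM_DEBUG({ ... }), or pass multiple semicolon-separated statements inside the parentheses. Both work because the macro splices its argument into the guarded do/while body unchanged. A hedged illustration of the three call shapes seen in this diff (names below are made up for the example):

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "example-pass" // hypothetical

    static void dumpRange(int A, int B) {
      // Single-expression form.
      LLVM_DEBUG(llvm::dbgs() << "A = " << A << '\n');
      // Multiple statements: the whole token sequence is guarded together.
      LLVM_DEBUG(llvm::dbgs() << "B = "; llvm::dbgs() << B << '\n';);
      // Block form, convenient when the debug code needs locals or loops.
      LLVM_DEBUG({
        for (int I = A; I < B; ++I)
          llvm::dbgs() << I << ' ';
        llvm::dbgs() << '\n';
      });
    }
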
diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp
index 319174de2d82..64cc2e669ad0 100644
--- a/lib/CodeGen/MachineBasicBlock.cpp
+++ b/lib/CodeGen/MachineBasicBlock.cpp
@@ -855,9 +855,9 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
- DEBUG(dbgs() << "Splitting critical edge: " << printMBBReference(*this)
- << " -- " << printMBBReference(*NMBB) << " -- "
- << printMBBReference(*Succ) << '\n');
+ LLVM_DEBUG(dbgs() << "Splitting critical edge: " << printMBBReference(*this)
+ << " -- " << printMBBReference(*NMBB) << " -- "
+ << printMBBReference(*Succ) << '\n');
LiveIntervals *LIS = P.getAnalysisIfAvailable<LiveIntervals>();
SlotIndexes *Indexes = P.getAnalysisIfAvailable<SlotIndexes>();
@@ -886,7 +886,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
LV->getVarInfo(Reg).removeKill(*MI)) {
KilledRegs.push_back(Reg);
- DEBUG(dbgs() << "Removing terminator kill: " << *MI);
+ LLVM_DEBUG(dbgs() << "Removing terminator kill: " << *MI);
OI->setIsKill(false);
}
}
@@ -977,7 +977,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ,
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
LV->getVarInfo(Reg).Kills.push_back(&*I);
- DEBUG(dbgs() << "Restored terminator kill: " << *I);
+ LLVM_DEBUG(dbgs() << "Restored terminator kill: " << *I);
break;
}
}
@@ -1110,8 +1110,8 @@ bool MachineBasicBlock::canSplitCriticalEdge(
// case that we can't handle. Since this never happens in properly optimized
// code, just skip those edges.
if (TBB && TBB == FBB) {
- DEBUG(dbgs() << "Won't split critical edge after degenerate "
- << printMBBReference(*this) << '\n');
+ LLVM_DEBUG(dbgs() << "Won't split critical edge after degenerate "
+ << printMBBReference(*this) << '\n');
return false;
}
return true;
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index ec43097c23b2..7ca19945b8fe 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -643,7 +643,8 @@ BranchProbability MachineBlockPlacement::collectViableSuccessors(
if (SuccChain == &Chain) {
SkipSucc = true;
} else if (Succ != *SuccChain->begin()) {
- DEBUG(dbgs() << " " << getBlockName(Succ) << " -> Mid chain!\n");
+ LLVM_DEBUG(dbgs() << " " << getBlockName(Succ)
+ << " -> Mid chain!\n");
continue;
}
}
@@ -1010,7 +1011,7 @@ MachineBlockPlacement::getBestTrellisSuccessor(
// If we have a trellis, and BB doesn't have the best fallthrough edges,
// we shouldn't choose any successor. We've already looked and there's a
// better fallthrough edge for all the successors.
- DEBUG(dbgs() << "Trellis, but not one of the chosen edges.\n");
+ LLVM_DEBUG(dbgs() << "Trellis, but not one of the chosen edges.\n");
return Result;
}
@@ -1027,10 +1028,11 @@ MachineBlockPlacement::getBestTrellisSuccessor(
canTailDuplicateUnplacedPreds(BB, Succ2, Chain, BlockFilter) &&
isProfitableToTailDup(BB, Succ2, MBPI->getEdgeProbability(BB, Succ1),
Chain, BlockFilter)) {
- DEBUG(BranchProbability Succ2Prob = getAdjustedProbability(
- MBPI->getEdgeProbability(BB, Succ2), AdjustedSumProb);
- dbgs() << " Selected: " << getBlockName(Succ2)
- << ", probability: " << Succ2Prob << " (Tail Duplicate)\n");
+ LLVM_DEBUG(BranchProbability Succ2Prob = getAdjustedProbability(
+ MBPI->getEdgeProbability(BB, Succ2), AdjustedSumProb);
+ dbgs() << " Selected: " << getBlockName(Succ2)
+ << ", probability: " << Succ2Prob
+ << " (Tail Duplicate)\n");
Result.BB = Succ2;
Result.ShouldTailDup = true;
return Result;
@@ -1041,10 +1043,10 @@ MachineBlockPlacement::getBestTrellisSuccessor(
ComputedEdges[BestB.Src] = { BestB.Dest, false };
auto TrellisSucc = BestA.Dest;
- DEBUG(BranchProbability SuccProb = getAdjustedProbability(
- MBPI->getEdgeProbability(BB, TrellisSucc), AdjustedSumProb);
- dbgs() << " Selected: " << getBlockName(TrellisSucc)
- << ", probability: " << SuccProb << " (Trellis)\n");
+ LLVM_DEBUG(BranchProbability SuccProb = getAdjustedProbability(
+ MBPI->getEdgeProbability(BB, TrellisSucc), AdjustedSumProb);
+ dbgs() << " Selected: " << getBlockName(TrellisSucc)
+ << ", probability: " << SuccProb << " (Trellis)\n");
Result.BB = TrellisSucc;
return Result;
}
@@ -1150,7 +1152,7 @@ void MachineBlockPlacement::precomputeTriangleChains() {
if (TriangleChainCount == 0)
return;
- DEBUG(dbgs() << "Pre-computing triangle chains.\n");
+ LLVM_DEBUG(dbgs() << "Pre-computing triangle chains.\n");
// Map from last block to the chain that contains it. This allows us to extend
// chains as we find new triangles.
DenseMap<const MachineBasicBlock *, TriangleChain> TriangleChainMap;
@@ -1224,8 +1226,9 @@ void MachineBlockPlacement::precomputeTriangleChains() {
MachineBasicBlock *dst = Chain.Edges.back();
Chain.Edges.pop_back();
for (MachineBasicBlock *src : reverse(Chain.Edges)) {
- DEBUG(dbgs() << "Marking edge: " << getBlockName(src) << "->" <<
- getBlockName(dst) << " as pre-computed based on triangles.\n");
+ LLVM_DEBUG(dbgs() << "Marking edge: " << getBlockName(src) << "->"
+ << getBlockName(dst)
+ << " as pre-computed based on triangles.\n");
auto InsertResult = ComputedEdges.insert({src, {dst, true}});
assert(InsertResult.second && "Block seen twice.");
@@ -1431,8 +1434,8 @@ bool MachineBlockPlacement::hasBetterLayoutPredecessor(
}
if (BadCFGConflict) {
- DEBUG(dbgs() << " Not a candidate: " << getBlockName(Succ) << " -> " << SuccProb
- << " (prob) (non-cold CFG conflict)\n");
+ LLVM_DEBUG(dbgs() << " Not a candidate: " << getBlockName(Succ) << " -> "
+ << SuccProb << " (prob) (non-cold CFG conflict)\n");
return true;
}
@@ -1462,7 +1465,8 @@ MachineBlockPlacement::selectBestSuccessor(
auto AdjustedSumProb =
collectViableSuccessors(BB, Chain, BlockFilter, Successors);
- DEBUG(dbgs() << "Selecting best successor for: " << getBlockName(BB) << "\n");
+ LLVM_DEBUG(dbgs() << "Selecting best successor for: " << getBlockName(BB)
+ << "\n");
// if we already precomputed the best successor for BB, return that if still
// applicable.
@@ -1503,18 +1507,18 @@ MachineBlockPlacement::selectBestSuccessor(
continue;
}
- DEBUG(
- dbgs() << " Candidate: " << getBlockName(Succ) << ", probability: "
- << SuccProb
+ LLVM_DEBUG(
+ dbgs() << " Candidate: " << getBlockName(Succ)
+ << ", probability: " << SuccProb
<< (SuccChain.UnscheduledPredecessors != 0 ? " (CFG break)" : "")
<< "\n");
if (BestSucc.BB && BestProb >= SuccProb) {
- DEBUG(dbgs() << " Not the best candidate, continuing\n");
+ LLVM_DEBUG(dbgs() << " Not the best candidate, continuing\n");
continue;
}
- DEBUG(dbgs() << " Setting it as best candidate\n");
+ LLVM_DEBUG(dbgs() << " Setting it as best candidate\n");
BestSucc.BB = Succ;
BestProb = SuccProb;
}
@@ -1539,10 +1543,9 @@ MachineBlockPlacement::selectBestSuccessor(
break;
if (canTailDuplicateUnplacedPreds(BB, Succ, Chain, BlockFilter)
&& (isProfitableToTailDup(BB, Succ, BestProb, Chain, BlockFilter))) {
- DEBUG(
- dbgs() << " Candidate: " << getBlockName(Succ) << ", probability: "
- << DupProb
- << " (Tail Duplicate)\n");
+ LLVM_DEBUG(dbgs() << " Candidate: " << getBlockName(Succ)
+ << ", probability: " << DupProb
+ << " (Tail Duplicate)\n");
BestSucc.BB = Succ;
BestSucc.ShouldTailDup = true;
break;
@@ -1550,7 +1553,7 @@ MachineBlockPlacement::selectBestSuccessor(
}
if (BestSucc.BB)
- DEBUG(dbgs() << " Selected: " << getBlockName(BestSucc.BB) << "\n");
+ LLVM_DEBUG(dbgs() << " Selected: " << getBlockName(BestSucc.BB) << "\n");
return BestSucc;
}
@@ -1596,8 +1599,8 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
"Found CFG-violating block");
BlockFrequency CandidateFreq = MBFI->getBlockFreq(MBB);
- DEBUG(dbgs() << " " << getBlockName(MBB) << " -> ";
- MBFI->printBlockFreq(dbgs(), CandidateFreq) << " (freq)\n");
+ LLVM_DEBUG(dbgs() << " " << getBlockName(MBB) << " -> ";
+ MBFI->printBlockFreq(dbgs(), CandidateFreq) << " (freq)\n");
// For ehpad, we layout the least probable first as to avoid jumping back
// from least probable landingpads to more probable ones.
@@ -1723,8 +1726,8 @@ void MachineBlockPlacement::buildChain(
if (!BestSucc)
break;
- DEBUG(dbgs() << "Unnatural loop CFG detected, forcibly merging the "
- "layout successor until the CFG reduces\n");
+ LLVM_DEBUG(dbgs() << "Unnatural loop CFG detected, forcibly merging the "
+ "layout successor until the CFG reduces\n");
}
// Placement may have changed tail duplication opportunities.
@@ -1743,15 +1746,15 @@ void MachineBlockPlacement::buildChain(
// Zero out UnscheduledPredecessors for the successor we're about to merge in case
// we selected a successor that didn't fit naturally into the CFG.
SuccChain.UnscheduledPredecessors = 0;
- DEBUG(dbgs() << "Merging from " << getBlockName(BB) << " to "
- << getBlockName(BestSucc) << "\n");
+ LLVM_DEBUG(dbgs() << "Merging from " << getBlockName(BB) << " to "
+ << getBlockName(BestSucc) << "\n");
markChainSuccessors(SuccChain, LoopHeaderBB, BlockFilter);
Chain.merge(BestSucc, &SuccChain);
BB = *std::prev(Chain.end());
}
- DEBUG(dbgs() << "Finished forming chain for header block "
- << getBlockName(*Chain.begin()) << "\n");
+ LLVM_DEBUG(dbgs() << "Finished forming chain for header block "
+ << getBlockName(*Chain.begin()) << "\n");
}
/// Find the best loop top block for layout.
@@ -1784,17 +1787,17 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L,
if (!LoopBlockSet.count(*HeaderChain.begin()))
return L.getHeader();
- DEBUG(dbgs() << "Finding best loop top for: " << getBlockName(L.getHeader())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Finding best loop top for: "
+ << getBlockName(L.getHeader()) << "\n");
BlockFrequency BestPredFreq;
MachineBasicBlock *BestPred = nullptr;
for (MachineBasicBlock *Pred : L.getHeader()->predecessors()) {
if (!LoopBlockSet.count(Pred))
continue;
- DEBUG(dbgs() << " header pred: " << getBlockName(Pred) << ", has "
- << Pred->succ_size() << " successors, ";
- MBFI->printBlockFreq(dbgs(), Pred) << " freq\n");
+ LLVM_DEBUG(dbgs() << " header pred: " << getBlockName(Pred) << ", has "
+ << Pred->succ_size() << " successors, ";
+ MBFI->printBlockFreq(dbgs(), Pred) << " freq\n");
if (Pred->succ_size() > 1)
continue;
@@ -1809,7 +1812,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L,
// If no direct predecessor is fine, just use the loop header.
if (!BestPred) {
- DEBUG(dbgs() << " final top unchanged\n");
+ LLVM_DEBUG(dbgs() << " final top unchanged\n");
return L.getHeader();
}
@@ -1819,7 +1822,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L,
*BestPred->pred_begin() != L.getHeader())
BestPred = *BestPred->pred_begin();
- DEBUG(dbgs() << " final top: " << getBlockName(BestPred) << "\n");
+ LLVM_DEBUG(dbgs() << " final top: " << getBlockName(BestPred) << "\n");
return BestPred;
}
@@ -1851,8 +1854,8 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L,
// blocks where rotating to exit with that block will reach an outer loop.
SmallPtrSet<MachineBasicBlock *, 4> BlocksExitingToOuterLoop;
- DEBUG(dbgs() << "Finding best loop exit for: " << getBlockName(L.getHeader())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Finding best loop exit for: "
+ << getBlockName(L.getHeader()) << "\n");
for (MachineBasicBlock *MBB : L.getBlocks()) {
BlockChain &Chain = *BlockToChain[MBB];
// Ensure that this block is at the end of a chain; otherwise it could be
@@ -1875,15 +1878,15 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L,
BlockChain &SuccChain = *BlockToChain[Succ];
// Don't split chains, either this chain or the successor's chain.
if (&Chain == &SuccChain) {
- DEBUG(dbgs() << " exiting: " << getBlockName(MBB) << " -> "
- << getBlockName(Succ) << " (chain conflict)\n");
+ LLVM_DEBUG(dbgs() << " exiting: " << getBlockName(MBB) << " -> "
+ << getBlockName(Succ) << " (chain conflict)\n");
continue;
}
auto SuccProb = MBPI->getEdgeProbability(MBB, Succ);
if (LoopBlockSet.count(Succ)) {
- DEBUG(dbgs() << " looping: " << getBlockName(MBB) << " -> "
- << getBlockName(Succ) << " (" << SuccProb << ")\n");
+ LLVM_DEBUG(dbgs() << " looping: " << getBlockName(MBB) << " -> "
+ << getBlockName(Succ) << " (" << SuccProb << ")\n");
HasLoopingSucc = true;
continue;
}
@@ -1896,9 +1899,10 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L,
}
BlockFrequency ExitEdgeFreq = MBFI->getBlockFreq(MBB) * SuccProb;
- DEBUG(dbgs() << " exiting: " << getBlockName(MBB) << " -> "
- << getBlockName(Succ) << " [L:" << SuccLoopDepth << "] (";
- MBFI->printBlockFreq(dbgs(), ExitEdgeFreq) << ")\n");
+ LLVM_DEBUG(dbgs() << " exiting: " << getBlockName(MBB) << " -> "
+ << getBlockName(Succ) << " [L:" << SuccLoopDepth
+ << "] (";
+ MBFI->printBlockFreq(dbgs(), ExitEdgeFreq) << ")\n");
// Note that we bias this toward an existing layout successor to retain
// incoming order in the absence of better information. The exit must have
// a frequency higher than the current exit before we consider breaking
@@ -1922,11 +1926,12 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L,
// Without a candidate exiting block or with only a single block in the
// loop, just use the loop header to layout the loop.
if (!ExitingBB) {
- DEBUG(dbgs() << " No other candidate exit blocks, using loop header\n");
+ LLVM_DEBUG(
+ dbgs() << " No other candidate exit blocks, using loop header\n");
return nullptr;
}
if (L.getNumBlocks() == 1) {
- DEBUG(dbgs() << " Loop has 1 block, using loop header as exit\n");
+ LLVM_DEBUG(dbgs() << " Loop has 1 block, using loop header as exit\n");
return nullptr;
}
@@ -1937,7 +1942,8 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L,
!BlocksExitingToOuterLoop.count(ExitingBB))
return nullptr;
- DEBUG(dbgs() << " Best exiting block: " << getBlockName(ExitingBB) << "\n");
+ LLVM_DEBUG(dbgs() << " Best exiting block: " << getBlockName(ExitingBB)
+ << "\n");
return ExitingBB;
}
@@ -2014,8 +2020,8 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
return;
}
- DEBUG(dbgs() << "Rotating loop to put exit " << getBlockName(ExitingBB)
- << " at bottom\n");
+ LLVM_DEBUG(dbgs() << "Rotating loop to put exit " << getBlockName(ExitingBB)
+ << " at bottom\n");
std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end());
}
@@ -2150,8 +2156,9 @@ void MachineBlockPlacement::rotateLoopWithProfile(
}
}
- DEBUG(dbgs() << "The cost of loop rotation by making " << getBlockName(*Iter)
- << " to the top: " << Cost.getFrequency() << "\n");
+ LLVM_DEBUG(dbgs() << "The cost of loop rotation by making "
+ << getBlockName(*Iter)
+ << " to the top: " << Cost.getFrequency() << "\n");
if (Cost < SmallestRotationCost) {
SmallestRotationCost = Cost;
@@ -2160,8 +2167,8 @@ void MachineBlockPlacement::rotateLoopWithProfile(
}
if (RotationPos != LoopChain.end()) {
- DEBUG(dbgs() << "Rotate loop by making " << getBlockName(*RotationPos)
- << " to the top\n");
+ LLVM_DEBUG(dbgs() << "Rotate loop by making " << getBlockName(*RotationPos)
+ << " to the top\n");
std::rotate(LoopChain.begin(), RotationPos, LoopChain.end());
}
}
@@ -2265,7 +2272,7 @@ void MachineBlockPlacement::buildLoopChains(const MachineLoop &L) {
else
rotateLoop(LoopChain, PreferredLoopExit, LoopBlockSet);
- DEBUG({
+ LLVM_DEBUG({
// Crash at the end so we get all of the debugging output first.
bool BadLoop = false;
if (LoopChain.UnscheduledPredecessors) {
@@ -2324,9 +2331,9 @@ void MachineBlockPlacement::buildCFGChains() {
// Ensure that the layout successor is a viable block, as we know that
// fallthrough is a possibility.
assert(NextFI != FE && "Can't fallthrough past the last block.");
- DEBUG(dbgs() << "Pre-merging due to unanalyzable fallthrough: "
- << getBlockName(BB) << " -> " << getBlockName(NextBB)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Pre-merging due to unanalyzable fallthrough: "
+ << getBlockName(BB) << " -> " << getBlockName(NextBB)
+ << "\n");
Chain->merge(NextBB, nullptr);
#ifndef NDEBUG
BlocksWithUnanalyzableExits.insert(&*BB);
@@ -2356,7 +2363,7 @@ void MachineBlockPlacement::buildCFGChains() {
#ifndef NDEBUG
using FunctionBlockSetType = SmallPtrSet<MachineBasicBlock *, 16>;
#endif
- DEBUG({
+ LLVM_DEBUG({
// Crash at the end so we get all of the debugging output first.
bool BadFunc = false;
FunctionBlockSetType FunctionBlockSet;
@@ -2381,11 +2388,11 @@ void MachineBlockPlacement::buildCFGChains() {
// Splice the blocks into place.
MachineFunction::iterator InsertPos = F->begin();
- DEBUG(dbgs() << "[MBP] Function: "<< F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "[MBP] Function: " << F->getName() << "\n");
for (MachineBasicBlock *ChainBB : FunctionChain) {
- DEBUG(dbgs() << (ChainBB == *FunctionChain.begin() ? "Placing chain "
- : " ... ")
- << getBlockName(ChainBB) << "\n");
+ LLVM_DEBUG(dbgs() << (ChainBB == *FunctionChain.begin() ? "Placing chain "
+ : " ... ")
+ << getBlockName(ChainBB) << "\n");
if (InsertPos != MachineFunction::iterator(ChainBB))
F->splice(InsertPos, ChainBB);
else
@@ -2470,11 +2477,11 @@ void MachineBlockPlacement::optimizeBranches() {
MBPI->getEdgeProbability(ChainBB, FBB) >
MBPI->getEdgeProbability(ChainBB, TBB) &&
!TII->reverseBranchCondition(Cond)) {
- DEBUG(dbgs() << "Reverse order of the two branches: "
- << getBlockName(ChainBB) << "\n");
- DEBUG(dbgs() << " Edge probability: "
- << MBPI->getEdgeProbability(ChainBB, FBB) << " vs "
- << MBPI->getEdgeProbability(ChainBB, TBB) << "\n");
+ LLVM_DEBUG(dbgs() << "Reverse order of the two branches: "
+ << getBlockName(ChainBB) << "\n");
+ LLVM_DEBUG(dbgs() << " Edge probability: "
+ << MBPI->getEdgeProbability(ChainBB, FBB) << " vs "
+ << MBPI->getEdgeProbability(ChainBB, TBB) << "\n");
DebugLoc dl; // FIXME: this is nowhere
TII->removeBranch(*ChainBB);
TII->insertBranch(*ChainBB, FBB, TBB, Cond, dl);
@@ -2638,8 +2645,8 @@ bool MachineBlockPlacement::maybeTailDuplicateBlock(
if (!shouldTailDuplicate(BB))
return false;
- DEBUG(dbgs() << "Redoing tail duplication for Succ#"
- << BB->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Redoing tail duplication for Succ#" << BB->getNumber()
+ << "\n");
// This has to be a callback because none of it can be done after
// BB is deleted.
@@ -2687,8 +2694,8 @@ bool MachineBlockPlacement::maybeTailDuplicateBlock(
if (RemBB == PreferredLoopExit)
PreferredLoopExit = nullptr;
- DEBUG(dbgs() << "TailDuplicator deleted block: "
- << getBlockName(RemBB) << "\n");
+ LLVM_DEBUG(dbgs() << "TailDuplicator deleted block: "
+ << getBlockName(RemBB) << "\n");
};
auto RemovalCallbackRef =
function_ref<void(MachineBasicBlock*)>(RemovalCallback);
diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index 75a3d071bdc4..cca4aa86bb0d 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -178,8 +178,8 @@ bool MachineCSE::PerformTrivialCopyPropagation(MachineInstr *MI,
continue;
if (!MRI->constrainRegAttrs(SrcReg, Reg))
continue;
- DEBUG(dbgs() << "Coalescing: " << *DefMI);
- DEBUG(dbgs() << "*** to: " << *MI);
+ LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
+ LLVM_DEBUG(dbgs() << "*** to: " << *MI);
// Propagate SrcReg of copies to MI.
MO.setReg(SrcReg);
MRI->clearKillFlags(SrcReg);
@@ -455,13 +455,13 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
}
void MachineCSE::EnterScope(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
ScopeType *Scope = new ScopeType(VNT);
ScopeMap[MBB] = Scope;
}
void MachineCSE::ExitScope(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
assert(SI != ScopeMap.end());
delete SI->second;
@@ -545,8 +545,8 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// Found a common subexpression, eliminate it.
unsigned CSVN = VNT.lookup(MI);
MachineInstr *CSMI = Exps[CSVN];
- DEBUG(dbgs() << "Examining: " << *MI);
- DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI);
+ LLVM_DEBUG(dbgs() << "Examining: " << *MI);
+ LLVM_DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI);
// Check if it's profitable to perform this CSE.
bool DoCSE = true;
@@ -580,7 +580,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
"Do not CSE physical register defs!");
if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) {
- DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
+ LLVM_DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
DoCSE = false;
break;
}
@@ -589,7 +589,8 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// within the constraints (register class, bank, or low-level type) of
// the old instruction.
if (!MRI->constrainRegAttrs(NewReg, OldReg)) {
- DEBUG(dbgs() << "*** Not the same register constraints, avoid CSE!\n");
+ LLVM_DEBUG(
+ dbgs() << "*** Not the same register constraints, avoid CSE!\n");
DoCSE = false;
break;
}
diff --git a/lib/CodeGen/MachineCombiner.cpp b/lib/CodeGen/MachineCombiner.cpp
index 498b4cb836f6..0c6efff7bb40 100644
--- a/lib/CodeGen/MachineCombiner.cpp
+++ b/lib/CodeGen/MachineCombiner.cpp
@@ -308,8 +308,8 @@ bool MachineCombiner::improvesCriticalPathLen(
unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;
- DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "
- << NewRootDepth << "\tRootDepth: " << RootDepth);
+ LLVM_DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "
+ << NewRootDepth << "\tRootDepth: " << RootDepth);
// For a transform such as reassociation, the cost equation is
// conservatively calculated so that we must improve the depth (data
@@ -317,9 +317,10 @@ bool MachineCombiner::improvesCriticalPathLen(
// Being conservative also protects against inaccuracies in the underlying
// machine trace metrics and CPU models.
if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
- DEBUG(dbgs() << "\tIt MustReduceDepth ");
- DEBUG(NewRootDepth < RootDepth ? dbgs() << "\t and it does it\n"
- : dbgs() << "\t but it does NOT do it\n");
+ LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
+ LLVM_DEBUG(NewRootDepth < RootDepth
+ ? dbgs() << "\t and it does it\n"
+ : dbgs() << "\t but it does NOT do it\n");
return NewRootDepth < RootDepth;
}
@@ -336,17 +337,17 @@ bool MachineCombiner::improvesCriticalPathLen(
unsigned NewCycleCount = NewRootDepth + NewRootLatency;
unsigned OldCycleCount =
RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
- DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency << "\tRootLatency: "
- << RootLatency << "\n\tRootSlack: " << RootSlack
- << " SlackIsAccurate=" << SlackIsAccurate
- << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
- << "\n\tRootDepth + RootLatency + RootSlack = "
- << OldCycleCount;);
- DEBUG(NewCycleCount <= OldCycleCount
- ? dbgs() << "\n\t It IMPROVES PathLen because"
- : dbgs() << "\n\t It DOES NOT improve PathLen because");
- DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
- << ", OldCycleCount = " << OldCycleCount << "\n");
+ LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
+ << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
+ << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
+ << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
+ << "\n\tRootDepth + RootLatency + RootSlack = "
+ << OldCycleCount;);
+ LLVM_DEBUG(NewCycleCount <= OldCycleCount
+ ? dbgs() << "\n\t It IMPROVES PathLen because"
+ : dbgs() << "\n\t It DOES NOT improve PathLen because");
+ LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
+ << ", OldCycleCount = " << OldCycleCount << "\n");
return NewCycleCount <= OldCycleCount;
}
@@ -392,10 +393,10 @@ bool MachineCombiner::preservesResourceLen(
unsigned ResLenAfterCombine =
BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);
- DEBUG(dbgs() << "\t\tResource length before replacement: "
- << ResLenBeforeCombine << " and after: " << ResLenAfterCombine
- << "\n";);
- DEBUG(
+ LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
+ << ResLenBeforeCombine
+ << " and after: " << ResLenAfterCombine << "\n";);
+ LLVM_DEBUG(
ResLenAfterCombine <= ResLenBeforeCombine
? dbgs() << "\t\t As result it IMPROVES/PRESERVES Resource Length\n"
: dbgs() << "\t\t As result it DOES NOT improve/preserve Resource "
@@ -492,7 +493,7 @@ void MachineCombiner::verifyPatternOrder(
/// sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
bool Changed = false;
- DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");
bool IncrementalUpdate = false;
auto BlockIter = MBB->begin();
@@ -555,7 +556,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
if (!NewInstCount)
continue;
- DEBUG(if (dump_intrs) {
+ LLVM_DEBUG(if (dump_intrs) {
dbgs() << "\tFor the Pattern (" << (int)P << ") these instructions could be removed\n";
for (auto const *InstrPtr : DelInstrs) {
dbgs() << "\t\t" << STI->getSchedInfoStr(*InstrPtr) << ": ";
@@ -640,9 +641,11 @@ bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
MinInstr = nullptr;
OptSize = MF.getFunction().optForSize();
- DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
if (!TII->useMachineCombiner()) {
- DEBUG(dbgs() << " Skipping pass: Target does not support machine combiner\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Skipping pass: Target does not support machine combiner\n");
return false;
}
diff --git a/lib/CodeGen/MachineCopyPropagation.cpp b/lib/CodeGen/MachineCopyPropagation.cpp
index 8b6777f06dba..d3bdc772ae4c 100644
--- a/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/lib/CodeGen/MachineCopyPropagation.cpp
@@ -181,7 +181,8 @@ void MachineCopyPropagation::ReadRegister(unsigned Reg) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
Reg2MIMap::iterator CI = CopyMap.find(*AI);
if (CI != CopyMap.end()) {
- DEBUG(dbgs() << "MCP: Copy is used - not dead: "; CI->second->dump());
+ LLVM_DEBUG(dbgs() << "MCP: Copy is used - not dead: ";
+ CI->second->dump());
MaybeDeadCopies.remove(CI->second);
}
}
@@ -229,7 +230,7 @@ bool MachineCopyPropagation::eraseIfRedundant(MachineInstr &Copy, unsigned Src,
if (!isNopCopy(PrevCopy, Src, Def, TRI))
return false;
- DEBUG(dbgs() << "MCP: copy is a NOP, removing: "; Copy.dump());
+ LLVM_DEBUG(dbgs() << "MCP: copy is a NOP, removing: "; Copy.dump());
// Copy was redundantly redefining either Src or Def. Remove earlier kill
// flags between Copy and PrevCopy because the value will be reused now.
@@ -351,8 +352,9 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
// FIXME: Don't handle partial uses of wider COPYs yet.
if (MOUse.getReg() != CopyDstReg) {
- DEBUG(dbgs() << "MCP: FIXME! Not forwarding COPY to sub-register use:\n "
- << MI);
+ LLVM_DEBUG(
+ dbgs() << "MCP: FIXME! Not forwarding COPY to sub-register use:\n "
+ << MI);
continue;
}
@@ -367,20 +369,20 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
continue;
if (!DebugCounter::shouldExecute(FwdCounter)) {
- DEBUG(dbgs() << "MCP: Skipping forwarding due to debug counter:\n "
- << MI);
+ LLVM_DEBUG(dbgs() << "MCP: Skipping forwarding due to debug counter:\n "
+ << MI);
continue;
}
- DEBUG(dbgs() << "MCP: Replacing " << printReg(MOUse.getReg(), TRI)
- << "\n with " << printReg(CopySrcReg, TRI) << "\n in "
- << MI << " from " << Copy);
+ LLVM_DEBUG(dbgs() << "MCP: Replacing " << printReg(MOUse.getReg(), TRI)
+ << "\n with " << printReg(CopySrcReg, TRI)
+ << "\n in " << MI << " from " << Copy);
MOUse.setReg(CopySrcReg);
if (!CopySrc.isRenamable())
MOUse.setIsRenamable(false);
- DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
+ LLVM_DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
// Clear kill markers that may have been invalidated.
for (MachineInstr &KMI :
@@ -393,7 +395,7 @@ void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
}
void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
- DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ) {
MachineInstr *MI = &*I;
@@ -444,7 +446,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
ReadRegister(Reg);
}
- DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
+ LLVM_DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
// Copy is now a candidate for deletion.
if (!MRI->isReserved(Def))
@@ -536,8 +538,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
continue;
}
- DEBUG(dbgs() << "MCP: Removing copy due to regmask clobbering: ";
- MaybeDead->dump());
+ LLVM_DEBUG(dbgs() << "MCP: Removing copy due to regmask clobbering: ";
+ MaybeDead->dump());
// erase() will return the next valid iterator pointing to the next
// element after the erased one.
@@ -569,8 +571,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// since we don't want to trust live-in lists.
if (MBB.succ_empty()) {
for (MachineInstr *MaybeDead : MaybeDeadCopies) {
- DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
- MaybeDead->dump());
+ LLVM_DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
+ MaybeDead->dump());
assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
MaybeDead->eraseFromParent();
Changed = true;
diff --git a/lib/CodeGen/MachineFrameInfo.cpp b/lib/CodeGen/MachineFrameInfo.cpp
index d0c737b261d1..0b316871dbdf 100644
--- a/lib/CodeGen/MachineFrameInfo.cpp
+++ b/lib/CodeGen/MachineFrameInfo.cpp
@@ -41,9 +41,9 @@ static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
unsigned StackAlign) {
if (!ShouldClamp || Align <= StackAlign)
return Align;
- DEBUG(dbgs() << "Warning: requested alignment " << Align
- << " exceeds the stack alignment " << StackAlign
- << " when stack realignment is off" << '\n');
+ LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Align
+ << " exceeds the stack alignment " << StackAlign
+ << " when stack realignment is off" << '\n');
return StackAlign;
}
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index 0ad3ef18e557..e2d92009f217 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -319,10 +319,10 @@ bool MachineLICMBase::runOnMachineFunction(MachineFunction &MF) {
PreRegAlloc = MRI->isSSA();
if (PreRegAlloc)
- DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
+ LLVM_DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
else
- DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
- DEBUG(dbgs() << MF.getName() << " ********\n");
+ LLVM_DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
+ LLVM_DEBUG(dbgs() << MF.getName() << " ********\n");
if (PreRegAlloc) {
// Estimate register pressure during pre-regalloc pass.
@@ -591,8 +591,9 @@ void MachineLICMBase::HoistPostRA(MachineInstr *MI, unsigned Def) {
// Now move the instructions to the predecessor, inserting it before any
// terminator instructions.
- DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader) << " from "
- << printMBBReference(*MI->getParent()) << ": " << *MI);
+ LLVM_DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader)
+ << " from " << printMBBReference(*MI->getParent()) << ": "
+ << *MI);
// Splice the instruction to the preheader.
MachineBasicBlock *MBB = MI->getParent();
@@ -629,14 +630,14 @@ bool MachineLICMBase::IsGuaranteedToExecute(MachineBasicBlock *BB) {
}
void MachineLICMBase::EnterScope(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n');
+ LLVM_DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n');
// Remember livein register pressure.
BackTrace.push_back(RegPressure);
}
void MachineLICMBase::ExitScope(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Exiting " << printMBBReference(*MBB) << '\n');
+ LLVM_DEBUG(dbgs() << "Exiting " << printMBBReference(*MBB) << '\n');
BackTrace.pop_back();
}
@@ -1208,7 +1209,7 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
// Don't hoist a cheap instruction if it would create a copy in the loop.
if (CheapInstr && CreatesCopy) {
- DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
+ LLVM_DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
return false;
}
@@ -1227,7 +1228,7 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
- DEBUG(dbgs() << "Hoist High Latency: " << MI);
+ LLVM_DEBUG(dbgs() << "Hoist High Latency: " << MI);
++NumHighLatency;
return true;
}
@@ -1245,14 +1246,14 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
// Visit BBs from header to current BB, if hoisting this doesn't cause
// high register pressure, then it's safe to proceed.
if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
- DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
+ LLVM_DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
++NumLowRP;
return true;
}
// Don't risk increasing register pressure if it would create copies.
if (CreatesCopy) {
- DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
+ LLVM_DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
return false;
}
@@ -1261,7 +1262,7 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
// conservative.
if (AvoidSpeculation &&
(!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI))) {
- DEBUG(dbgs() << "Won't speculate: " << MI);
+ LLVM_DEBUG(dbgs() << "Won't speculate: " << MI);
return false;
}
@@ -1269,7 +1270,7 @@ bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
// to be remat'ed.
if (!TII->isTriviallyReMaterializable(MI, AA) &&
!MI.isDereferenceableInvariantLoad(AA)) {
- DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
+ LLVM_DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
return false;
}
@@ -1366,7 +1367,7 @@ bool MachineLICMBase::EliminateCSE(MachineInstr *MI,
return false;
if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
- DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
+ LLVM_DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
// Replace virtual registers defined by MI by their counterparts defined
// by Dup.
@@ -1446,14 +1447,14 @@ bool MachineLICMBase::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
// Now move the instructions to the predecessor, inserting it before any
// terminator instructions.
- DEBUG({
- dbgs() << "Hoisting " << *MI;
- if (MI->getParent()->getBasicBlock())
- dbgs() << " from " << printMBBReference(*MI->getParent());
- if (Preheader->getBasicBlock())
- dbgs() << " to " << printMBBReference(*Preheader);
- dbgs() << "\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "Hoisting " << *MI;
+ if (MI->getParent()->getBasicBlock())
+ dbgs() << " from " << printMBBReference(*MI->getParent());
+ if (Preheader->getBasicBlock())
+ dbgs() << " to " << printMBBReference(*Preheader);
+ dbgs() << "\n";
+ });
// If this is the first instruction being hoisted to the preheader,
// initialize the CSE map with potential common expressions.
diff --git a/lib/CodeGen/MachineOutliner.cpp b/lib/CodeGen/MachineOutliner.cpp
index e9000fffdb58..a524fca972ea 100644
--- a/lib/CodeGen/MachineOutliner.cpp
+++ b/lib/CodeGen/MachineOutliner.cpp
@@ -1112,11 +1112,11 @@ void MachineOutliner::prune(Candidate &C,
// Remove C from the CandidateList.
C.InCandidateList = false;
- DEBUG(dbgs() << "- Removed a Candidate \n";
- dbgs() << "--- Num fns left for candidate: " << F.getOccurrenceCount()
- << "\n";
- dbgs() << "--- Candidate's functions's benefit: " << F.getBenefit()
- << "\n";);
+ LLVM_DEBUG(dbgs() << "- Removed a Candidate \n";
+ dbgs() << "--- Num fns left for candidate: "
+ << F.getOccurrenceCount() << "\n";
+ dbgs() << "--- Candidate's functions's benefit: " << F.getBenefit()
+ << "\n";);
}
void MachineOutliner::pruneOverlaps(
@@ -1441,7 +1441,7 @@ bool MachineOutliner::outline(
NumOutlined++;
}
- DEBUG(dbgs() << "OutlinedSomething = " << OutlinedSomething << "\n";);
+ LLVM_DEBUG(dbgs() << "OutlinedSomething = " << OutlinedSomething << "\n";);
return OutlinedSomething;
}
@@ -1461,8 +1461,9 @@ bool MachineOutliner::runOnModule(Module &M) {
// Does the target implement the MachineOutliner? If it doesn't, quit here.
if (!TII->useMachineOutliner()) {
// No. So we're done.
- DEBUG(dbgs()
- << "Skipping pass: Target does not support the MachineOutliner.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Skipping pass: Target does not support the MachineOutliner.\n");
return false;
}
diff --git a/lib/CodeGen/MachinePipeliner.cpp b/lib/CodeGen/MachinePipeliner.cpp
index da2e67832bdb..ef96cec4f978 100644
--- a/lib/CodeGen/MachinePipeliner.cpp
+++ b/lib/CodeGen/MachinePipeliner.cpp
@@ -886,7 +886,7 @@ void SwingSchedulerDAG::schedule() {
Topo.InitDAGTopologicalSorting();
postprocessDAG();
changeDependences();
- DEBUG({
+ LLVM_DEBUG({
for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this);
});
@@ -906,8 +906,8 @@ void SwingSchedulerDAG::schedule() {
RecMII = 0;
MII = std::max(ResMII, RecMII);
- DEBUG(dbgs() << "MII = " << MII << " (rec=" << RecMII << ", res=" << ResMII
- << ")\n");
+ LLVM_DEBUG(dbgs() << "MII = " << MII << " (rec=" << RecMII
+ << ", res=" << ResMII << ")\n");
// Can't schedule a loop without a valid MII.
if (MII == 0)
@@ -925,7 +925,7 @@ void SwingSchedulerDAG::schedule() {
checkNodeSets(NodeSets);
- DEBUG({
+ LLVM_DEBUG({
for (auto &I : NodeSets) {
dbgs() << " Rec NodeSet ";
I.dump();
@@ -938,7 +938,7 @@ void SwingSchedulerDAG::schedule() {
removeDuplicateNodes(NodeSets);
- DEBUG({
+ LLVM_DEBUG({
for (auto &I : NodeSets) {
dbgs() << " NodeSet ";
I.dump();
@@ -1634,7 +1634,7 @@ static bool ignoreDependence(const SDep &D, bool isPred) {
void SwingSchedulerDAG::computeNodeFunctions(NodeSetType &NodeSets) {
ScheduleInfo.resize(SUnits.size());
- DEBUG({
+ LLVM_DEBUG({
for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(),
E = Topo.end();
I != E; ++I) {
@@ -1696,7 +1696,7 @@ void SwingSchedulerDAG::computeNodeFunctions(NodeSetType &NodeSets) {
for (NodeSet &I : NodeSets)
I.computeNodeSetInfo(this);
- DEBUG({
+ LLVM_DEBUG({
for (unsigned i = 0; i < SUnits.size(); i++) {
dbgs() << "\tNode " << i << ":\n";
dbgs() << "\t ASAP = " << getASAP(&SUnits[i]) << "\n";
@@ -1883,9 +1883,10 @@ void SwingSchedulerDAG::registerPressureFilter(NodeSetType &NodeSets) {
CriticalPSets,
RecRegPressure.MaxSetPressure);
if (RPDelta.Excess.isValid()) {
- DEBUG(dbgs() << "Excess register pressure: SU(" << SU->NodeNum << ") "
- << TRI->getRegPressureSetName(RPDelta.Excess.getPSet())
- << ":" << RPDelta.Excess.getUnitInc());
+ LLVM_DEBUG(
+ dbgs() << "Excess register pressure: SU(" << SU->NodeNum << ") "
+ << TRI->getRegPressureSetName(RPDelta.Excess.getPSet())
+ << ":" << RPDelta.Excess.getUnitInc());
NS.setExceedPressure(SU);
break;
}
@@ -1936,7 +1937,7 @@ void SwingSchedulerDAG::checkNodeSets(NodeSetType &NodeSets) {
return;
}
NodeSets.clear();
- DEBUG(dbgs() << "Clear recurrence node-sets\n");
+ LLVM_DEBUG(dbgs() << "Clear recurrence node-sets\n");
return;
}
@@ -2082,28 +2083,28 @@ void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
NodeOrder.clear();
for (auto &Nodes : NodeSets) {
- DEBUG(dbgs() << "NodeSet size " << Nodes.size() << "\n");
+ LLVM_DEBUG(dbgs() << "NodeSet size " << Nodes.size() << "\n");
OrderKind Order;
SmallSetVector<SUnit *, 8> N;
if (pred_L(NodeOrder, N) && isSubset(N, Nodes)) {
R.insert(N.begin(), N.end());
Order = BottomUp;
- DEBUG(dbgs() << " Bottom up (preds) ");
+ LLVM_DEBUG(dbgs() << " Bottom up (preds) ");
} else if (succ_L(NodeOrder, N) && isSubset(N, Nodes)) {
R.insert(N.begin(), N.end());
Order = TopDown;
- DEBUG(dbgs() << " Top down (succs) ");
+ LLVM_DEBUG(dbgs() << " Top down (succs) ");
} else if (isIntersect(N, Nodes, R)) {
// If some of the successors are in the existing node-set, then use the
// top-down ordering.
Order = TopDown;
- DEBUG(dbgs() << " Top down (intersect) ");
+ LLVM_DEBUG(dbgs() << " Top down (intersect) ");
} else if (NodeSets.size() == 1) {
for (auto &N : Nodes)
if (N->Succs.size() == 0)
R.insert(N);
Order = BottomUp;
- DEBUG(dbgs() << " Bottom up (all) ");
+ LLVM_DEBUG(dbgs() << " Bottom up (all) ");
} else {
// Find the node with the highest ASAP.
SUnit *maxASAP = nullptr;
@@ -2114,7 +2115,7 @@ void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
}
R.insert(maxASAP);
Order = BottomUp;
- DEBUG(dbgs() << " Bottom up (default) ");
+ LLVM_DEBUG(dbgs() << " Bottom up (default) ");
}
while (!R.empty()) {
@@ -2137,7 +2138,7 @@ void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
maxHeight = I;
}
NodeOrder.insert(maxHeight);
- DEBUG(dbgs() << maxHeight->NodeNum << " ");
+ LLVM_DEBUG(dbgs() << maxHeight->NodeNum << " ");
R.remove(maxHeight);
for (const auto &I : maxHeight->Succs) {
if (Nodes.count(I.getSUnit()) == 0)
@@ -2160,7 +2161,7 @@ void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
}
}
Order = BottomUp;
- DEBUG(dbgs() << "\n Switching order to bottom up ");
+ LLVM_DEBUG(dbgs() << "\n Switching order to bottom up ");
SmallSetVector<SUnit *, 8> N;
if (pred_L(NodeOrder, N, &Nodes))
R.insert(N.begin(), N.end());
@@ -2182,7 +2183,7 @@ void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
maxDepth = I;
}
NodeOrder.insert(maxDepth);
- DEBUG(dbgs() << maxDepth->NodeNum << " ");
+ LLVM_DEBUG(dbgs() << maxDepth->NodeNum << " ");
R.remove(maxDepth);
if (Nodes.isExceedSU(maxDepth)) {
Order = TopDown;
@@ -2209,16 +2210,16 @@ void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
}
}
Order = TopDown;
- DEBUG(dbgs() << "\n Switching order to top down ");
+ LLVM_DEBUG(dbgs() << "\n Switching order to top down ");
SmallSetVector<SUnit *, 8> N;
if (succ_L(NodeOrder, N, &Nodes))
R.insert(N.begin(), N.end());
}
}
- DEBUG(dbgs() << "\nDone with Nodeset\n");
+ LLVM_DEBUG(dbgs() << "\nDone with Nodeset\n");
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Node order: ";
for (SUnit *I : NodeOrder)
dbgs() << " " << I->NodeNum << " ";
@@ -2237,7 +2238,7 @@ bool SwingSchedulerDAG::schedulePipeline(SMSchedule &Schedule) {
for (unsigned II = MII; II < MII + 10 && !scheduleFound; ++II) {
Schedule.reset();
Schedule.setInitiationInterval(II);
- DEBUG(dbgs() << "Try to schedule with " << II << "\n");
+ LLVM_DEBUG(dbgs() << "Try to schedule with " << II << "\n");
SetVector<SUnit *>::iterator NI = NodeOrder.begin();
SetVector<SUnit *>::iterator NE = NodeOrder.end();
@@ -2254,12 +2255,12 @@ bool SwingSchedulerDAG::schedulePipeline(SMSchedule &Schedule) {
int SchedStart = INT_MIN;
Schedule.computeStart(SU, &EarlyStart, &LateStart, &SchedEnd, &SchedStart,
II, this);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Inst (" << SU->NodeNum << ") ";
SU->getInstr()->dump();
dbgs() << "\n";
});
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\tes: " << EarlyStart << " ls: " << LateStart
<< " me: " << SchedEnd << " ms: " << SchedStart << "\n";
});
@@ -2295,7 +2296,7 @@ bool SwingSchedulerDAG::schedulePipeline(SMSchedule &Schedule) {
Schedule.getMaxStageCount() > (unsigned)SwpMaxStages)
scheduleFound = false;
- DEBUG({
+ LLVM_DEBUG({
if (!scheduleFound)
dbgs() << "\tCan't schedule\n";
});
@@ -2306,7 +2307,7 @@ bool SwingSchedulerDAG::schedulePipeline(SMSchedule &Schedule) {
scheduleFound = Schedule.isValidSchedule(this);
}
- DEBUG(dbgs() << "Schedule Found? " << scheduleFound << "\n");
+ LLVM_DEBUG(dbgs() << "Schedule Found? " << scheduleFound << "\n");
if (scheduleFound)
Schedule.finalizeSchedule(this);
@@ -2376,7 +2377,7 @@ void SwingSchedulerDAG::generatePipelinedLoop(SMSchedule &Schedule) {
generatePhis(KernelBB, PrologBBs.back(), KernelBB, KernelBB, Schedule, VRMap,
InstrMap, MaxStageCount, MaxStageCount, false);
- DEBUG(dbgs() << "New block\n"; KernelBB->dump(););
+ LLVM_DEBUG(dbgs() << "New block\n"; KernelBB->dump(););
SmallVector<MachineBasicBlock *, 4> EpilogBBs;
// Generate the epilog instructions to complete the pipeline.
@@ -2445,7 +2446,7 @@ void SwingSchedulerDAG::generateProlog(SMSchedule &Schedule, unsigned LastStage,
}
}
rewritePhiValues(NewBB, i, Schedule, VRMap, InstrMap);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "prolog:\n";
NewBB->dump();
});
@@ -2527,7 +2528,7 @@ void SwingSchedulerDAG::generateEpilog(SMSchedule &Schedule, unsigned LastStage,
InstrMap, LastStage, EpilogStage, i == 1);
PredBB = NewBB;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "epilog:\n";
NewBB->dump();
});
@@ -3625,7 +3626,7 @@ bool SMSchedule::insert(SUnit *SU, int StartCycle, int EndCycle, int II) {
}
if (ST.getInstrInfo()->isZeroCost(SU->getInstr()->getOpcode()) ||
Resources->canReserveResources(*SU->getInstr())) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\tinsert at cycle " << curCycle << " ";
SU->getInstr()->dump();
});
@@ -3638,7 +3639,7 @@ bool SMSchedule::insert(SUnit *SU, int StartCycle, int EndCycle, int II) {
FirstCycle = curCycle;
return true;
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\tfailed to insert at cycle " << curCycle << " ";
SU->getInstr()->dump();
});
@@ -4034,18 +4035,19 @@ void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {
Circuits.begin(), Circuits.end(),
[SU](const NodeSet &Circuit) { return Circuit.count(SU); });
if (InCircuit)
- DEBUG(dbgs() << "In a circuit, predecessor ";);
+ LLVM_DEBUG(dbgs() << "In a circuit, predecessor ";);
else {
Valid = false;
NumNodeOrderIssues++;
- DEBUG(dbgs() << "Predecessor ";);
+ LLVM_DEBUG(dbgs() << "Predecessor ";);
}
- DEBUG(dbgs() << Pred->NodeNum << " and successor " << Succ->NodeNum
- << " are scheduled before node " << SU->NodeNum << "\n";);
+ LLVM_DEBUG(dbgs() << Pred->NodeNum << " and successor " << Succ->NodeNum
+ << " are scheduled before node " << SU->NodeNum
+ << "\n";);
}
}
- DEBUG({
+ LLVM_DEBUG({
if (!Valid)
dbgs() << "Invalid node order found!\n";
});
@@ -4188,7 +4190,7 @@ void SMSchedule::finalizeSchedule(SwingSchedulerDAG *SSD) {
SSD->fixupRegisterOverlaps(cycleInstrs);
}
- DEBUG(dump(););
+ LLVM_DEBUG(dump(););
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
diff --git a/lib/CodeGen/MachineRegionInfo.cpp b/lib/CodeGen/MachineRegionInfo.cpp
index 88fb7e1dde38..2619d8f78276 100644
--- a/lib/CodeGen/MachineRegionInfo.cpp
+++ b/lib/CodeGen/MachineRegionInfo.cpp
@@ -90,7 +90,7 @@ bool MachineRegionInfoPass::runOnMachineFunction(MachineFunction &F) {
RI.recalculate(F, DT, PDT, DF);
- DEBUG(RI.dump());
+ LLVM_DEBUG(RI.dump());
return false;
}
diff --git a/lib/CodeGen/MachineSSAUpdater.cpp b/lib/CodeGen/MachineSSAUpdater.cpp
index 36844e9fb30a..773661965f18 100644
--- a/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/lib/CodeGen/MachineSSAUpdater.cpp
@@ -204,7 +204,7 @@ unsigned MachineSSAUpdater::GetValueInMiddleOfBlock(MachineBasicBlock *BB) {
// If the client wants to know about all new instructions, tell it.
if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);
- DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n");
+ LLVM_DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n");
return InsertedPHI->getOperand(0).getReg();
}
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index cee87be92495..c2c0d347f5eb 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -361,7 +361,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
} else if (!mf.getSubtarget().enableMachineScheduler())
return false;
- DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
// Initialize the context of the pass.
MF = &mf;
@@ -373,7 +373,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
LIS = &getAnalysis<LiveIntervals>();
if (VerifyScheduling) {
- DEBUG(LIS->dump());
+ LLVM_DEBUG(LIS->dump());
MF->verify(this, "Before machine scheduling.");
}
RegClassInfo->runOnMachineFunction(*MF);
@@ -383,7 +383,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
scheduleRegions(*Scheduler, false);
- DEBUG(LIS->dump());
+ LLVM_DEBUG(LIS->dump());
if (VerifyScheduling)
MF->verify(this, "After machine scheduling.");
return true;
@@ -397,10 +397,10 @@ bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (!EnablePostRAMachineSched)
return false;
} else if (!mf.getSubtarget().enablePostRAScheduler()) {
- DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
+ LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
return false;
}
- DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));
// Initialize the context of the pass.
MF = &mf;
@@ -548,12 +548,13 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
Scheduler.exitRegion();
continue;
}
- DEBUG(dbgs() << "********** MI Scheduling **********\n");
- DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB) << " "
- << MBB->getName() << "\n From: " << *I << " To: ";
- if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
- else dbgs() << "End";
- dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
+ LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
+ << " " << MBB->getName() << "\n From: " << *I
+ << " To: ";
+ if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
+ else dbgs() << "End";
+ dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
if (DumpCriticalPathLength) {
errs() << MF->getName();
errs() << ":%bb. " << MBB->getNumber();
@@ -750,8 +751,8 @@ bool ScheduleDAGMI::checkSchedLimit() {
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
- DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
- DEBUG(SchedImpl->dumpPolicy());
+ LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
+ LLVM_DEBUG(SchedImpl->dumpPolicy());
// Build the DAG.
buildSchedGraph(AA);
@@ -763,14 +764,10 @@ void ScheduleDAGMI::schedule() {
SmallVector<SUnit*, 8> TopRoots, BotRoots;
findRootsAndBiasEdges(TopRoots, BotRoots);
- DEBUG(
- if (EntrySU.getInstr() != nullptr)
- EntrySU.dumpAll(this);
- for (const SUnit &SU : SUnits)
- SU.dumpAll(this);
- if (ExitSU.getInstr() != nullptr)
- ExitSU.dumpAll(this);
- );
+ LLVM_DEBUG(if (EntrySU.getInstr() != nullptr) EntrySU.dumpAll(this);
+ for (const SUnit &SU
+ : SUnits) SU.dumpAll(this);
+ if (ExitSU.getInstr() != nullptr) ExitSU.dumpAll(this););
if (ViewMISchedDAGs) viewGraph();
// Initialize the strategy before modifying the DAG.
@@ -782,7 +779,7 @@ void ScheduleDAGMI::schedule() {
bool IsTopNode = false;
while (true) {
- DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
+ LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
SUnit *SU = SchedImpl->pickNode(IsTopNode);
if (!SU) break;
@@ -822,7 +819,7 @@ void ScheduleDAGMI::schedule() {
placeDebugValues();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "*** Final schedule for "
<< printMBBReference(*begin()->getParent()) << " ***\n";
dumpSchedule();
@@ -1017,7 +1014,7 @@ void ScheduleDAGMILive::initRegPressure() {
// Close the RPTracker to finalize live ins.
RPTracker.closeRegion();
- DEBUG(RPTracker.dump());
+ LLVM_DEBUG(RPTracker.dump());
// Initialize the live ins and live outs.
TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
@@ -1032,8 +1029,8 @@ void ScheduleDAGMILive::initRegPressure() {
BotRPTracker.initLiveThru(RPTracker);
if (!BotRPTracker.getLiveThru().empty()) {
TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
- DEBUG(dbgs() << "Live Thru: ";
- dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
+ LLVM_DEBUG(dbgs() << "Live Thru: ";
+ dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
};
// For each live out vreg reduce the pressure change associated with other
@@ -1047,12 +1044,10 @@ void ScheduleDAGMILive::initRegPressure() {
updatePressureDiffs(LiveUses);
}
- DEBUG(
- dbgs() << "Top Pressure:\n";
- dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
- dbgs() << "Bottom Pressure:\n";
- dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
- );
+ LLVM_DEBUG(dbgs() << "Top Pressure:\n";
+ dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
+ dbgs() << "Bottom Pressure:\n";
+ dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););
assert((BotRPTracker.getPos() == RegionEnd ||
(RegionEnd->isDebugInstr() &&
@@ -1067,17 +1062,16 @@ void ScheduleDAGMILive::initRegPressure() {
for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
if (RegionPressure[i] > Limit) {
- DEBUG(dbgs() << TRI->getRegPressureSetName(i)
- << " Limit " << Limit
- << " Actual " << RegionPressure[i] << "\n");
+ LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
+ << " Actual " << RegionPressure[i] << "\n");
RegionCriticalPSets.push_back(PressureChange(i));
}
}
- DEBUG(dbgs() << "Excess PSets: ";
- for (const PressureChange &RCPS : RegionCriticalPSets)
- dbgs() << TRI->getRegPressureSetName(
- RCPS.getPSet()) << " ";
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Excess PSets: ";
+ for (const PressureChange &RCPS
+ : RegionCriticalPSets) dbgs()
+ << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
+ dbgs() << "\n");
}
void ScheduleDAGMILive::
@@ -1098,10 +1092,11 @@ updateScheduledPressure(const SUnit *SU,
}
unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
if (NewMaxPressure[ID] >= Limit - 2) {
- DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
- << NewMaxPressure[ID]
- << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
- << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
+ LLVM_DEBUG(dbgs() << " " << TRI->getRegPressureSetName(ID) << ": "
+ << NewMaxPressure[ID]
+ << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
+ << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
+ << " livethru)\n");
}
}
}
@@ -1131,17 +1126,14 @@ void ScheduleDAGMILive::updatePressureDiffs(
PressureDiff &PDiff = getPressureDiff(&SU);
PDiff.addPressureChange(Reg, Decrement, &MRI);
- DEBUG(
- dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
- << printReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
- << ' ' << *SU.getInstr();
- dbgs() << " to ";
- PDiff.dump(*TRI);
- );
+ LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU.NodeNum << ") "
+ << printReg(Reg, TRI) << ':'
+ << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
+ dbgs() << " to "; PDiff.dump(*TRI););
}
} else {
assert(P.LaneMask.any());
- DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << " LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
// This may be called before CurrentBottom has been initialized. However,
// BotRPTracker must have a valid position. We want the value live into the
// instruction or live out of the block, so ask for the previous
@@ -1169,12 +1161,9 @@ void ScheduleDAGMILive::updatePressureDiffs(
if (LRQ.valueIn() == VNI) {
PressureDiff &PDiff = getPressureDiff(SU);
PDiff.addPressureChange(Reg, true, &MRI);
- DEBUG(
- dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
- << *SU->getInstr();
- dbgs() << " to ";
- PDiff.dump(*TRI);
- );
+ LLVM_DEBUG(dbgs() << " UpdateRegP: SU(" << SU->NodeNum << ") "
+ << *SU->getInstr();
+ dbgs() << " to "; PDiff.dump(*TRI););
}
}
}
@@ -1193,8 +1182,8 @@ void ScheduleDAGMILive::updatePressureDiffs(
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
- DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
- DEBUG(SchedImpl->dumpPolicy());
+ LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
+ LLVM_DEBUG(SchedImpl->dumpPolicy());
buildDAGWithRegPressure();
Topo.InitDAGTopologicalSorting();
@@ -1208,26 +1197,22 @@ void ScheduleDAGMILive::schedule() {
// This may initialize a DFSResult to be used for queue priority.
SchedImpl->initialize(this);
- DEBUG(
- if (EntrySU.getInstr() != nullptr)
- EntrySU.dumpAll(this);
- for (const SUnit &SU : SUnits) {
- SU.dumpAll(this);
- if (ShouldTrackPressure) {
- dbgs() << " Pressure Diff : ";
- getPressureDiff(&SU).dump(*TRI);
- }
- dbgs() << " Single Issue : ";
- if (SchedModel.mustBeginGroup(SU.getInstr()) &&
- SchedModel.mustEndGroup(SU.getInstr()))
- dbgs() << "true;";
- else
- dbgs() << "false;";
- dbgs() << '\n';
- }
- if (ExitSU.getInstr() != nullptr)
- ExitSU.dumpAll(this);
- );
+ LLVM_DEBUG(if (EntrySU.getInstr() != nullptr) EntrySU.dumpAll(this);
+ for (const SUnit &SU
+ : SUnits) {
+ SU.dumpAll(this);
+ if (ShouldTrackPressure) {
+ dbgs() << " Pressure Diff : ";
+ getPressureDiff(&SU).dump(*TRI);
+ }
+ dbgs() << " Single Issue : ";
+ if (SchedModel.mustBeginGroup(SU.getInstr()) &&
+ SchedModel.mustEndGroup(SU.getInstr()))
+ dbgs() << "true;";
+ else
+ dbgs() << "false;";
+ dbgs() << '\n';
+ } if (ExitSU.getInstr() != nullptr) ExitSU.dumpAll(this););
if (ViewMISchedDAGs) viewGraph();
// Initialize ready queues now that the DAG and priority data are finalized.
@@ -1235,7 +1220,7 @@ void ScheduleDAGMILive::schedule() {
bool IsTopNode = false;
while (true) {
- DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
+ LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
SUnit *SU = SchedImpl->pickNode(IsTopNode);
if (!SU) break;
@@ -1263,7 +1248,7 @@ void ScheduleDAGMILive::schedule() {
placeDebugValues();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "*** Final schedule for "
<< printMBBReference(*begin()->getParent()) << " ***\n";
dumpSchedule();
@@ -1380,13 +1365,13 @@ unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
} else
CyclicLatency = 0;
- DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
- << SU->NodeNum << ") = " << CyclicLatency << "c\n");
+ LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
+ << SU->NodeNum << ") = " << CyclicLatency << "c\n");
if (CyclicLatency > MaxCyclicLatency)
MaxCyclicLatency = CyclicLatency;
}
}
- DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
+ LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
return MaxCyclicLatency;
}
@@ -1430,10 +1415,8 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
TopRPTracker.advance(RegOpers);
assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
- DEBUG(
- dbgs() << "Top Pressure:\n";
- dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
- );
+ LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
+ TopRPTracker.getRegSetPressureAtPos(), TRI););
updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
}
@@ -1469,10 +1452,8 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
SmallVector<RegisterMaskPair, 8> LiveUses;
BotRPTracker.recede(RegOpers, &LiveUses);
assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
- DEBUG(
- dbgs() << "Bottom Pressure:\n";
- dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
- );
+ LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
+ BotRPTracker.getRegSetPressureAtPos(), TRI););
updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
updatePressureDiffs(LiveUses);
@@ -1572,8 +1553,8 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
*SUb->getInstr(), MemOpRecords[Idx+1].BaseReg,
ClusterLength) &&
DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
- DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
- << SUb->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
+ << SUb->NodeNum << ")\n");
// Copy successor edges from SUa to SUb. Interleaving computation
// dependent on SUa can prevent load combining due to register reuse.
// Predecessor edges do not need to be copied from SUb to SUa since nearby
@@ -1581,7 +1562,8 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
for (const SDep &Succ : SUa->Succs) {
if (Succ.getSUnit() == SUb)
continue;
- DEBUG(dbgs() << " Copy Succ SU(" << Succ.getSUnit()->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << " Copy Succ SU(" << Succ.getSUnit()->NodeNum
+ << ")\n");
DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
}
++ClusterLength;
@@ -1790,18 +1772,18 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
return;
GlobalUses.push_back(Pred.getSUnit());
}
- DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
// Add the weak edges.
for (SmallVectorImpl<SUnit*>::const_iterator
I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
- DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
- << GlobalSU->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
+ << GlobalSU->NodeNum << ")\n");
DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
}
for (SmallVectorImpl<SUnit*>::const_iterator
I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
- DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
- << FirstLocalSU->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
+ << FirstLocalSU->NodeNum << ")\n");
DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
}
}
@@ -1959,16 +1941,16 @@ bool SchedBoundary::checkHazard(SUnit *SU) {
unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
- DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
- << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
+ LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
+ << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
return true;
}
if (CurrMOps > 0 &&
((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
(!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
- DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must "
- << (isTop()? "begin" : "end") << " group\n");
+ LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must "
+ << (isTop() ? "begin" : "end") << " group\n");
return true;
}
@@ -1984,9 +1966,9 @@ bool SchedBoundary::checkHazard(SUnit *SU) {
#ifndef NDEBUG
MaxObservedStall = std::max(Cycles, MaxObservedStall);
#endif
- DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
- << SchedModel->getResourceName(ResIdx)
- << "=" << NRCycle << "c\n");
+ LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
+ << SchedModel->getResourceName(ResIdx) << "="
+ << NRCycle << "c\n");
return true;
}
}
@@ -2007,8 +1989,8 @@ findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
}
}
if (LateSU) {
- DEBUG(dbgs() << Available.getName() << " RemLatency SU("
- << LateSU->NodeNum << ") " << RemLatency << "c\n");
+ LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
+ << LateSU->NodeNum << ") " << RemLatency << "c\n");
}
return RemLatency;
}
@@ -2024,8 +2006,8 @@ getOtherResourceCount(unsigned &OtherCritIdx) {
unsigned OtherCritCount = Rem->RemIssueCount
+ (RetiredMOps * SchedModel->getMicroOpFactor());
- DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
- << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
+ LLVM_DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
+ << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
PIdx != PEnd; ++PIdx) {
unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
@@ -2035,9 +2017,10 @@ getOtherResourceCount(unsigned &OtherCritIdx) {
}
}
if (OtherCritIdx) {
- DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
- << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
- << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
+ LLVM_DEBUG(
+ dbgs() << " " << Available.getName() << " + Remain CritRes: "
+ << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
+ << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
}
return OtherCritCount;
}
@@ -2101,7 +2084,8 @@ void SchedBoundary::bumpCycle(unsigned NextCycle) {
checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
getScheduledLatency());
- DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
+ << '\n');
}
void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
@@ -2121,8 +2105,8 @@ unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
unsigned Factor = SchedModel->getResourceFactor(PIdx);
unsigned Count = Factor * Cycles;
- DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx)
- << " +" << Cycles << "x" << Factor << "u\n");
+ LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +"
+ << Cycles << "x" << Factor << "u\n");
// Update Executed resources counts.
incExecutedResources(PIdx, Count);
@@ -2133,16 +2117,17 @@ countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
// becomes the critical resource.
if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
ZoneCritResIdx = PIdx;
- DEBUG(dbgs() << " *** Critical resource "
- << SchedModel->getResourceName(PIdx) << ": "
- << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
+ LLVM_DEBUG(dbgs() << " *** Critical resource "
+ << SchedModel->getResourceName(PIdx) << ": "
+ << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
+ << "c\n");
}
// For reserved resources, record the highest cycle using the resource.
unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
if (NextAvailable > CurrCycle) {
- DEBUG(dbgs() << " Resource conflict: "
- << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
- << NextAvailable << "\n");
+ LLVM_DEBUG(dbgs() << " Resource conflict: "
+ << SchedModel->getProcResource(PIdx)->Name
+ << " reserved until @" << NextAvailable << "\n");
}
return NextAvailable;
}
@@ -2167,7 +2152,7 @@ void SchedBoundary::bumpNode(SUnit *SU) {
"Cannot schedule this instruction's MicroOps in the current cycle.");
unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
- DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
+ LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
unsigned NextCycle = CurrCycle;
switch (SchedModel->getMicroOpBufferSize()) {
@@ -2177,7 +2162,7 @@ void SchedBoundary::bumpNode(SUnit *SU) {
case 1:
if (ReadyCycle > NextCycle) {
NextCycle = ReadyCycle;
- DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
+ LLVM_DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
}
break;
default:
@@ -2206,8 +2191,9 @@ void SchedBoundary::bumpNode(SUnit *SU) {
if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
>= (int)SchedModel->getLatencyFactor()) {
ZoneCritResIdx = 0;
- DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
- << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
+ LLVM_DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
+ << ScaledMOps / SchedModel->getLatencyFactor()
+ << "c\n");
}
}
for (TargetSchedModel::ProcResIter
@@ -2243,13 +2229,13 @@ void SchedBoundary::bumpNode(SUnit *SU) {
unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
if (SU->getDepth() > TopLatency) {
TopLatency = SU->getDepth();
- DEBUG(dbgs() << " " << Available.getName()
- << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
+ LLVM_DEBUG(dbgs() << " " << Available.getName() << " TopLatency SU("
+ << SU->NodeNum << ") " << TopLatency << "c\n");
}
if (SU->getHeight() > BotLatency) {
BotLatency = SU->getHeight();
- DEBUG(dbgs() << " " << Available.getName()
- << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
+ LLVM_DEBUG(dbgs() << " " << Available.getName() << " BotLatency SU("
+ << SU->NodeNum << ") " << BotLatency << "c\n");
}
// If we stall for any reason, bump the cycle.
if (NextCycle > CurrCycle)
@@ -2273,17 +2259,17 @@ void SchedBoundary::bumpNode(SUnit *SU) {
// currCycle to X.
if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
(!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
- DEBUG(dbgs() << " Bump cycle to "
- << (isTop() ? "end" : "begin") << " group\n");
+ LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin")
+ << " group\n");
bumpCycle(++NextCycle);
}
while (CurrMOps >= SchedModel->getIssueWidth()) {
- DEBUG(dbgs() << " *** Max MOps " << CurrMOps
- << " at cycle " << CurrCycle << '\n');
+ LLVM_DEBUG(dbgs() << " *** Max MOps " << CurrMOps << " at cycle "
+ << CurrCycle << '\n');
bumpCycle(++NextCycle);
}
- DEBUG(dumpScheduledState());
+ LLVM_DEBUG(dumpScheduledState());
}
/// Release pending ready nodes in to the available queue. This makes them
@@ -2356,8 +2342,8 @@ SUnit *SchedBoundary::pickOnlyChoice() {
releasePending();
}
- DEBUG(Pending.dump());
- DEBUG(Available.dump());
+ LLVM_DEBUG(Pending.dump());
+ LLVM_DEBUG(Available.dump());
if (Available.size() == 1)
return *Available.begin();
@@ -2455,27 +2441,24 @@ void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
if (!OtherResLimited) {
if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
Policy.ReduceLatency |= true;
- DEBUG(dbgs() << " " << CurrZone.Available.getName()
- << " RemainingLatency " << RemLatency << " + "
- << CurrZone.getCurrCycle() << "c > CritPath "
- << Rem.CriticalPath << "\n");
+ LLVM_DEBUG(dbgs() << " " << CurrZone.Available.getName()
+ << " RemainingLatency " << RemLatency << " + "
+ << CurrZone.getCurrCycle() << "c > CritPath "
+ << Rem.CriticalPath << "\n");
}
}
// If the same resource is limiting inside and outside the zone, do nothing.
if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
return;
- DEBUG(
- if (CurrZone.isResourceLimited()) {
- dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
- << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
- << "\n";
- }
- if (OtherResLimited)
- dbgs() << " RemainingLimit: "
- << SchedModel->getResourceName(OtherCritIdx) << "\n";
- if (!CurrZone.isResourceLimited() && !OtherResLimited)
- dbgs() << " Latency limited both directions.\n");
+ LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
+ dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
+ << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
+ } if (OtherResLimited) dbgs()
+ << " RemainingLimit: "
+ << SchedModel->getResourceName(OtherCritIdx) << "\n";
+ if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
+ << " Latency limited both directions.\n");
if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
@@ -2623,8 +2606,8 @@ bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
} // end namespace llvm
static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
- DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
- << GenericSchedulerBase::getReasonStr(Reason) << '\n');
+ LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
+ << GenericSchedulerBase::getReasonStr(Reason) << '\n');
}
static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
@@ -2746,14 +2729,14 @@ void GenericScheduler::checkAcyclicLatency() {
Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
- DEBUG(dbgs() << "IssueCycles="
- << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
- << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
- << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
- << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
- << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
- if (Rem.IsAcyclicLatencyLimited)
- dbgs() << " ACYCLIC LATENCY LIMIT\n");
+ LLVM_DEBUG(
+ dbgs() << "IssueCycles="
+ << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
+ << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
+ << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
+ << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
+ << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
+ if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n");
}
void GenericScheduler::registerRoots() {
@@ -2764,7 +2747,7 @@ void GenericScheduler::registerRoots() {
if (SU->getDepth() > Rem.CriticalPath)
Rem.CriticalPath = SU->getDepth();
}
- DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
+ LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
if (DumpCriticalPathLength) {
errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
}
@@ -2879,10 +2862,10 @@ void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
}
}
}
- DEBUG(if (Cand.RPDelta.Excess.isValid())
- dbgs() << " Try SU(" << Cand.SU->NodeNum << ") "
- << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
- << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
+ LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
+ << " Try SU(" << Cand.SU->NodeNum << ") "
+ << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
+ << Cand.RPDelta.Excess.getUnitInc() << "\n");
}
/// Apply a set of heursitics to a new candidate. Heuristics are currently
@@ -3023,7 +3006,7 @@ void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
if (TryCand.ResDelta == SchedResourceDelta())
TryCand.initResourceDelta(DAG, SchedModel);
Cand.setBest(TryCand);
- DEBUG(traceCandidate(Cand));
+ LLVM_DEBUG(traceCandidate(Cand));
}
}
}
@@ -3052,14 +3035,14 @@ SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
// See if BotCand is still valid (because we previously scheduled from Top).
- DEBUG(dbgs() << "Picking from Bot:\n");
+ LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
if (!BotCand.isValid() || BotCand.SU->isScheduled ||
BotCand.Policy != BotPolicy) {
BotCand.reset(CandPolicy());
pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
assert(BotCand.Reason != NoCand && "failed to find the first candidate");
} else {
- DEBUG(traceCandidate(BotCand));
+ LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
if (VerifyScheduling) {
SchedCandidate TCand;
@@ -3072,14 +3055,14 @@ SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
}
// Check if the top Q has a better candidate.
- DEBUG(dbgs() << "Picking from Top:\n");
+ LLVM_DEBUG(dbgs() << "Picking from Top:\n");
if (!TopCand.isValid() || TopCand.SU->isScheduled ||
TopCand.Policy != TopPolicy) {
TopCand.reset(CandPolicy());
pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
assert(TopCand.Reason != NoCand && "failed to find the first candidate");
} else {
- DEBUG(traceCandidate(TopCand));
+ LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
if (VerifyScheduling) {
SchedCandidate TCand;
@@ -3099,7 +3082,7 @@ SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
tryCandidate(Cand, TopCand, nullptr);
if (TopCand.Reason != NoCand) {
Cand.setBest(TopCand);
- DEBUG(traceCandidate(Cand));
+ LLVM_DEBUG(traceCandidate(Cand));
}
IsTopNode = Cand.AtTop;
@@ -3148,7 +3131,8 @@ SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
if (SU->isBottomReady())
Bot.removeReady(SU);
- DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
+ LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
+ << *SU->getInstr());
return SU;
}
@@ -3169,8 +3153,8 @@ void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
MachineInstr *Copy = DepSU->getInstr();
if (!Copy->isCopy())
continue;
- DEBUG(dbgs() << " Rescheduling physreg copy ";
- Dep.getSUnit()->dump(DAG));
+ LLVM_DEBUG(dbgs() << " Rescheduling physreg copy ";
+ Dep.getSUnit()->dump(DAG));
DAG->moveInstruction(Copy, InsertPos);
}
}
@@ -3249,7 +3233,7 @@ void PostGenericScheduler::registerRoots() {
if (SU->getDepth() > Rem.CriticalPath)
Rem.CriticalPath = SU->getDepth();
}
- DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
+ LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
if (DumpCriticalPathLength) {
errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
}
@@ -3307,7 +3291,7 @@ void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
tryCandidate(Cand, TryCand);
if (TryCand.Reason != NoCand) {
Cand.setBest(TryCand);
- DEBUG(traceCandidate(Cand));
+ LLVM_DEBUG(traceCandidate(Cand));
}
}
}
@@ -3339,7 +3323,8 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
IsTopNode = true;
Top.removeReady(SU);
- DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
+ LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
+ << *SU->getInstr());
return SU;
}
@@ -3428,12 +3413,15 @@ public:
SUnit *SU = ReadyQ.back();
ReadyQ.pop_back();
IsTopNode = false;
- DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
- << " ILP: " << DAG->getDFSResult()->getILP(SU)
- << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
- << DAG->getDFSResult()->getSubtreeLevel(
- DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
- << "Scheduling " << *SU->getInstr());
+ LLVM_DEBUG(dbgs() << "Pick node "
+ << "SU(" << SU->NodeNum << ") "
+ << " ILP: " << DAG->getDFSResult()->getILP(SU)
+ << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
+ << " @"
+ << DAG->getDFSResult()->getSubtreeLevel(
+ DAG->getDFSResult()->getSubtreeID(SU))
+ << '\n'
+ << "Scheduling " << *SU->getInstr());
return SU;
}
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index 0d43b10cc843..c09539d9903c 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -211,8 +211,8 @@ bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
if (DefMI->isCopyLike())
return false;
- DEBUG(dbgs() << "Coalescing: " << *DefMI);
- DEBUG(dbgs() << "*** to: " << MI);
+ LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
+ LLVM_DEBUG(dbgs() << "*** to: " << MI);
MRI->replaceRegWith(DstReg, SrcReg);
MI.eraseFromParent();
@@ -296,7 +296,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
- DEBUG(dbgs() << "******** Machine Sinking ********\n");
+ LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
@@ -323,14 +323,14 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
for (auto &Pair : ToSplit) {
auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
if (NewSucc != nullptr) {
- DEBUG(dbgs() << " *** Splitting critical edge: "
- << printMBBReference(*Pair.first) << " -- "
- << printMBBReference(*NewSucc) << " -- "
- << printMBBReference(*Pair.second) << '\n');
+ LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
+ << printMBBReference(*Pair.first) << " -- "
+ << printMBBReference(*NewSucc) << " -- "
+ << printMBBReference(*Pair.second) << '\n');
MadeChange = true;
++NumSplit;
} else
- DEBUG(dbgs() << " *** Not legal to break critical edge\n");
+ LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
}
// If this iteration over the code changed anything, keep iterating.
if (!MadeChange) break;
@@ -804,7 +804,7 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
return false;
}
- DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);
+ LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);
// If the block has multiple predecessors, this is a critical edge.
// Decide if we can sink along it or need to break the edge.
@@ -814,26 +814,26 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
bool TryBreak = false;
bool store = true;
if (!MI.isSafeToMove(AA, store)) {
- DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
+ LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
TryBreak = true;
}
// We don't want to sink across a critical edge if we don't dominate the
// successor. We could be introducing calculations to new code paths.
if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
- DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
+ LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
TryBreak = true;
}
// Don't sink instructions into a loop.
if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
- DEBUG(dbgs() << " *** NOTE: Loop header found\n");
+ LLVM_DEBUG(dbgs() << " *** NOTE: Loop header found\n");
TryBreak = true;
}
// Otherwise we are OK with sinking along a critical edge.
if (!TryBreak)
- DEBUG(dbgs() << "Sinking along critical edge.\n");
+ LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");
else {
// Mark this edge as to be split.
// If the edge can actually be split, the next iteration of the main loop
@@ -841,8 +841,8 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
bool Status =
PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
if (!Status)
- DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
- "break critical edge\n");
+ LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
+ "break critical edge\n");
// The instruction will not be sunk this time.
return false;
}
@@ -855,8 +855,8 @@ bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
SuccToSinkTo, BreakPHIEdge);
if (!Status)
- DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
- "break critical edge\n");
+ LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
+ "break critical edge\n");
// The instruction will not be sunk this time.
return false;
}
diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp
index f4aebca3f472..b444cd31eba2 100644
--- a/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/lib/CodeGen/MachineTraceMetrics.cpp
@@ -396,8 +396,8 @@ MachineTraceMetrics::getEnsemble(MachineTraceMetrics::Strategy strategy) {
}
void MachineTraceMetrics::invalidate(const MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Invalidate traces through " << printMBBReference(*MBB)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Invalidate traces through " << printMBBReference(*MBB)
+ << '\n');
BlockInfo[MBB->getNumber()].invalidate();
for (unsigned i = 0; i != TS_NumStrategies; ++i)
if (Ensembles[i])
@@ -477,8 +477,8 @@ public:
/// Compute the trace through MBB.
void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Computing " << getName() << " trace through "
- << printMBBReference(*MBB) << '\n');
+ LLVM_DEBUG(dbgs() << "Computing " << getName() << " trace through "
+ << printMBBReference(*MBB) << '\n');
// Set up loop bounds for the backwards post-order traversal.
LoopBounds Bounds(BlockInfo, MTM.Loops);
@@ -486,11 +486,11 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
Bounds.Downward = false;
Bounds.Visited.clear();
for (auto I : inverse_post_order_ext(MBB, Bounds)) {
- DEBUG(dbgs() << " pred for " << printMBBReference(*I) << ": ");
+ LLVM_DEBUG(dbgs() << " pred for " << printMBBReference(*I) << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the predecessors have been visited, pick the preferred one.
TBI.Pred = pickTracePred(I);
- DEBUG({
+ LLVM_DEBUG({
if (TBI.Pred)
dbgs() << printMBBReference(*TBI.Pred) << '\n';
else
@@ -504,11 +504,11 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
Bounds.Downward = true;
Bounds.Visited.clear();
for (auto I : post_order_ext(MBB, Bounds)) {
- DEBUG(dbgs() << " succ for " << printMBBReference(*I) << ": ");
+ LLVM_DEBUG(dbgs() << " succ for " << printMBBReference(*I) << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the successors have been visited, pick the preferred one.
TBI.Succ = pickTraceSucc(I);
- DEBUG({
+ LLVM_DEBUG({
if (TBI.Succ)
dbgs() << printMBBReference(*TBI.Succ) << '\n';
else
@@ -531,8 +531,8 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
WorkList.push_back(BadMBB);
do {
const MachineBasicBlock *MBB = WorkList.pop_back_val();
- DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
- << getName() << " height.\n");
+ LLVM_DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
+ << getName() << " height.\n");
// Find any MBB predecessors that have MBB as their preferred successor.
// They are the only ones that need to be invalidated.
for (const MachineBasicBlock *Pred : MBB->predecessors()) {
@@ -556,8 +556,8 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
WorkList.push_back(BadMBB);
do {
const MachineBasicBlock *MBB = WorkList.pop_back_val();
- DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
- << getName() << " depth.\n");
+ LLVM_DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' '
+ << getName() << " depth.\n");
// Find any MBB successors that have MBB as their preferred predecessor.
// They are the only ones that need to be invalidated.
for (const MachineBasicBlock *Succ : MBB->successors()) {
@@ -813,9 +813,9 @@ updateDepth(MachineTraceMetrics::TraceBlockInfo &TBI, const MachineInstr &UseMI,
if (TBI.HasValidInstrHeights) {
// Update critical path length.
TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Height);
- DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << UseMI);
+ LLVM_DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << UseMI);
} else {
- DEBUG(dbgs() << Cycle << '\t' << UseMI);
+ LLVM_DEBUG(dbgs() << Cycle << '\t' << UseMI);
}
}
@@ -860,13 +860,13 @@ computeInstrDepths(const MachineBasicBlock *MBB) {
// Go through trace blocks in top-down order, stopping after the center block.
while (!Stack.empty()) {
MBB = Stack.pop_back_val();
- DEBUG(dbgs() << "\nDepths for " << printMBBReference(*MBB) << ":\n");
+ LLVM_DEBUG(dbgs() << "\nDepths for " << printMBBReference(*MBB) << ":\n");
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
TBI.HasValidInstrDepths = true;
TBI.CriticalPath = 0;
// Print out resource depths here as well.
- DEBUG({
+ LLVM_DEBUG({
dbgs() << format("%7u Instructions\n", TBI.InstrDepth);
ArrayRef<unsigned> PRDepths = getProcResourceDepths(MBB->getNumber());
for (unsigned K = 0; K != PRDepths.size(); ++K)
@@ -1045,12 +1045,12 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
SmallVector<DataDep, 8> Deps;
for (;!Stack.empty(); Stack.pop_back()) {
MBB = Stack.back();
- DEBUG(dbgs() << "Heights for " << printMBBReference(*MBB) << ":\n");
+ LLVM_DEBUG(dbgs() << "Heights for " << printMBBReference(*MBB) << ":\n");
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
TBI.HasValidInstrHeights = true;
TBI.CriticalPath = 0;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << format("%7u Instructions\n", TBI.InstrHeight);
ArrayRef<unsigned> PRHeights = getProcResourceHeights(MBB->getNumber());
for (unsigned K = 0; K != PRHeights.size(); ++K)
@@ -1081,7 +1081,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
if (!Deps.empty()) {
// Loop header PHI heights are all 0.
unsigned Height = TBI.Succ ? Cycles.lookup(&PHI).Height : 0;
- DEBUG(dbgs() << "pred\t" << Height << '\t' << PHI);
+ LLVM_DEBUG(dbgs() << "pred\t" << Height << '\t' << PHI);
if (pushDepHeight(Deps.front(), PHI, Height, Heights, MTM.SchedModel,
MTM.TII))
addLiveIns(Deps.front().DefMI, Deps.front().DefOp, Stack);
@@ -1122,38 +1122,38 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
InstrCycles &MICycles = Cycles[&MI];
MICycles.Height = Cycle;
if (!TBI.HasValidInstrDepths) {
- DEBUG(dbgs() << Cycle << '\t' << MI);
+ LLVM_DEBUG(dbgs() << Cycle << '\t' << MI);
continue;
}
// Update critical path length.
TBI.CriticalPath = std::max(TBI.CriticalPath, Cycle + MICycles.Depth);
- DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << MI);
+ LLVM_DEBUG(dbgs() << TBI.CriticalPath << '\t' << Cycle << '\t' << MI);
}
// Update virtual live-in heights. They were added by addLiveIns() with a 0
// height because the final height isn't known until now.
- DEBUG(dbgs() << printMBBReference(*MBB) << " Live-ins:");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " Live-ins:");
for (LiveInReg &LIR : TBI.LiveIns) {
const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg);
LIR.Height = Heights.lookup(DefMI);
- DEBUG(dbgs() << ' ' << printReg(LIR.Reg) << '@' << LIR.Height);
+ LLVM_DEBUG(dbgs() << ' ' << printReg(LIR.Reg) << '@' << LIR.Height);
}
// Transfer the live regunits to the live-in list.
for (SparseSet<LiveRegUnit>::const_iterator
RI = RegUnits.begin(), RE = RegUnits.end(); RI != RE; ++RI) {
TBI.LiveIns.push_back(LiveInReg(RI->RegUnit, RI->Cycle));
- DEBUG(dbgs() << ' ' << printRegUnit(RI->RegUnit, MTM.TRI)
- << '@' << RI->Cycle);
+ LLVM_DEBUG(dbgs() << ' ' << printRegUnit(RI->RegUnit, MTM.TRI) << '@'
+ << RI->Cycle);
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
if (!TBI.HasValidInstrDepths)
continue;
// Add live-ins to the critical path length.
TBI.CriticalPath = std::max(TBI.CriticalPath,
computeCrossBlockCriticalPath(TBI));
- DEBUG(dbgs() << "Critical path: " << TBI.CriticalPath << '\n');
+ LLVM_DEBUG(dbgs() << "Critical path: " << TBI.CriticalPath << '\n');
}
}
diff --git a/lib/CodeGen/MacroFusion.cpp b/lib/CodeGen/MacroFusion.cpp
index 5b3523be6351..2df72d83aaff 100644
--- a/lib/CodeGen/MacroFusion.cpp
+++ b/lib/CodeGen/MacroFusion.cpp
@@ -66,11 +66,11 @@ static bool fuseInstructionPair(ScheduleDAGMI &DAG, SUnit &FirstSU,
if (SI.getSUnit() == &FirstSU)
SI.setLatency(0);
- DEBUG(dbgs() << "Macro fuse: ";
- FirstSU.print(dbgs(), &DAG); dbgs() << " - ";
- SecondSU.print(dbgs(), &DAG); dbgs() << " / ";
- dbgs() << DAG.TII->getName(FirstSU.getInstr()->getOpcode()) << " - " <<
- DAG.TII->getName(SecondSU.getInstr()->getOpcode()) << '\n'; );
+ LLVM_DEBUG(
+ dbgs() << "Macro fuse: "; FirstSU.print(dbgs(), &DAG); dbgs() << " - ";
+ SecondSU.print(dbgs(), &DAG); dbgs() << " / ";
+ dbgs() << DAG.TII->getName(FirstSU.getInstr()->getOpcode()) << " - "
+ << DAG.TII->getName(SecondSU.getInstr()->getOpcode()) << '\n';);
// Make data dependencies from the FirstSU also dependent on the SecondSU to
// prevent them from being scheduled between the FirstSU and the SecondSU.
@@ -80,9 +80,8 @@ static bool fuseInstructionPair(ScheduleDAGMI &DAG, SUnit &FirstSU,
if (SI.isWeak() || isHazard(SI) ||
SU == &DAG.ExitSU || SU == &SecondSU || SU->isPred(&SecondSU))
continue;
- DEBUG(dbgs() << " Bind ";
- SecondSU.print(dbgs(), &DAG); dbgs() << " - ";
- SU->print(dbgs(), &DAG); dbgs() << '\n';);
+ LLVM_DEBUG(dbgs() << " Bind "; SecondSU.print(dbgs(), &DAG);
+ dbgs() << " - "; SU->print(dbgs(), &DAG); dbgs() << '\n';);
DAG.addEdge(SU, SDep(&SecondSU, SDep::Artificial));
}
@@ -93,9 +92,8 @@ static bool fuseInstructionPair(ScheduleDAGMI &DAG, SUnit &FirstSU,
SUnit *SU = SI.getSUnit();
if (SI.isWeak() || isHazard(SI) || &FirstSU == SU || FirstSU.isSucc(SU))
continue;
- DEBUG(dbgs() << " Bind ";
- SU->print(dbgs(), &DAG); dbgs() << " - ";
- FirstSU.print(dbgs(), &DAG); dbgs() << '\n';);
+ LLVM_DEBUG(dbgs() << " Bind "; SU->print(dbgs(), &DAG); dbgs() << " - ";
+ FirstSU.print(dbgs(), &DAG); dbgs() << '\n';);
DAG.addEdge(&FirstSU, SDep(SU, SDep::Artificial));
}
diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp
index 6a73708b9a88..b7a5a6d47554 100644
--- a/lib/CodeGen/PHIElimination.cpp
+++ b/lib/CodeGen/PHIElimination.cpp
@@ -270,7 +270,8 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
IncomingReg = entry;
reusedIncoming = true;
++NumReused;
- DEBUG(dbgs() << "Reusing " << printReg(IncomingReg) << " for " << *MPhi);
+ LLVM_DEBUG(dbgs() << "Reusing " << printReg(IncomingReg) << " for "
+ << *MPhi);
} else {
const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
@@ -295,9 +296,9 @@ void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
// AfterPHIsIt, so it appears before the current PHICopy.
if (reusedIncoming)
if (MachineInstr *OldKill = VI.findKill(&MBB)) {
- DEBUG(dbgs() << "Remove old kill from " << *OldKill);
+ LLVM_DEBUG(dbgs() << "Remove old kill from " << *OldKill);
LV->removeVirtualRegisterKilled(IncomingReg, *OldKill);
- DEBUG(MBB.dump());
+ LLVM_DEBUG(MBB.dump());
}
// Add information to LiveVariables to know that the incoming value is
@@ -593,9 +594,9 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
if (!ShouldSplit && !NoPhiElimLiveOutEarlyExit)
continue;
if (ShouldSplit) {
- DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge "
- << printMBBReference(*PreMBB) << " -> "
- << printMBBReference(MBB) << ": " << *BBI);
+ LLVM_DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge "
+ << printMBBReference(*PreMBB) << " -> "
+ << printMBBReference(MBB) << ": " << *BBI);
}
// If Reg is not live-in to MBB, it means it must be live-in to some
@@ -610,10 +611,12 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
// Check for a loop exiting edge.
if (!ShouldSplit && CurLoop != PreLoop) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Split wouldn't help, maybe avoid loop copies?\n";
- if (PreLoop) dbgs() << "PreLoop: " << *PreLoop;
- if (CurLoop) dbgs() << "CurLoop: " << *CurLoop;
+ if (PreLoop)
+ dbgs() << "PreLoop: " << *PreLoop;
+ if (CurLoop)
+ dbgs() << "CurLoop: " << *CurLoop;
});
// This edge could be entering a loop, exiting a loop, or it could be
      // both: Jumping directly from one loop to the header of a sibling
@@ -624,7 +627,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
if (!ShouldSplit && !SplitAllCriticalEdges)
continue;
if (!PreMBB->SplitCriticalEdge(&MBB, *this)) {
- DEBUG(dbgs() << "Failed to split critical edge.\n");
+ LLVM_DEBUG(dbgs() << "Failed to split critical edge.\n");
continue;
}
Changed = true;
diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp
index d7d792e58bf9..1d058ccfb633 100644
--- a/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/lib/CodeGen/PeepholeOptimizer.cpp
@@ -696,7 +696,8 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg,
// An existent entry with multiple sources is a PHI cycle we must avoid.
// Otherwise it's an entry with a valid next source we already found.
if (CurSrcRes.getNumSources() > 1) {
- DEBUG(dbgs() << "findNextSource: found PHI cycle, aborting...\n");
+ LLVM_DEBUG(dbgs()
+ << "findNextSource: found PHI cycle, aborting...\n");
return false;
}
break;
@@ -709,7 +710,7 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg,
if (NumSrcs > 1) {
PHICount++;
if (PHICount >= RewritePHILimit) {
- DEBUG(dbgs() << "findNextSource: PHI limit reached\n");
+ LLVM_DEBUG(dbgs() << "findNextSource: PHI limit reached\n");
return false;
}
@@ -1143,9 +1144,9 @@ getNewSource(MachineRegisterInfo *MRI, const TargetInstrInfo *TII,
// Build the new PHI node and return its def register as the new source.
MachineInstr &OrigPHI = const_cast<MachineInstr &>(*Res.getInst());
MachineInstr &NewPHI = insertPHI(*MRI, *TII, NewPHISrcs, OrigPHI);
- DEBUG(dbgs() << "-- getNewSource\n");
- DEBUG(dbgs() << " Replacing: " << OrigPHI);
- DEBUG(dbgs() << " With: " << NewPHI);
+ LLVM_DEBUG(dbgs() << "-- getNewSource\n");
+ LLVM_DEBUG(dbgs() << " Replacing: " << OrigPHI);
+ LLVM_DEBUG(dbgs() << " With: " << NewPHI);
const MachineOperand &MODef = NewPHI.getOperand(0);
return RegSubRegPair(MODef.getReg(), MODef.getSubReg());
}
@@ -1241,9 +1242,9 @@ PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike,
NewCopy->getOperand(0).setIsUndef();
}
- DEBUG(dbgs() << "-- RewriteSource\n");
- DEBUG(dbgs() << " Replacing: " << CopyLike);
- DEBUG(dbgs() << " With: " << *NewCopy);
+ LLVM_DEBUG(dbgs() << "-- RewriteSource\n");
+ LLVM_DEBUG(dbgs() << " Replacing: " << CopyLike);
+ LLVM_DEBUG(dbgs() << " With: " << *NewCopy);
MRI->replaceRegWith(Def.Reg, NewVReg);
MRI->clearKillFlags(NewVReg);
@@ -1462,7 +1463,8 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
if (PrevCopy == NAPhysToVirtMIs.end()) {
// We can't remove the copy: there was an intervening clobber of the
// non-allocatable physical register after the copy to virtual.
- DEBUG(dbgs() << "NAPhysCopy: intervening clobber forbids erasing " << MI);
+ LLVM_DEBUG(dbgs() << "NAPhysCopy: intervening clobber forbids erasing "
+ << MI);
return false;
}
@@ -1470,7 +1472,7 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
if (PrevDstReg == SrcReg) {
// Remove the virt->phys copy: we saw the virtual register definition, and
// the non-allocatable physical register's state hasn't changed since then.
- DEBUG(dbgs() << "NAPhysCopy: erasing " << MI);
+ LLVM_DEBUG(dbgs() << "NAPhysCopy: erasing " << MI);
++NumNAPhysCopies;
return true;
}
@@ -1479,7 +1481,7 @@ bool PeepholeOptimizer::foldRedundantNAPhysCopy(
// register get a copy of the non-allocatable physical register, and we only
// track one such copy. Avoid getting confused by this new non-allocatable
// physical register definition, and remove it from the tracked copies.
- DEBUG(dbgs() << "NAPhysCopy: missed opportunity " << MI);
+ LLVM_DEBUG(dbgs() << "NAPhysCopy: missed opportunity " << MI);
NAPhysToVirtMIs.erase(PrevCopy);
return false;
}
@@ -1575,15 +1577,15 @@ bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) {
if (findTargetRecurrence(PHI.getOperand(0).getReg(), TargetRegs, RC)) {
// Commutes operands of instructions in RC if necessary so that the copy to
// be generated from PHI can be coalesced.
- DEBUG(dbgs() << "Optimize recurrence chain from " << PHI);
+ LLVM_DEBUG(dbgs() << "Optimize recurrence chain from " << PHI);
for (auto &RI : RC) {
- DEBUG(dbgs() << "\tInst: " << *(RI.getMI()));
+ LLVM_DEBUG(dbgs() << "\tInst: " << *(RI.getMI()));
auto CP = RI.getCommutePair();
if (CP) {
Changed = true;
TII->commuteInstruction(*(RI.getMI()), false, (*CP).first,
(*CP).second);
- DEBUG(dbgs() << "\t\tCommuted: " << *(RI.getMI()));
+ LLVM_DEBUG(dbgs() << "\t\tCommuted: " << *(RI.getMI()));
}
}
}
@@ -1595,8 +1597,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
- DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
- DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
+ LLVM_DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');
if (DisablePeephole)
return false;
@@ -1667,7 +1669,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (Def != NAPhysToVirtMIs.end()) {
// A new definition of the non-allocatable physical register
// invalidates previous copies.
- DEBUG(dbgs() << "NAPhysCopy: invalidating because of " << *MI);
+ LLVM_DEBUG(dbgs()
+ << "NAPhysCopy: invalidating because of " << *MI);
NAPhysToVirtMIs.erase(Def);
}
}
@@ -1676,7 +1679,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
for (auto &RegMI : NAPhysToVirtMIs) {
unsigned Def = RegMI.first;
if (MachineOperand::clobbersPhysReg(RegMask, Def)) {
- DEBUG(dbgs() << "NAPhysCopy: invalidating because of " << *MI);
+ LLVM_DEBUG(dbgs()
+ << "NAPhysCopy: invalidating because of " << *MI);
NAPhysToVirtMIs.erase(Def);
}
}
@@ -1692,7 +1696,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
// don't know what's correct anymore.
//
// FIXME: handle explicit asm clobbers.
- DEBUG(dbgs() << "NAPhysCopy: blowing away all info due to " << *MI);
+ LLVM_DEBUG(dbgs() << "NAPhysCopy: blowing away all info due to "
+ << *MI);
NAPhysToVirtMIs.clear();
}
@@ -1768,8 +1773,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
TII->optimizeLoadInstr(*MI, MRI, FoldAsLoadDefReg, DefMI)) {
// Update LocalMIs since we replaced MI with FoldMI and deleted
// DefMI.
- DEBUG(dbgs() << "Replacing: " << *MI);
- DEBUG(dbgs() << " With: " << *FoldMI);
+ LLVM_DEBUG(dbgs() << "Replacing: " << *MI);
+ LLVM_DEBUG(dbgs() << " With: " << *FoldMI);
LocalMIs.erase(MI);
LocalMIs.erase(DefMI);
LocalMIs.insert(FoldMI);
@@ -1791,7 +1796,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
// the load candidates. Note: We might be able to fold *into* this
// instruction, so this needs to be after the folding logic.
if (MI->isLoadFoldBarrier()) {
- DEBUG(dbgs() << "Encountered load fold barrier on " << *MI);
+ LLVM_DEBUG(dbgs() << "Encountered load fold barrier on " << *MI);
FoldAsLoadDefCandidates.clear();
}
}
diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp
index 2e5e6754bd29..215da630caf4 100644
--- a/lib/CodeGen/PostRASchedulerList.cpp
+++ b/lib/CodeGen/PostRASchedulerList.cpp
@@ -243,11 +243,11 @@ void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
- DEBUG({
- dbgs() << "*** Final schedule ***\n";
- dumpSchedule();
- dbgs() << '\n';
- });
+ LLVM_DEBUG({
+ dbgs() << "*** Final schedule ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
ScheduleDAGInstrs::exitRegion();
}
@@ -309,7 +309,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
: TargetSubtargetInfo::ANTIDEP_NONE);
}
- DEBUG(dbgs() << "PostRAScheduler\n");
+ LLVM_DEBUG(dbgs() << "PostRAScheduler\n");
SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
CriticalPathRCs);
@@ -413,13 +413,12 @@ void SchedulePostRATDList::schedule() {
postprocessDAG();
- DEBUG(dbgs() << "********** List Scheduling **********\n");
- DEBUG(
- for (const SUnit &SU : SUnits) {
- SU.dumpAll(this);
- dbgs() << '\n';
- }
- );
+ LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
+ LLVM_DEBUG(for (const SUnit &SU
+ : SUnits) {
+ SU.dumpAll(this);
+ dbgs() << '\n';
+ });
AvailableQueue.initNodes(SUnits);
ListScheduleTopDown();
@@ -502,8 +501,8 @@ void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
- DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
- DEBUG(SU->dump(this));
+ LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
+ LLVM_DEBUG(SU->dump(this));
Sequence.push_back(SU);
assert(CurCycle >= SU->getDepth() &&
@@ -517,7 +516,7 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
- DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
+ LLVM_DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
HazardRec->EmitNoop();
Sequence.push_back(nullptr); // NULL here means noop
++NumNoops;
@@ -569,7 +568,8 @@ void SchedulePostRATDList::ListScheduleTopDown() {
MinDepth = PendingQueue[i]->getDepth();
}
- DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));
+ LLVM_DEBUG(dbgs() << "\n*** Examining Available\n";
+ AvailableQueue.dump(this));
SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
bool HasNoopHazards = false;
@@ -605,7 +605,8 @@ void SchedulePostRATDList::ListScheduleTopDown() {
// non-preferred node.
if (NotPreferredSUnit) {
if (!FoundSUnit) {
- DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
+ LLVM_DEBUG(
+ dbgs() << "*** Will schedule a non-preferred instruction...\n");
FoundSUnit = NotPreferredSUnit;
} else {
AvailableQueue.push(NotPreferredSUnit);
@@ -632,19 +633,20 @@ void SchedulePostRATDList::ListScheduleTopDown() {
HazardRec->EmitInstruction(FoundSUnit);
CycleHasInsts = true;
if (HazardRec->atIssueLimit()) {
- DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
+ LLVM_DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle
+ << '\n');
HazardRec->AdvanceCycle();
++CurCycle;
CycleHasInsts = false;
}
} else {
if (CycleHasInsts) {
- DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
+ LLVM_DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
HazardRec->AdvanceCycle();
} else if (!HasNoopHazards) {
// Otherwise, we have a pipeline stall, but no other problem,
// just advance the current cycle and try again.
- DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
+ LLVM_DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
HazardRec->AdvanceCycle();
++NumStalls;
} else {
diff --git a/lib/CodeGen/ProcessImplicitDefs.cpp b/lib/CodeGen/ProcessImplicitDefs.cpp
index 48b48c5f6499..3bc5809090fe 100644
--- a/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -73,7 +73,7 @@ bool ProcessImplicitDefs::canTurnIntoImplicitDef(MachineInstr *MI) {
}
void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
- DEBUG(dbgs() << "Processing " << *MI);
+ LLVM_DEBUG(dbgs() << "Processing " << *MI);
unsigned Reg = MI->getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
@@ -84,7 +84,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
MachineInstr *UserMI = MO.getParent();
if (!canTurnIntoImplicitDef(UserMI))
continue;
- DEBUG(dbgs() << "Converting to IMPLICIT_DEF: " << *UserMI);
+ LLVM_DEBUG(dbgs() << "Converting to IMPLICIT_DEF: " << *UserMI);
UserMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
WorkList.insert(UserMI);
}
@@ -116,7 +116,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
// If we found the using MI, we can erase the IMPLICIT_DEF.
if (Found) {
- DEBUG(dbgs() << "Physreg user: " << *UserMI);
+ LLVM_DEBUG(dbgs() << "Physreg user: " << *UserMI);
MI->eraseFromParent();
return;
}
@@ -125,15 +125,15 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
// Leave the physreg IMPLICIT_DEF, but trim any extra operands.
for (unsigned i = MI->getNumOperands() - 1; i; --i)
MI->RemoveOperand(i);
- DEBUG(dbgs() << "Keeping physreg: " << *MI);
+ LLVM_DEBUG(dbgs() << "Keeping physreg: " << *MI);
}
/// processImplicitDefs - Process IMPLICIT_DEF instructions and turn them into
/// <undef> operands.
bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
+ << "********** Function: " << MF.getName() << '\n');
bool Changed = false;
@@ -154,8 +154,8 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) {
if (WorkList.empty())
continue;
- DEBUG(dbgs() << printMBBReference(*MFI) << " has " << WorkList.size()
- << " implicit defs.\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MFI) << " has " << WorkList.size()
+ << " implicit defs.\n");
Changed = true;
// Drain the WorkList to recursively process any new implicit defs.
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index 5e431f9761ab..e2ee4e501a03 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -564,10 +564,12 @@ AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
Offset = alignTo(Offset, Align, Skew);
if (StackGrowsDown) {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
+ << "]\n");
MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
} else {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
+ << "]\n");
MFI.setObjectOffset(FrameIdx, Offset);
Offset += MFI.getObjectSize(FrameIdx);
}
@@ -660,12 +662,12 @@ static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
if (StackGrowsDown) {
int ObjStart = -(FreeStart + ObjSize);
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << ObjStart
- << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
+ << ObjStart << "]\n");
MFI.setObjectOffset(FrameIdx, ObjStart);
} else {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << FreeStart
- << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
+ << FreeStart << "]\n");
MFI.setObjectOffset(FrameIdx, FreeStart);
}
@@ -745,7 +747,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Adjust to alignment boundary
Offset = alignTo(Offset, Align, Skew);
- DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
MFI.setObjectOffset(i, -Offset); // Set the computed offset
}
} else if (MaxCSFrameIndex >= MinCSFrameIndex) {
@@ -758,7 +760,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Adjust to alignment boundary
Offset = alignTo(Offset, Align, Skew);
- DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
MFI.setObjectOffset(i, Offset);
Offset += MFI.getObjectSize(i);
}
@@ -795,14 +797,14 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Adjust to alignment boundary.
Offset = alignTo(Offset, Align, Skew);
- DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
+ LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
// Resolve offsets for objects in the local block.
for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
- DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
- FIOffset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
+ << "]\n");
MFI.setObjectOffset(Entry.first, FIOffset);
}
// Allocate the local block
diff --git a/lib/CodeGen/ReachingDefAnalysis.cpp b/lib/CodeGen/ReachingDefAnalysis.cpp
index 31eb8c9b2643..050fef5d25ed 100644
--- a/lib/CodeGen/ReachingDefAnalysis.cpp
+++ b/lib/CodeGen/ReachingDefAnalysis.cpp
@@ -47,7 +47,7 @@ void ReachingDefAnalysis::enterBasicBlock(
MBBReachingDefs[MBBNumber][*Unit].push_back(LiveRegs[*Unit]);
}
}
- DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n");
return;
}
@@ -69,9 +69,9 @@ void ReachingDefAnalysis::enterBasicBlock(
}
}
- DEBUG(dbgs() << printMBBReference(*MBB)
- << (!TraversedMBB.IsDone ? ": incomplete\n"
- : ": all preds known\n"));
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB)
+ << (!TraversedMBB.IsDone ? ": incomplete\n"
+ : ": all preds known\n"));
}
void ReachingDefAnalysis::leaveBasicBlock(
@@ -109,8 +109,8 @@ void ReachingDefAnalysis::processDefs(MachineInstr *MI) {
continue;
for (MCRegUnitIterator Unit(MO.getReg(), TRI); Unit.isValid(); ++Unit) {
// This instruction explicitly defines the current reg unit.
- DEBUG(dbgs() << printReg(MO.getReg(), TRI) << ":\t" << CurInstr << '\t'
- << *MI);
+ LLVM_DEBUG(dbgs() << printReg(MO.getReg(), TRI) << ":\t" << CurInstr
+ << '\t' << *MI);
// How many instructions since this reg unit was last written?
LiveRegs[*Unit] = CurInstr;
@@ -142,7 +142,7 @@ bool ReachingDefAnalysis::runOnMachineFunction(MachineFunction &mf) {
MBBReachingDefs.resize(mf.getNumBlockIDs());
- DEBUG(dbgs() << "********** REACHING DEFINITION ANALYSIS **********\n");
+ LLVM_DEBUG(dbgs() << "********** REACHING DEFINITION ANALYSIS **********\n");
// Initialize the MBBOutRegsInfos
MBBOutRegsInfos.resize(mf.getNumBlockIDs());
diff --git a/lib/CodeGen/RegAllocBase.cpp b/lib/CodeGen/RegAllocBase.cpp
index 74c1592634aa..bc28a054c680 100644
--- a/lib/CodeGen/RegAllocBase.cpp
+++ b/lib/CodeGen/RegAllocBase.cpp
@@ -91,7 +91,7 @@ void RegAllocBase::allocatePhysRegs() {
// Unused registers can appear when the spiller coalesces snippets.
if (MRI->reg_nodbg_empty(VirtReg->reg)) {
- DEBUG(dbgs() << "Dropping unused " << *VirtReg << '\n');
+ LLVM_DEBUG(dbgs() << "Dropping unused " << *VirtReg << '\n');
aboutToRemoveInterval(*VirtReg);
LIS->removeInterval(VirtReg->reg);
continue;
@@ -103,9 +103,9 @@ void RegAllocBase::allocatePhysRegs() {
// selectOrSplit requests the allocator to return an available physical
// register if possible and populate a list of new live intervals that
// result from splitting.
- DEBUG(dbgs() << "\nselectOrSplit "
- << TRI->getRegClassName(MRI->getRegClass(VirtReg->reg))
- << ':' << *VirtReg << " w=" << VirtReg->weight << '\n');
+ LLVM_DEBUG(dbgs() << "\nselectOrSplit "
+ << TRI->getRegClassName(MRI->getRegClass(VirtReg->reg))
+ << ':' << *VirtReg << " w=" << VirtReg->weight << '\n');
using VirtRegVec = SmallVector<unsigned, 4>;
@@ -145,12 +145,12 @@ void RegAllocBase::allocatePhysRegs() {
assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
assert(SplitVirtReg->empty() && "Non-empty but used interval");
- DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
+ LLVM_DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
aboutToRemoveInterval(*SplitVirtReg);
LIS->removeInterval(SplitVirtReg->reg);
continue;
}
- DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
+ LLVM_DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
"expect split value in virtual register");
enqueue(SplitVirtReg);
diff --git a/lib/CodeGen/RegAllocBasic.cpp b/lib/CodeGen/RegAllocBasic.cpp
index 1125d2c62bef..daeff3fc3963 100644
--- a/lib/CodeGen/RegAllocBasic.cpp
+++ b/lib/CodeGen/RegAllocBasic.cpp
@@ -219,8 +219,8 @@ bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
Intfs.push_back(Intf);
}
}
- DEBUG(dbgs() << "spilling " << printReg(PhysReg, TRI)
- << " interferences with " << VirtReg << "\n");
+ LLVM_DEBUG(dbgs() << "spilling " << printReg(PhysReg, TRI)
+ << " interferences with " << VirtReg << "\n");
assert(!Intfs.empty() && "expected interference");
// Spill each interfering vreg allocated to PhysReg or an alias.
@@ -292,7 +292,7 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
}
// No other spill candidates were found, so spill the current VirtReg.
- DEBUG(dbgs() << "spilling: " << VirtReg << '\n');
+ LLVM_DEBUG(dbgs() << "spilling: " << VirtReg << '\n');
if (!VirtReg.isSpillable())
return ~0u;
LiveRangeEdit LRE(&VirtReg, SplitVRegs, *MF, *LIS, VRM, this, &DeadRemats);
@@ -304,9 +304,8 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
}
bool RABasic::runOnMachineFunction(MachineFunction &mf) {
- DEBUG(dbgs() << "********** BASIC REGISTER ALLOCATION **********\n"
- << "********** Function: "
- << mf.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** BASIC REGISTER ALLOCATION **********\n"
+ << "********** Function: " << mf.getName() << '\n');
MF = &mf;
RegAllocBase::init(getAnalysis<VirtRegMap>(),
@@ -323,7 +322,7 @@ bool RABasic::runOnMachineFunction(MachineFunction &mf) {
postOptimization();
// Diagnostic output before rewriting
- DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *VRM << "\n");
+ LLVM_DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *VRM << "\n");
releaseMemory();
return true;
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index 241f680e2e83..e2980d175e77 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -322,11 +322,11 @@ void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
// instruction, not on the spill.
bool SpillKill = MachineBasicBlock::iterator(LR.LastUse) != MI;
LR.Dirty = false;
- DEBUG(dbgs() << "Spilling " << printReg(LRI->VirtReg, TRI)
- << " in " << printReg(LR.PhysReg, TRI));
+ LLVM_DEBUG(dbgs() << "Spilling " << printReg(LRI->VirtReg, TRI) << " in "
+ << printReg(LR.PhysReg, TRI));
const TargetRegisterClass &RC = *MRI->getRegClass(LRI->VirtReg);
int FI = getStackSpaceFor(LRI->VirtReg, RC);
- DEBUG(dbgs() << " to stack slot #" << FI << "\n");
+ LLVM_DEBUG(dbgs() << " to stack slot #" << FI << "\n");
TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, &RC, TRI);
++NumStores; // Update statistics
@@ -339,7 +339,9 @@ void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
MachineInstr *NewDV = buildDbgValueForSpill(*MBB, MI, *DBG, FI);
assert(NewDV->getParent() == MBB && "dangling parent pointer");
(void)NewDV;
- DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV);
+ LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:"
+ << "\n"
+ << *NewDV);
}
    // Now this register is spilled there should not be any DBG_VALUE
// pointing to this register because they are all pointing to spilled value
@@ -476,7 +478,8 @@ void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
/// \returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
if (isRegUsedInInstr(PhysReg)) {
- DEBUG(dbgs() << printReg(PhysReg, TRI) << " is already used in instr.\n");
+ LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI)
+ << " is already used in instr.\n");
return spillImpossible;
}
switch (unsigned VirtReg = PhysRegState[PhysReg]) {
@@ -485,8 +488,8 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
case regFree:
return 0;
case regReserved:
- DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding "
- << printReg(PhysReg, TRI) << " is reserved already.\n");
+ LLVM_DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding "
+ << printReg(PhysReg, TRI) << " is reserved already.\n");
return spillImpossible;
default: {
LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
@@ -496,7 +499,7 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
}
// This is a disabled register, add up cost of aliases.
- DEBUG(dbgs() << printReg(PhysReg, TRI) << " is disabled.\n");
+ LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is disabled.\n");
unsigned Cost = 0;
for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
MCPhysReg Alias = *AI;
@@ -523,8 +526,8 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
/// proper container for VirtReg now. The physical register must not be used
/// for anything else when this is called.
void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) {
- DEBUG(dbgs() << "Assigning " << printReg(LR.VirtReg, TRI) << " to "
- << printReg(PhysReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Assigning " << printReg(LR.VirtReg, TRI) << " to "
+ << printReg(PhysReg, TRI) << "\n");
PhysRegState[PhysReg] = LR.VirtReg;
assert(!LR.PhysReg && "Already assigned a physreg");
LR.PhysReg = PhysReg;
@@ -570,16 +573,16 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::allocVirtReg(MachineInstr &MI,
}
}
- DEBUG(dbgs() << "Allocating " << printReg(VirtReg) << " from "
- << TRI->getRegClassName(&RC) << "\n");
+ LLVM_DEBUG(dbgs() << "Allocating " << printReg(VirtReg) << " from "
+ << TRI->getRegClassName(&RC) << "\n");
unsigned BestReg = 0;
unsigned BestCost = spillImpossible;
for (MCPhysReg PhysReg : AO) {
unsigned Cost = calcSpillCost(PhysReg);
- DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << "\n");
- DEBUG(dbgs() << "\tCost: " << Cost << "\n");
- DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n");
+ LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "\tCost: " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n");
// Cost is 0 when all aliases are already disabled.
if (Cost == 0) {
assignVirtToPhysReg(*LRI, PhysReg);
@@ -654,22 +657,22 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::reloadVirtReg(MachineInstr &MI,
LRI = allocVirtReg(MI, LRI, Hint);
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
int FrameIndex = getStackSpaceFor(VirtReg, RC);
- DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
- << printReg(LRI->PhysReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
+ << printReg(LRI->PhysReg, TRI) << "\n");
TII->loadRegFromStackSlot(*MBB, MI, LRI->PhysReg, FrameIndex, &RC, TRI);
++NumLoads;
} else if (LRI->Dirty) {
if (isLastUseOfLocalReg(MO)) {
- DEBUG(dbgs() << "Killing last use: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Killing last use: " << MO << "\n");
if (MO.isUse())
MO.setIsKill();
else
MO.setIsDead();
} else if (MO.isKill()) {
- DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n");
MO.setIsKill(false);
} else if (MO.isDead()) {
- DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n");
MO.setIsDead(false);
}
} else if (MO.isKill()) {
@@ -677,10 +680,10 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::reloadVirtReg(MachineInstr &MI,
// register would be killed immediately, and there might be a second use:
// %foo = OR killed %x, %x
// This would cause a second reload of %x into a different register.
- DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
MO.setIsKill(false);
} else if (MO.isDead()) {
- DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
MO.setIsDead(false);
}
assert(LRI->PhysReg && "Register not assigned");
@@ -727,7 +730,7 @@ bool RegAllocFast::setPhysReg(MachineInstr &MI, unsigned OpNum,
// there are additional physreg defines.
void RegAllocFast::handleThroughOperands(MachineInstr &MI,
SmallVectorImpl<unsigned> &VirtDead) {
- DEBUG(dbgs() << "Scanning for through registers:");
+ LLVM_DEBUG(dbgs() << "Scanning for through registers:");
SmallSet<unsigned, 8> ThroughRegs;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg()) continue;
@@ -737,13 +740,13 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
(MO.getSubReg() && MI.readsVirtualRegister(Reg))) {
if (ThroughRegs.insert(Reg).second)
- DEBUG(dbgs() << ' ' << printReg(Reg));
+ LLVM_DEBUG(dbgs() << ' ' << printReg(Reg));
}
}
// If any physreg defines collide with preallocated through registers,
// we must spill and reallocate.
- DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
+ LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
@@ -756,7 +759,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
}
SmallVector<unsigned, 8> PartialDefs;
- DEBUG(dbgs() << "Allocating tied uses.\n");
+ LLVM_DEBUG(dbgs() << "Allocating tied uses.\n");
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
@@ -764,15 +767,16 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
if (!MO.isTied()) continue;
- DEBUG(dbgs() << "Operand " << I << "("<< MO << ") is tied to operand "
- << MI.findTiedOperandIdx(I) << ".\n");
+ LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO
+ << ") is tied to operand " << MI.findTiedOperandIdx(I)
+ << ".\n");
LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, 0);
MCPhysReg PhysReg = LRI->PhysReg;
setPhysReg(MI, I, PhysReg);
// Note: we don't update the def operand yet. That would cause the normal
// def-scan to attempt spilling.
} else if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) {
- DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
// Reload the register, but don't assign to the operand just yet.
// That would confuse the later phys-def processing pass.
LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, 0);
@@ -780,7 +784,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
}
}
- DEBUG(dbgs() << "Allocating early clobbers.\n");
+ LLVM_DEBUG(dbgs() << "Allocating early clobbers.\n");
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
@@ -801,8 +805,8 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
unsigned Reg = MO.getReg();
if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
- DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
- << " as used in instr\n");
+ LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
+ << " as used in instr\n");
markRegUsedInInstr(Reg);
}
@@ -848,7 +852,7 @@ void RegAllocFast::dumpState() {
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
this->MBB = &MBB;
- DEBUG(dbgs() << "\nAllocating " << MBB);
+ LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);
PhysRegState.assign(TRI->getNumRegs(), regDisabled);
assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
@@ -866,10 +870,7 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
// Otherwise, sequentially allocate each instruction in the MBB.
for (MachineInstr &MI : MBB) {
const MCInstrDesc &MCID = MI.getDesc();
- DEBUG(
- dbgs() << "\n>> " << MI << "Regs:";
- dumpState()
- );
+ LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());
// Debug values are not allowed to change codegen in any way.
if (MI.isDebugValue()) {
@@ -894,13 +895,13 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
if (SS != -1) {
// Modify DBG_VALUE now that the value is in a spill slot.
updateDbgValueForSpill(*DebugMI, SS);
- DEBUG(dbgs() << "Modifying debug info due to spill:"
- << "\t" << *DebugMI);
+ LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:"
+ << "\t" << *DebugMI);
continue;
}
// We can't allocate a physreg for a DebugValue, sorry!
- DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
+ LLVM_DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
MO.setReg(0);
}
@@ -1028,7 +1029,7 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
// as call-clobbered, this is not correct because some of those
// definitions may be used later on and we do not want to reuse
// those for virtual registers in between.
- DEBUG(dbgs() << " Spilling remaining registers before call.\n");
+ LLVM_DEBUG(dbgs() << " Spilling remaining registers before call.\n");
spillAll(MI);
}
@@ -1063,15 +1064,15 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
VirtDead.clear();
if (CopyDstReg && CopyDstReg == CopySrcReg && CopyDstSub == CopySrcSub) {
- DEBUG(dbgs() << "-- coalescing: " << MI);
+ LLVM_DEBUG(dbgs() << "-- coalescing: " << MI);
Coalesced.push_back(&MI);
} else {
- DEBUG(dbgs() << "<< " << MI);
+ LLVM_DEBUG(dbgs() << "<< " << MI);
}
}
// Spill all physical registers holding virtual registers now.
- DEBUG(dbgs() << "Spilling live registers at end of block.\n");
+ LLVM_DEBUG(dbgs() << "Spilling live registers at end of block.\n");
spillAll(MBB.getFirstTerminator());
// Erase all the coalesced copies. We are delaying it until now because
@@ -1080,13 +1081,13 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
MBB.erase(MI);
NumCopies += Coalesced.size();
- DEBUG(MBB.dump());
+ LLVM_DEBUG(MBB.dump());
}
/// Allocates registers for a function.
bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
+ << "********** Function: " << MF.getName() << '\n');
MRI = &MF.getRegInfo();
const TargetSubtargetInfo &STI = MF.getSubtarget();
TRI = STI.getRegisterInfo();
diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp
index 04b5393d79dc..182d9665098b 100644
--- a/lib/CodeGen/RegAllocGreedy.cpp
+++ b/lib/CodeGen/RegAllocGreedy.cpp
@@ -766,7 +766,7 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
// preferred register.
if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
if (Order.isHint(Hint)) {
- DEBUG(dbgs() << "missed hint " << printReg(Hint, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "missed hint " << printReg(Hint, TRI) << '\n');
EvictionCost MaxCost;
MaxCost.setBrokenHints(1);
if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
@@ -785,8 +785,8 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
if (!Cost)
return PhysReg;
- DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost " << Cost
- << '\n');
+ LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
+ << Cost << '\n');
unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
return CheapReg ? CheapReg : PhysReg;
}
@@ -814,9 +814,9 @@ unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
break;
}
if (PhysReg)
- DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
- << printReg(PrevReg, TRI) << " to " << printReg(PhysReg, TRI)
- << '\n');
+ LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
+ << printReg(PrevReg, TRI) << " to "
+ << printReg(PhysReg, TRI) << '\n');
return PhysReg;
}
@@ -843,7 +843,7 @@ bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
return true;
if (A.weight > B.weight) {
- DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
+ LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
return true;
}
return false;
@@ -1035,8 +1035,8 @@ void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
if (!Cascade)
Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
- DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
- << " interference: Cascade " << Cascade << '\n');
+ LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
+ << " interference: Cascade " << Cascade << '\n');
// Collect all interfering virtregs first.
SmallVector<LiveInterval*, 8> Intfs;
@@ -1107,8 +1107,8 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
unsigned MinCost = RegClassInfo.getMinCost(RC);
if (MinCost >= CostPerUseLimit) {
- DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = " << MinCost
- << ", no cheaper registers to be found.\n");
+ LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
+ << MinCost << ", no cheaper registers to be found.\n");
return 0;
}
@@ -1116,7 +1116,8 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
// the same cost. We don't need to look at them if they're too expensive.
if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
OrderLimit = RegClassInfo.getLastCostChange(RC);
- DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
+ LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
+ << " regs.\n");
}
}
@@ -1127,9 +1128,10 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
// The first use of a callee-saved register in a function has cost 1.
// Don't start using a CSR when the CostPerUseLimit is low.
if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
- DEBUG(dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
- << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
- << '\n');
+ LLVM_DEBUG(
+ dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
+ << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
+ << '\n');
continue;
}
@@ -1316,7 +1318,7 @@ void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
// Perhaps iterating can enable more bundles?
SpillPlacer->iterate();
}
- DEBUG(dbgs() << ", v=" << Visited);
+ LLVM_DEBUG(dbgs() << ", v=" << Visited);
}
/// calcCompactRegion - Compute the set of edge bundles that should be live
@@ -1334,7 +1336,7 @@ bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
// Compact regions don't correspond to any physreg.
Cand.reset(IntfCache, 0);
- DEBUG(dbgs() << "Compact region bundles");
+ LLVM_DEBUG(dbgs() << "Compact region bundles");
// Use the spill placer to determine the live bundles. GrowRegion pretends
// that all the through blocks have interference when PhysReg is unset.
@@ -1343,7 +1345,7 @@ bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
// The static split cost will be zero since Cand.Intf reports no interference.
BlockFrequency Cost;
if (!addSplitConstraints(Cand.Intf, Cost)) {
- DEBUG(dbgs() << ", none.\n");
+ LLVM_DEBUG(dbgs() << ", none.\n");
return false;
}
@@ -1351,11 +1353,11 @@ bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
SpillPlacer->finish();
if (!Cand.LiveBundles.any()) {
- DEBUG(dbgs() << ", none.\n");
+ LLVM_DEBUG(dbgs() << ", none.\n");
return false;
}
- DEBUG({
+ LLVM_DEBUG({
for (int i : Cand.LiveBundles.set_bits())
dbgs() << " EB#" << i;
dbgs() << ".\n";
@@ -1633,7 +1635,8 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
// These are the intervals created for new global ranges. We may create more
// intervals for local ranges.
const unsigned NumGlobalIntvs = LREdit.size();
- DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
+ LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
+ << " globals.\n");
assert(NumGlobalIntvs && "No global intervals configured");
// Isolate even single instructions when dealing with a proper sub-class.
@@ -1670,7 +1673,7 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
// Create separate intervals for isolated blocks with multiple uses.
if (!IntvIn && !IntvOut) {
- DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
SE->splitSingleBlock(BI);
continue;
@@ -1752,8 +1755,8 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
// blocks is strictly decreasing.
if (IntvMap[i] < NumGlobalIntvs) {
if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
- DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
- << " blocks as original.\n");
+ LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
+ << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
setStage(Reg, RS_Split2);
}
@@ -1784,8 +1787,8 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// No benefit from the compact region, our fallback will be per-block
// splitting. Make sure we find a solution that is cheaper than spilling.
BestCost = SpillCost;
- DEBUG(dbgs() << "Cost of isolating all blocks = ";
- MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
+ LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
+ MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
}
bool CanCauseEvictionChain = false;
@@ -1848,13 +1851,13 @@ unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
SpillPlacer->prepare(Cand.LiveBundles);
BlockFrequency Cost;
if (!addSplitConstraints(Cand.Intf, Cost)) {
- DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
+ LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
continue;
}
- DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
- MBFI->printBlockFreq(dbgs(), Cost));
+ LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
+ MBFI->printBlockFreq(dbgs(), Cost));
if (Cost >= BestCost) {
- DEBUG({
+ LLVM_DEBUG({
if (BestCand == NoCand)
dbgs() << " worse than no bundles\n";
else
@@ -1869,15 +1872,15 @@ unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
// No live bundles, defer to splitSingleBlocks().
if (!Cand.LiveBundles.any()) {
- DEBUG(dbgs() << " no bundles.\n");
+ LLVM_DEBUG(dbgs() << " no bundles.\n");
continue;
}
bool HasEvictionChain = false;
Cost += calcGlobalSplitCost(Cand, Order, &HasEvictionChain);
- DEBUG({
- dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost)
- << " with bundles";
+ LLVM_DEBUG({
+ dbgs() << ", total = ";
+ MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
for (int i : Cand.LiveBundles.set_bits())
dbgs() << " EB#" << i;
dbgs() << ".\n";
@@ -1896,11 +1899,11 @@ unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
if (CanCauseEvictionChain && BestCand != NoCand) {
// See splitCanCauseEvictionChain for detailed description of bad
// eviction chain scenarios.
- DEBUG(dbgs() << "Best split candidate of vreg "
- << printReg(VirtReg.reg, TRI) << " may ");
+ LLVM_DEBUG(dbgs() << "Best split candidate of vreg "
+ << printReg(VirtReg.reg, TRI) << " may ");
if (!(*CanCauseEvictionChain))
- DEBUG(dbgs() << "not ");
- DEBUG(dbgs() << "cause bad eviction chain\n");
+ LLVM_DEBUG(dbgs() << "not ");
+ LLVM_DEBUG(dbgs() << "cause bad eviction chain\n");
}
return BestCand;
@@ -1923,8 +1926,8 @@ unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
UsedCands.push_back(BestCand);
Cand.IntvIdx = SE->openIntv();
- DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
- << B << " bundles, intv " << Cand.IntvIdx << ".\n");
+ LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
+ << B << " bundles, intv " << Cand.IntvIdx << ".\n");
(void)B;
}
}
@@ -1936,8 +1939,8 @@ unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
if (unsigned B = Cand.getBundles(BundleCand, 0)) {
UsedCands.push_back(0);
Cand.IntvIdx = SE->openIntv();
- DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
- << Cand.IntvIdx << ".\n");
+ LLVM_DEBUG(dbgs() << "Split for compact region in " << B
+ << " bundles, intv " << Cand.IntvIdx << ".\n");
(void)B;
}
}
@@ -2036,7 +2039,8 @@ RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
if (Uses.size() <= 1)
return 0;
- DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");
+ LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
+ << " individual instrs.\n");
const TargetRegisterClass *SuperRC =
TRI->getLargestLegalSuperClass(CurRC, *MF);
@@ -2051,7 +2055,7 @@ RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SuperRCNumAllocatableRegs ==
getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
TRI, RCI)) {
- DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
continue;
}
SE->openIntv();
@@ -2061,7 +2065,7 @@ RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
}
if (LREdit.empty()) {
- DEBUG(dbgs() << "All uses were copies.\n");
+ LLVM_DEBUG(dbgs() << "All uses were copies.\n");
return 0;
}
@@ -2179,7 +2183,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
return 0;
const unsigned NumGaps = Uses.size()-1;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "tryLocalSplit: ";
for (unsigned i = 0, e = Uses.size(); i != e; ++i)
dbgs() << ' ' << Uses[i];
@@ -2192,7 +2196,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
if (Matrix->checkRegMaskInterference(VirtReg)) {
// Get regmask slots for the whole block.
ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
- DEBUG(dbgs() << RMS.size() << " regmasks in block:");
+ LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
// Constrain to VirtReg's live range.
unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
Uses.front().getRegSlot()) - RMS.begin();
@@ -2206,14 +2210,15 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// overlap the live range.
if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
break;
- DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
+ LLVM_DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-'
+ << Uses[i + 1]);
RegMaskGaps.push_back(i);
// Advance ri to the next gap. A regmask on one of the uses counts in
// both gaps.
while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
++ri;
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
// Since we allow local split results to be split again, there is a risk of
@@ -2272,13 +2277,12 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
- DEBUG(dbgs() << printReg(PhysReg, TRI) << ' '
- << Uses[SplitBefore] << '-' << Uses[SplitAfter]
- << " i=" << MaxGap);
+ LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
+ << '-' << Uses[SplitAfter] << " i=" << MaxGap);
// Stop before the interval gets so big we wouldn't be making progress.
if (!LiveBefore && !LiveAfter) {
- DEBUG(dbgs() << " all\n");
+ LLVM_DEBUG(dbgs() << " all\n");
break;
}
// Should the interval be extended or shrunk?
@@ -2303,12 +2307,12 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1);
// Would this split be possible to allocate?
// Never allocate all gaps, we wouldn't be making progress.
- DEBUG(dbgs() << " w=" << EstWeight);
+ LLVM_DEBUG(dbgs() << " w=" << EstWeight);
if (EstWeight * Hysteresis >= MaxGap) {
Shrink = false;
float Diff = EstWeight - MaxGap;
if (Diff > BestDiff) {
- DEBUG(dbgs() << " (best)");
+ LLVM_DEBUG(dbgs() << " (best)");
BestDiff = Hysteresis * Diff;
BestBefore = SplitBefore;
BestAfter = SplitAfter;
@@ -2319,7 +2323,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// Try to shrink.
if (Shrink) {
if (++SplitBefore < SplitAfter) {
- DEBUG(dbgs() << " shrink\n");
+ LLVM_DEBUG(dbgs() << " shrink\n");
// Recompute the max when necessary.
if (GapWeight[SplitBefore - 1] >= MaxGap) {
MaxGap = GapWeight[SplitBefore];
@@ -2333,11 +2337,11 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// Try to extend the interval.
if (SplitAfter >= NumGaps) {
- DEBUG(dbgs() << " end\n");
+ LLVM_DEBUG(dbgs() << " end\n");
break;
}
- DEBUG(dbgs() << " extend\n");
+ LLVM_DEBUG(dbgs() << " extend\n");
MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
}
}
@@ -2346,9 +2350,9 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
if (BestBefore == NumGaps)
return 0;
- DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
- << '-' << Uses[BestAfter] << ", " << BestDiff
- << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
+ LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
+ << Uses[BestAfter] << ", " << BestDiff << ", "
+ << (BestAfter - BestBefore + 1) << " instrs\n");
LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
SE->reset(LREdit);
@@ -2368,14 +2372,14 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
if (NewGaps >= NumGaps) {
- DEBUG(dbgs() << "Tagging non-progress ranges: ");
+ LLVM_DEBUG(dbgs() << "Tagging non-progress ranges: ");
assert(!ProgressRequired && "Didn't make progress when it was required.");
for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
if (IntvMap[i] == 1) {
setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
- DEBUG(dbgs() << printReg(LREdit.get(i)));
+ LLVM_DEBUG(dbgs() << printReg(LREdit.get(i)));
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
++NumLocalSplits;
@@ -2468,7 +2472,7 @@ RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
// chances are one would not be recolorable.
if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
- DEBUG(dbgs() << "Early abort: too many interferences.\n");
+ LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
CutOffInfo |= CO_Interf;
return false;
}
@@ -2482,7 +2486,8 @@ RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
MRI->getRegClass(Intf->reg) == CurRC) &&
!(hasTiedDef(MRI, VirtReg.reg) && !hasTiedDef(MRI, Intf->reg))) ||
FixedRegisters.count(Intf->reg)) {
- DEBUG(dbgs() << "Early abort: the interference is not recolorable.\n");
+ LLVM_DEBUG(
+ dbgs() << "Early abort: the interference is not recolorable.\n");
return false;
}
RecoloringCandidates.insert(Intf);
@@ -2535,7 +2540,7 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &NewVRegs,
SmallVirtRegSet &FixedRegisters,
unsigned Depth) {
- DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
+ LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
// Ranges must be Done.
assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
"Last chance recoloring should really be last chance");
@@ -2544,7 +2549,7 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
// for target with hundreds of registers.
// Indeed, in that case we may want to cut the search space earlier.
if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
- DEBUG(dbgs() << "Abort because max depth has been reached.\n");
+ LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
CutOffInfo |= CO_Depth;
return ~0u;
}
@@ -2561,8 +2566,8 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
Order.rewind();
while (unsigned PhysReg = Order.next()) {
- DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
- << printReg(PhysReg, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
+ << printReg(PhysReg, TRI) << '\n');
RecoloringCandidates.clear();
VirtRegToPhysReg.clear();
CurrentNewVRegs.clear();
@@ -2570,7 +2575,8 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
// It is only possible to recolor virtual register interference.
if (Matrix->checkInterference(VirtReg, PhysReg) >
LiveRegMatrix::IK_VirtReg) {
- DEBUG(dbgs() << "Some interferences are not with virtual registers.\n");
+ LLVM_DEBUG(
+ dbgs() << "Some interferences are not with virtual registers.\n");
continue;
}
@@ -2579,7 +2585,7 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
// the interferences.
if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
FixedRegisters)) {
- DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
+ LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
continue;
}
@@ -2621,8 +2627,8 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
return PhysReg;
}
- DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
- << printReg(PhysReg, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
+ << printReg(PhysReg, TRI) << '\n');
// The recoloring attempt failed, undo the changes.
FixedRegisters = SaveFixedRegisters;
@@ -2669,7 +2675,7 @@ bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
unsigned Depth) {
while (!RecoloringQueue.empty()) {
LiveInterval *LI = dequeue(RecoloringQueue);
- DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
+ LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
unsigned PhysReg;
PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
// When splitting happens, the live-range may actually be empty.
@@ -2681,11 +2687,12 @@ bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
if (!PhysReg) {
assert(LI->empty() && "Only empty live-range do not require a register");
- DEBUG(dbgs() << "Recoloring of " << *LI << " succeeded. Empty LI.\n");
+ LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
+ << " succeeded. Empty LI.\n");
continue;
}
- DEBUG(dbgs() << "Recoloring of " << *LI
- << " succeeded with: " << printReg(PhysReg, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
+ << " succeeded with: " << printReg(PhysReg, TRI) << '\n');
Matrix->assign(*LI, PhysReg);
FixedRegisters.insert(LI->reg);
@@ -2852,8 +2859,8 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
Visited.insert(Reg);
RecoloringCandidates.push_back(Reg);
- DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI) << '('
- << printReg(PhysReg, TRI) << ")\n");
+ LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
+ << '(' << printReg(PhysReg, TRI) << ")\n");
do {
Reg = RecoloringCandidates.pop_back_val();
@@ -2874,8 +2881,8 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
Matrix->checkInterference(LI, PhysReg)))
continue;
- DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
- << ") is recolorable.\n");
+ LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
+ << ") is recolorable.\n");
// Gather the hint info.
Info.clear();
@@ -2883,19 +2890,20 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
// Check if recoloring the live-range will increase the cost of the
// non-identity copies.
if (CurrPhys != PhysReg) {
- DEBUG(dbgs() << "Checking profitability:\n");
+ LLVM_DEBUG(dbgs() << "Checking profitability:\n");
BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
- DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
- << "\nNew Cost: " << NewCopiesCost.getFrequency() << '\n');
+ LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
+ << "\nNew Cost: " << NewCopiesCost.getFrequency()
+ << '\n');
if (OldCopiesCost < NewCopiesCost) {
- DEBUG(dbgs() << "=> Not profitable.\n");
+ LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
continue;
}
// At this point, the cost is either cheaper or equal. If it is
// equal, we consider this is profitable because it may expose
// more recoloring opportunities.
- DEBUG(dbgs() << "=> Profitable.\n");
+ LLVM_DEBUG(dbgs() << "=> Profitable.\n");
// Recolor the live-range.
Matrix->unassign(LI);
Matrix->assign(LI, PhysReg);
@@ -2983,8 +2991,8 @@ unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
}
LiveRangeStage Stage = getStage(VirtReg);
- DEBUG(dbgs() << StageName[Stage]
- << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
+ LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
+ << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
// Try to evict a less worthy live range, but only for ranges from the primary
// queue. The RS_Split ranges already failed to do this, and they should not
@@ -3013,7 +3021,7 @@ unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
// This gives a better picture of the interference to split around.
if (Stage < RS_Split) {
setStage(VirtReg, RS_Split);
- DEBUG(dbgs() << "wait for second round\n");
+ LLVM_DEBUG(dbgs() << "wait for second round\n");
NewVRegs.push_back(VirtReg.reg);
return 0;
}
@@ -3042,7 +3050,7 @@ unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
// We would need a deep integration with the spiller to do the
// right thing here. Anyway, that is still good for early testing.
setStage(VirtReg, RS_Memory);
- DEBUG(dbgs() << "Do as if this register is in memory\n");
+ LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
NewVRegs.push_back(VirtReg.reg);
} else {
NamedRegionTimer T("spill", "Spiller", TimerGroupName,
@@ -3128,8 +3136,8 @@ void RAGreedy::reportNumberOfSplillsReloads(MachineLoop *L, unsigned &Reloads,
}
bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
- DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
- << "********** Function: " << mf.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
+ << "********** Function: " << mf.getName() << '\n');
MF = &mf;
TRI = MF->getSubtarget().getRegisterInfo();
@@ -3164,7 +3172,7 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
calculateSpillWeightsAndHints(*LIS, mf, VRM, *Loops, *MBFI);
- DEBUG(LIS->dump());
+ LLVM_DEBUG(LIS->dump());
SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI));
diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp
index 879330a0b4d9..c19001c8403d 100644
--- a/lib/CodeGen/RegAllocPBQP.cpp
+++ b/lib/CodeGen/RegAllocPBQP.cpp
@@ -685,8 +685,8 @@ void RegAllocPBQP::spillVReg(unsigned VReg,
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
(void)TRI;
- DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> SPILLED (Cost: "
- << LRE.getParent().weight << ", New vregs: ");
+ LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> SPILLED (Cost: "
+ << LRE.getParent().weight << ", New vregs: ");
// Copy any newly inserted live intervals into the list of regs to
// allocate.
@@ -694,11 +694,11 @@ void RegAllocPBQP::spillVReg(unsigned VReg,
I != E; ++I) {
const LiveInterval &LI = LIS.getInterval(*I);
assert(!LI.empty() && "Empty spill range.");
- DEBUG(dbgs() << printReg(LI.reg, &TRI) << " ");
+ LLVM_DEBUG(dbgs() << printReg(LI.reg, &TRI) << " ");
VRegsToAlloc.insert(LI.reg);
}
- DEBUG(dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << ")\n");
}
bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
@@ -724,8 +724,8 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAGraph &G,
if (AllocOption != PBQP::RegAlloc::getSpillOptionIdx()) {
unsigned PReg = G.getNodeMetadata(NId).getAllowedRegs()[AllocOption - 1];
- DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> "
- << TRI.getName(PReg) << "\n");
+ LLVM_DEBUG(dbgs() << "VREG " << printReg(VReg, &TRI) << " -> "
+ << TRI.getName(PReg) << "\n");
assert(PReg != 0 && "Invalid preg selected.");
VRM.assignVirt2Phys(VReg, PReg);
} else {
@@ -801,7 +801,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
MF.getRegInfo().freezeReservedRegs(MF);
- DEBUG(dbgs() << "PBQP Register Allocating for " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "PBQP Register Allocating for " << MF.getName() << "\n");
// Allocator main loop:
//
@@ -836,7 +836,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
unsigned Round = 0;
while (!PBQPAllocComplete) {
- DEBUG(dbgs() << " PBQP Regalloc round " << Round << ":\n");
+ LLVM_DEBUG(dbgs() << " PBQP Regalloc round " << Round << ":\n");
PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
initializeGraph(G, VRM, *VRegSpiller);
@@ -850,8 +850,8 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
".pbqpgraph";
std::error_code EC;
raw_fd_ostream OS(GraphFileName, EC, sys::fs::F_Text);
- DEBUG(dbgs() << "Dumping graph for round " << Round << " to \""
- << GraphFileName << "\"\n");
+ LLVM_DEBUG(dbgs() << "Dumping graph for round " << Round << " to \""
+ << GraphFileName << "\"\n");
G.dump(OS);
}
#endif
@@ -868,7 +868,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
VRegsToAlloc.clear();
EmptyIntervalVRegs.clear();
- DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << VRM << "\n");
+ LLVM_DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << VRM << "\n");
return true;
}
diff --git a/lib/CodeGen/RegUsageInfoCollector.cpp b/lib/CodeGen/RegUsageInfoCollector.cpp
index d934fa9f57d9..6a0f26b346c9 100644
--- a/lib/CodeGen/RegUsageInfoCollector.cpp
+++ b/lib/CodeGen/RegUsageInfoCollector.cpp
@@ -83,9 +83,9 @@ bool RegUsageInfoCollector::runOnMachineFunction(MachineFunction &MF) {
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const TargetMachine &TM = MF.getTarget();
- DEBUG(dbgs() << " -------------------- " << getPassName()
- << " -------------------- \n");
- DEBUG(dbgs() << "Function Name : " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << " -------------------- " << getPassName()
+ << " -------------------- \n");
+ LLVM_DEBUG(dbgs() << "Function Name : " << MF.getName() << "\n");
std::vector<uint32_t> RegMask;
@@ -101,7 +101,7 @@ bool RegUsageInfoCollector::runOnMachineFunction(MachineFunction &MF) {
PRUI->setTargetMachine(&TM);
- DEBUG(dbgs() << "Clobbered Registers: ");
+ LLVM_DEBUG(dbgs() << "Clobbered Registers: ");
const BitVector &UsedPhysRegsMask = MRI->getUsedPhysRegsMask();
auto SetRegAsDefined = [&RegMask] (unsigned Reg) {
@@ -134,15 +134,15 @@ bool RegUsageInfoCollector::runOnMachineFunction(MachineFunction &MF) {
}
} else {
++NumCSROpt;
- DEBUG(dbgs() << MF.getName()
- << " function optimized for not having CSR.\n");
+ LLVM_DEBUG(dbgs() << MF.getName()
+ << " function optimized for not having CSR.\n");
}
for (unsigned PReg = 1, PRegE = TRI->getNumRegs(); PReg < PRegE; ++PReg)
if (MachineOperand::clobbersPhysReg(&(RegMask[0]), PReg))
- DEBUG(dbgs() << printReg(PReg, TRI) << " ");
+ LLVM_DEBUG(dbgs() << printReg(PReg, TRI) << " ");
- DEBUG(dbgs() << " \n----------------------------------------\n");
+ LLVM_DEBUG(dbgs() << " \n----------------------------------------\n");
PRUI->storeUpdateRegUsageInfo(&F, std::move(RegMask));
diff --git a/lib/CodeGen/RegUsageInfoPropagate.cpp b/lib/CodeGen/RegUsageInfoPropagate.cpp
index 5b12d00e126f..7eaeea2eb2cd 100644
--- a/lib/CodeGen/RegUsageInfoPropagate.cpp
+++ b/lib/CodeGen/RegUsageInfoPropagate.cpp
@@ -105,9 +105,9 @@ bool RegUsageInfoPropagationPass::runOnMachineFunction(MachineFunction &MF) {
const Module *M = MF.getFunction().getParent();
PhysicalRegisterUsageInfo *PRUI = &getAnalysis<PhysicalRegisterUsageInfo>();
- DEBUG(dbgs() << " ++++++++++++++++++++ " << getPassName()
- << " ++++++++++++++++++++ \n");
- DEBUG(dbgs() << "MachineFunction : " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << " ++++++++++++++++++++ " << getPassName()
+ << " ++++++++++++++++++++ \n");
+ LLVM_DEBUG(dbgs() << "MachineFunction : " << MF.getName() << "\n");
const MachineFrameInfo &MFI = MF.getFrameInfo();
if (!MFI.hasCalls() && !MFI.hasTailCall())
@@ -119,9 +119,10 @@ bool RegUsageInfoPropagationPass::runOnMachineFunction(MachineFunction &MF) {
for (MachineInstr &MI : MBB) {
if (!MI.isCall())
continue;
- DEBUG(dbgs()
- << "Call Instruction Before Register Usage Info Propagation : \n");
- DEBUG(dbgs() << MI << "\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Call Instruction Before Register Usage Info Propagation : \n");
+ LLVM_DEBUG(dbgs() << MI << "\n");
auto UpdateRegMask = [&](const Function *F) {
const auto *RegMask = PRUI->getRegUsageInfo(F);
@@ -134,15 +135,17 @@ bool RegUsageInfoPropagationPass::runOnMachineFunction(MachineFunction &MF) {
if (const Function *F = findCalledFunction(*M, MI)) {
UpdateRegMask(F);
} else {
- DEBUG(dbgs() << "Failed to find call target function\n");
+ LLVM_DEBUG(dbgs() << "Failed to find call target function\n");
}
- DEBUG(dbgs() << "Call Instruction After Register Usage Info Propagation : "
- << MI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "Call Instruction After Register Usage Info Propagation : "
+ << MI << '\n');
}
}
- DEBUG(dbgs() << " +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
- "++++++ \n");
+ LLVM_DEBUG(
+ dbgs() << " +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
+ "++++++ \n");
return Changed;
}
diff --git a/lib/CodeGen/RegisterClassInfo.cpp b/lib/CodeGen/RegisterClassInfo.cpp
index 5df32db4d990..add8faec97d4 100644
--- a/lib/CodeGen/RegisterClassInfo.cpp
+++ b/lib/CodeGen/RegisterClassInfo.cpp
@@ -151,7 +151,7 @@ void RegisterClassInfo::compute(const TargetRegisterClass *RC) const {
RCI.MinCost = uint8_t(MinCost);
RCI.LastCostChange = LastCostChange;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "AllocationOrder(" << TRI->getRegClassName(RC) << ") = [";
for (unsigned I = 0; I != RCI.NumRegs; ++I)
dbgs() << ' ' << printReg(RCI.Order[I], TRI);
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index a9565fae8f1c..db5258e62700 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -568,7 +568,7 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
// in IntB, we can merge them.
if (ValS+1 != BS) return false;
- DEBUG(dbgs() << "Extending: " << printReg(IntB.reg, TRI));
+ LLVM_DEBUG(dbgs() << "Extending: " << printReg(IntB.reg, TRI));
SlotIndex FillerStart = ValS->end, FillerEnd = BS->start;
// We are about to delete CopyMI, so need to remove it as the 'instruction
@@ -594,7 +594,7 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
S.MergeValueNumberInto(SubBValNo, SubValSNo);
}
- DEBUG(dbgs() << " result = " << IntB << '\n');
+ LLVM_DEBUG(dbgs() << " result = " << IntB << '\n');
// If the source instruction was killing the source register before the
// merge, unset the isKill marker given the live range has been extended.
@@ -742,8 +742,8 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
return false;
}
- DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
- << *DefMI);
+ LLVM_DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
+ << *DefMI);
// At this point we have decided that it is legal to do this
// transformation. Start by commuting the instruction.
@@ -812,7 +812,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
if (!DVNI)
continue;
- DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
+ LLVM_DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
assert(DVNI->def == DefIdx);
BValNo = IntB.MergeValueNumberInto(DVNI, BValNo);
for (LiveInterval::SubRange &S : IntB.subranges()) {
@@ -853,11 +853,11 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
BValNo->def = AValNo->def;
addSegmentsWithValNo(IntB, BValNo, IntA, AValNo);
- DEBUG(dbgs() << "\t\textended: " << IntB << '\n');
+ LLVM_DEBUG(dbgs() << "\t\textended: " << IntB << '\n');
LIS->removeVRegDefAt(IntA, AValNo->def);
- DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
+ LLVM_DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
++numCommutes;
return true;
}
@@ -1003,8 +1003,8 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
return false;
}
- DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to "
- << printMBBReference(*CopyLeftBB) << '\t' << CopyMI);
+ LLVM_DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to "
+ << printMBBReference(*CopyLeftBB) << '\t' << CopyMI);
// Insert new copy to CopyLeftBB.
MachineInstr *NewCopyMI = BuildMI(*CopyLeftBB, InsPos, CopyMI.getDebugLoc(),
@@ -1021,8 +1021,8 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
// the deleted list.
ErasedInstrs.erase(NewCopyMI);
} else {
- DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from "
- << printMBBReference(MBB) << '\t' << CopyMI);
+ LLVM_DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from "
+ << printMBBReference(MBB) << '\t' << CopyMI);
}
// Remove CopyMI.
@@ -1277,8 +1277,9 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
bool UpdatedSubRanges = false;
for (LiveInterval::SubRange &SR : DstInt.subranges()) {
if ((SR.LaneMask & DstMask).none()) {
- DEBUG(dbgs() << "Removing undefined SubRange "
- << PrintLaneMask(SR.LaneMask) << " : " << SR << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Removing undefined SubRange "
+ << PrintLaneMask(SR.LaneMask) << " : " << SR << "\n");
// VNI is in ValNo - remove any segments in this SubRange that have this ValNo
if (VNInfo *RmValNo = SR.getVNInfoAt(CurrIdx.getRegSlot())) {
SR.removeValNo(RmValNo);
@@ -1335,7 +1336,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
}
- DEBUG(dbgs() << "Remat: " << NewMI);
+ LLVM_DEBUG(dbgs() << "Remat: " << NewMI);
++NumReMats;
// The source interval can become smaller because we removed a use.
@@ -1350,7 +1351,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// Move the debug value directly after the def of the rematerialized
// value in DstReg.
MBB->splice(std::next(NewMI.getIterator()), UseMI->getParent(), UseMI);
- DEBUG(dbgs() << "\t\tupdated: " << *UseMI);
+ LLVM_DEBUG(dbgs() << "\t\tupdated: " << *UseMI);
}
}
eliminateDeadDefs();
@@ -1388,7 +1389,7 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
} else if (SrcLI.liveAt(Idx))
return false;
- DEBUG(dbgs() << "\tEliminating copy of <undef> value\n");
+ LLVM_DEBUG(dbgs() << "\tEliminating copy of <undef> value\n");
// Remove any DstReg segments starting at the instruction.
LiveInterval &DstLI = LIS->getInterval(DstReg);
@@ -1435,7 +1436,7 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
if (isLive)
continue;
MO.setIsUndef(true);
- DEBUG(dbgs() << "\tnew undef: " << UseIdx << '\t' << MI);
+ LLVM_DEBUG(dbgs() << "\tnew undef: " << UseIdx << '\t' << MI);
}
// A def of a subregister may be a use of the other subregisters, so
@@ -1550,12 +1551,12 @@ void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
MO.substVirtReg(DstReg, SubIdx, *TRI);
}
- DEBUG({
- dbgs() << "\t\tupdated: ";
- if (!UseMI->isDebugValue())
- dbgs() << LIS->getInstructionIndex(*UseMI) << "\t";
- dbgs() << *UseMI;
- });
+ LLVM_DEBUG({
+ dbgs() << "\t\tupdated: ";
+ if (!UseMI->isDebugValue())
+ dbgs() << LIS->getInstructionIndex(*UseMI) << "\t";
+ dbgs() << *UseMI;
+ });
}
}
@@ -1564,7 +1565,7 @@ bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) {
// reserved register. This doesn't increase register pressure, so it is
// always beneficial.
if (!MRI->isReserved(CP.getDstReg())) {
- DEBUG(dbgs() << "\tCan only merge into reserved registers.\n");
+ LLVM_DEBUG(dbgs() << "\tCan only merge into reserved registers.\n");
return false;
}
@@ -1572,17 +1573,18 @@ bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) {
if (JoinVInt.containsOneValue())
return true;
- DEBUG(dbgs() << "\tCannot join complex intervals into reserved register.\n");
+ LLVM_DEBUG(
+ dbgs() << "\tCannot join complex intervals into reserved register.\n");
return false;
}
bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
Again = false;
- DEBUG(dbgs() << LIS->getInstructionIndex(*CopyMI) << '\t' << *CopyMI);
+ LLVM_DEBUG(dbgs() << LIS->getInstructionIndex(*CopyMI) << '\t' << *CopyMI);
CoalescerPair CP(*TRI);
if (!CP.setRegisters(CopyMI)) {
- DEBUG(dbgs() << "\tNot coalescable.\n");
+ LLVM_DEBUG(dbgs() << "\tNot coalescable.\n");
return false;
}
@@ -1597,7 +1599,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
}
if (!TRI->shouldCoalesce(CopyMI, SrcRC, SrcIdx, DstRC, DstIdx,
CP.getNewRC(), *LIS)) {
- DEBUG(dbgs() << "\tSubtarget bailed on coalescing.\n");
+ LLVM_DEBUG(dbgs() << "\tSubtarget bailed on coalescing.\n");
return false;
}
}
@@ -1606,7 +1608,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
// sometimes dead copies slip through, and we can't generate invalid live
// ranges.
if (!CP.isPhys() && CopyMI->allDefsAreDead()) {
- DEBUG(dbgs() << "\tCopy is dead.\n");
+ LLVM_DEBUG(dbgs() << "\tCopy is dead.\n");
DeadDefs.push_back(CopyMI);
eliminateDeadDefs();
return true;
@@ -1623,7 +1625,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
// When that happens, just join the values and remove the copy.
if (CP.getSrcReg() == CP.getDstReg()) {
LiveInterval &LI = LIS->getInterval(CP.getSrcReg());
- DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n');
+ LLVM_DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n');
const SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI);
LiveQueryResult LRQ = LI.Query(CopyIdx);
if (VNInfo *DefVNI = LRQ.valueDefined()) {
@@ -1640,7 +1642,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
S.MergeValueNumberInto(SDefVNI, SReadVNI);
}
}
- DEBUG(dbgs() << "\tMerged values: " << LI << '\n');
+ LLVM_DEBUG(dbgs() << "\tMerged values: " << LI << '\n');
}
deleteInstr(CopyMI);
return true;
@@ -1648,9 +1650,9 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
// Enforce policies.
if (CP.isPhys()) {
- DEBUG(dbgs() << "\tConsidering merging " << printReg(CP.getSrcReg(), TRI)
- << " with " << printReg(CP.getDstReg(), TRI, CP.getSrcIdx())
- << '\n');
+ LLVM_DEBUG(dbgs() << "\tConsidering merging "
+ << printReg(CP.getSrcReg(), TRI) << " with "
+ << printReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n');
if (!canJoinPhys(CP)) {
// Before giving up coalescing, if definition of source is defined by
// trivial computation, try rematerializing it.
@@ -1667,7 +1669,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
LIS->getInterval(CP.getDstReg()).size())
CP.flip();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\tConsidering merging to "
<< TRI->getRegClassName(CP.getNewRC()) << " with ";
if (CP.getDstIdx() && CP.getSrcIdx())
@@ -1703,7 +1705,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
if (adjustCopiesBackFrom(CP, CopyMI) ||
removeCopyByCommutingDef(CP, CopyMI)) {
deleteInstr(CopyMI);
- DEBUG(dbgs() << "\tTrivial!\n");
+ LLVM_DEBUG(dbgs() << "\tTrivial!\n");
return true;
}
}
@@ -1715,7 +1717,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
return true;
// Otherwise, we are unable to join the intervals.
- DEBUG(dbgs() << "\tInterference!\n");
+ LLVM_DEBUG(dbgs() << "\tInterference!\n");
Again = true; // May be possible to coalesce later.
return false;
}
@@ -1749,8 +1751,8 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
for (LiveInterval::SubRange &S : LI.subranges()) {
if ((S.LaneMask & ShrinkMask).none())
continue;
- DEBUG(dbgs() << "Shrink LaneUses (Lane " << PrintLaneMask(S.LaneMask)
- << ")\n");
+ LLVM_DEBUG(dbgs() << "Shrink LaneUses (Lane " << PrintLaneMask(S.LaneMask)
+ << ")\n");
LIS->shrinkToUses(S, LI.reg);
}
LI.removeEmptySubRanges();
@@ -1767,7 +1769,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
// Update regalloc hint.
TRI->updateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *MF);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\tSuccess: " << printReg(CP.getSrcReg(), TRI, CP.getSrcIdx())
<< " -> " << printReg(CP.getDstReg(), TRI, CP.getDstIdx()) << '\n';
dbgs() << "\tResult = ";
@@ -1788,7 +1790,7 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
assert(CP.isPhys() && "Must be a physreg copy");
assert(MRI->isReserved(DstReg) && "Not a reserved register");
LiveInterval &RHS = LIS->getInterval(SrcReg);
- DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n');
assert(RHS.containsOneValue() && "Invalid join with reserved register");
@@ -1807,7 +1809,8 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
return false;
}
if (RHS.overlaps(LIS->getRegUnit(*UI))) {
- DEBUG(dbgs() << "\t\tInterference: " << printRegUnit(*UI, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tInterference: " << printRegUnit(*UI, TRI)
+ << '\n');
return false;
}
}
@@ -1816,7 +1819,7 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
BitVector RegMaskUsable;
if (LIS->checkRegMaskInterference(RHS, RegMaskUsable) &&
!RegMaskUsable.test(DstReg)) {
- DEBUG(dbgs() << "\t\tRegMask interference\n");
+ LLVM_DEBUG(dbgs() << "\t\tRegMask interference\n");
return false;
}
}
@@ -1846,12 +1849,12 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
// %y = def
// ...
if (!MRI->hasOneNonDBGUse(SrcReg)) {
- DEBUG(dbgs() << "\t\tMultiple vreg uses!\n");
+ LLVM_DEBUG(dbgs() << "\t\tMultiple vreg uses!\n");
return false;
}
if (!LIS->intervalIsInOneMBB(RHS)) {
- DEBUG(dbgs() << "\t\tComplex control flow!\n");
+ LLVM_DEBUG(dbgs() << "\t\tComplex control flow!\n");
return false;
}
@@ -1869,7 +1872,7 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
SI != CopyRegIdx; SI = Indexes->getNextNonNullIndex(SI)) {
MachineInstr *MI = LIS->getInstructionFromIndex(SI);
if (MI->readsRegister(DstReg, TRI)) {
- DEBUG(dbgs() << "\t\tInterference (read): " << *MI);
+ LLVM_DEBUG(dbgs() << "\t\tInterference (read): " << *MI);
return false;
}
}
@@ -1877,8 +1880,8 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
// We're going to remove the copy which defines a physical reserved
// register, so remove its valno, etc.
- DEBUG(dbgs() << "\t\tRemoving phys reg def of " << printReg(DstReg, TRI)
- << " at " << CopyRegIdx << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tRemoving phys reg def of "
+ << printReg(DstReg, TRI) << " at " << CopyRegIdx << "\n");
LIS->removePhysRegDefAt(DstReg, CopyRegIdx);
// Create a new dead def at the new def location.
@@ -2386,9 +2389,10 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
// to erase the IMPLICIT_DEF instruction.
if (OtherV.ErasableImplicitDef && DefMI &&
DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
- DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
- << " extends into " << printMBBReference(*DefMI->getParent())
- << ", keeping it.\n");
+ LLVM_DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
+ << " extends into "
+ << printMBBReference(*DefMI->getParent())
+ << ", keeping it.\n");
OtherV.ErasableImplicitDef = false;
}
@@ -2498,11 +2502,11 @@ void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) {
assert(V.OtherVNI && "OtherVNI not assigned, can't merge.");
assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion");
Assignments[ValNo] = Other.Assignments[V.OtherVNI->id];
- DEBUG(dbgs() << "\t\tmerge " << printReg(Reg) << ':' << ValNo << '@'
- << LR.getValNumInfo(ValNo)->def << " into "
- << printReg(Other.Reg) << ':' << V.OtherVNI->id << '@'
- << V.OtherVNI->def << " --> @"
- << NewVNInfo[Assignments[ValNo]]->def << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tmerge " << printReg(Reg) << ':' << ValNo << '@'
+ << LR.getValNumInfo(ValNo)->def << " into "
+ << printReg(Other.Reg) << ':' << V.OtherVNI->id << '@'
+ << V.OtherVNI->def << " --> @"
+ << NewVNInfo[Assignments[ValNo]]->def << '\n');
break;
case CR_Replace:
case CR_Unresolved: {
@@ -2528,8 +2532,8 @@ bool JoinVals::mapValues(JoinVals &Other) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
computeAssignment(i, Other);
if (Vals[i].Resolution == CR_Impossible) {
- DEBUG(dbgs() << "\t\tinterference at " << printReg(Reg) << ':' << i
- << '@' << LR.getValNumInfo(i)->def << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tinterference at " << printReg(Reg) << ':' << i
+ << '@' << LR.getValNumInfo(i)->def << '\n');
return false;
}
}
@@ -2551,13 +2555,13 @@ taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
// lanes escape the block.
SlotIndex End = OtherI->end;
if (End >= MBBEnd) {
- DEBUG(dbgs() << "\t\ttaints global " << printReg(Other.Reg) << ':'
- << OtherI->valno->id << '@' << OtherI->start << '\n');
+ LLVM_DEBUG(dbgs() << "\t\ttaints global " << printReg(Other.Reg) << ':'
+ << OtherI->valno->id << '@' << OtherI->start << '\n');
return false;
}
- DEBUG(dbgs() << "\t\ttaints local " << printReg(Other.Reg) << ':'
- << OtherI->valno->id << '@' << OtherI->start
- << " to " << End << '\n');
+ LLVM_DEBUG(dbgs() << "\t\ttaints local " << printReg(Other.Reg) << ':'
+ << OtherI->valno->id << '@' << OtherI->start << " to "
+ << End << '\n');
// A dead def is not a problem.
if (End.isDead())
break;
@@ -2598,8 +2602,8 @@ bool JoinVals::resolveConflicts(JoinVals &Other) {
assert(V.Resolution != CR_Impossible && "Unresolvable conflict");
if (V.Resolution != CR_Unresolved)
continue;
- DEBUG(dbgs() << "\t\tconflict at " << printReg(Reg) << ':' << i
- << '@' << LR.getValNumInfo(i)->def << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tconflict at " << printReg(Reg) << ':' << i << '@'
+ << LR.getValNumInfo(i)->def << '\n');
if (SubRangeJoin)
return false;
@@ -2636,7 +2640,7 @@ bool JoinVals::resolveConflicts(JoinVals &Other) {
while (true) {
assert(MI != MBB->end() && "Bad LastMI");
if (usesLanes(*MI, Other.Reg, Other.SubIdx, TaintedLanes)) {
- DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
+ LLVM_DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
return false;
}
// LastMI is the last instruction to use the current value.
@@ -2709,8 +2713,8 @@ void JoinVals::pruneValues(JoinVals &Other,
if (!EraseImpDef)
EndPoints.push_back(Def);
}
- DEBUG(dbgs() << "\t\tpruned " << printReg(Other.Reg) << " at " << Def
- << ": " << Other.LR << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tpruned " << printReg(Other.Reg) << " at " << Def
+ << ": " << Other.LR << '\n');
break;
}
case CR_Erase:
@@ -2721,8 +2725,8 @@ void JoinVals::pruneValues(JoinVals &Other,
// computeAssignment(), the value that was originally copied could have
// been replaced.
LIS->pruneValue(LR, Def, &EndPoints);
- DEBUG(dbgs() << "\t\tpruned all of " << printReg(Reg) << " at "
- << Def << ": " << LR << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tpruned all of " << printReg(Reg) << " at "
+ << Def << ": " << LR << '\n');
}
break;
case CR_Unresolved:
@@ -2746,7 +2750,8 @@ void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) {
// Check subranges at the point where the copy will be removed.
SlotIndex Def = LR.getValNumInfo(i)->def;
// Print message so mismatches with eraseInstrs() can be diagnosed.
- DEBUG(dbgs() << "\t\tExpecting instruction removal at " << Def << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tExpecting instruction removal at " << Def
+ << '\n');
for (LiveInterval::SubRange &S : LI.subranges()) {
LiveQueryResult Q = S.Query(Def);
@@ -2754,8 +2759,8 @@ void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) {
// copied and we must remove that subrange value as well.
VNInfo *ValueOut = Q.valueOutOrDead();
if (ValueOut != nullptr && Q.valueIn() == nullptr) {
- DEBUG(dbgs() << "\t\tPrune sublane " << PrintLaneMask(S.LaneMask)
- << " at " << Def << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tPrune sublane " << PrintLaneMask(S.LaneMask)
+ << " at " << Def << "\n");
LIS->pruneValue(S, Def, nullptr);
DidPrune = true;
// Mark value number as unused.
@@ -2765,8 +2770,9 @@ void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) {
// If a subrange ends at the copy, then a value was copied but only
// partially used later. Shrink the subregister range appropriately.
if (Q.valueIn() != nullptr && Q.valueOut() == nullptr) {
- DEBUG(dbgs() << "\t\tDead uses at sublane " << PrintLaneMask(S.LaneMask)
- << " at " << Def << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tDead uses at sublane "
+ << PrintLaneMask(S.LaneMask) << " at " << Def
+ << "\n");
ShrinkMask |= S.LaneMask;
}
}
@@ -2878,7 +2884,7 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
std::prev(S)->end = NewEnd;
}
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LR << '\n';
if (LI != nullptr)
dbgs() << "\t\t LHS = " << *LI << '\n';
@@ -2896,7 +2902,7 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
ShrinkRegs.push_back(Reg);
}
ErasedInstrs.insert(MI);
- DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI);
LIS->RemoveMachineInstrFromMaps(*MI);
MI->eraseFromParent();
break;
@@ -2951,13 +2957,13 @@ void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
LRange.join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
NewVNInfo);
- DEBUG(dbgs() << "\t\tjoined lanes: " << LRange << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tjoined lanes: " << LRange << "\n");
if (EndPoints.empty())
return;
// Recompute the parts of the live range we had to remove because of
// CR_Replace conflicts.
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: ";
for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) {
dbgs() << EndPoints[i];
@@ -2996,9 +3002,7 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
JoinVals LHSVals(LHS, CP.getDstReg(), CP.getDstIdx(), LaneBitmask::getNone(),
NewVNInfo, CP, LIS, TRI, false, TrackSubRegLiveness);
- DEBUG(dbgs() << "\t\tRHS = " << RHS
- << "\n\t\tLHS = " << LHS
- << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tRHS = " << RHS << "\n\t\tLHS = " << LHS << '\n');
// First compute NewVNInfo and the simple value mappings.
// Detect impossible conflicts early.
@@ -3029,8 +3033,8 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
R.LaneMask = Mask;
}
}
- DEBUG(dbgs() << "\t\tLHST = " << printReg(CP.getDstReg())
- << ' ' << LHS << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tLHST = " << printReg(CP.getDstReg()) << ' ' << LHS
+ << '\n');
// Determine lanemasks of RHS in the coalesced register and merge subranges.
unsigned SrcIdx = CP.getSrcIdx();
@@ -3045,7 +3049,7 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
mergeSubRangeInto(LHS, R, Mask, CP);
}
}
- DEBUG(dbgs() << "\tJoined SubRanges " << LHS << "\n");
+ LLVM_DEBUG(dbgs() << "\tJoined SubRanges " << LHS << "\n");
// Pruning implicit defs from subranges may result in the main range
// having stale segments.
@@ -3083,7 +3087,7 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
if (!EndPoints.empty()) {
// Recompute the parts of the live range we had to remove because of
// CR_Replace conflicts.
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: ";
for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) {
dbgs() << EndPoints[i];
@@ -3231,7 +3235,8 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
continue;
// Check that OtherReg interfere with DstReg.
if (LIS->getInterval(OtherReg).overlaps(DstLI)) {
- DEBUG(dbgs() << "Apply terminal rule for: " << printReg(DstReg) << '\n');
+ LLVM_DEBUG(dbgs() << "Apply terminal rule for: " << printReg(DstReg)
+ << '\n');
return true;
}
}
@@ -3240,7 +3245,7 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
void
RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << MBB->getName() << ":\n");
+ LLVM_DEBUG(dbgs() << MBB->getName() << ":\n");
// Collect all copy-like instructions in MBB. Don't start coalescing anything
// yet, it might invalidate the iterator.
@@ -3305,7 +3310,7 @@ void RegisterCoalescer::coalesceLocals() {
}
void RegisterCoalescer::joinAllIntervals() {
- DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
+ LLVM_DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around.");
std::vector<MBBPriorityInfo> MBBs;
@@ -3361,8 +3366,8 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
// splitting optimization.
JoinSplitEdges = EnableJoinSplits;
- DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
- << "********** Function: " << MF->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
+ << "********** Function: " << MF->getName() << '\n');
if (VerifyCoalescing)
MF->verify(this, "Before register coalescing");
@@ -3379,14 +3384,15 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
array_pod_sort(InflateRegs.begin(), InflateRegs.end());
InflateRegs.erase(std::unique(InflateRegs.begin(), InflateRegs.end()),
InflateRegs.end());
- DEBUG(dbgs() << "Trying to inflate " << InflateRegs.size() << " regs.\n");
+ LLVM_DEBUG(dbgs() << "Trying to inflate " << InflateRegs.size()
+ << " regs.\n");
for (unsigned i = 0, e = InflateRegs.size(); i != e; ++i) {
unsigned Reg = InflateRegs[i];
if (MRI->reg_nodbg_empty(Reg))
continue;
if (MRI->recomputeRegClass(Reg)) {
- DEBUG(dbgs() << printReg(Reg) << " inflated to "
- << TRI->getRegClassName(MRI->getRegClass(Reg)) << '\n');
+ LLVM_DEBUG(dbgs() << printReg(Reg) << " inflated to "
+ << TRI->getRegClassName(MRI->getRegClass(Reg)) << '\n');
++NumInflated;
LiveInterval &LI = LIS->getInterval(Reg);
@@ -3409,7 +3415,7 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
}
}
- DEBUG(dump());
+ LLVM_DEBUG(dump());
if (VerifyCoalescing)
MF->verify(this, "After register coalescing");
return true;
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index 9fa14c9dc5eb..d7cd5a096ef0 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -288,8 +288,8 @@ bool RegScavenger::isRegUsed(unsigned Reg, bool includeReserved) const {
unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
for (unsigned Reg : *RC) {
if (!isRegUsed(Reg)) {
- DEBUG(dbgs() << "Scavenger found unused reg: " << printReg(Reg, TRI)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Scavenger found unused reg: " << printReg(Reg, TRI)
+ << "\n");
return Reg;
}
}
@@ -561,15 +561,15 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// If we found an unused register there is no reason to spill it.
if (!isRegUsed(SReg)) {
- DEBUG(dbgs() << "Scavenged register: " << printReg(SReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Scavenged register: " << printReg(SReg, TRI) << "\n");
return SReg;
}
ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI);
Scavenged.Restore = &*std::prev(UseMI);
- DEBUG(dbgs() << "Scavenged register (with spill): " << printReg(SReg, TRI)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Scavenged register (with spill): "
+ << printReg(SReg, TRI) << "\n");
return SReg;
}
@@ -594,14 +594,15 @@ unsigned RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
MachineBasicBlock::iterator ReloadAfter =
RestoreAfter ? std::next(MBBI) : MBBI;
MachineBasicBlock::iterator ReloadBefore = std::next(ReloadAfter);
- DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n');
+ LLVM_DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n');
ScavengedInfo &Scavenged = spill(Reg, RC, SPAdj, SpillBefore, ReloadBefore);
Scavenged.Restore = &*std::prev(SpillBefore);
LiveUnits.removeReg(Reg);
- DEBUG(dbgs() << "Scavenged register with spill: " << printReg(Reg, TRI)
- << " until " << *SpillBefore);
+ LLVM_DEBUG(dbgs() << "Scavenged register with spill: " << printReg(Reg, TRI)
+ << " until " << *SpillBefore);
} else {
- DEBUG(dbgs() << "Scavenged free register: " << printReg(Reg, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Scavenged free register: " << printReg(Reg, TRI)
+ << '\n');
}
return Reg;
}
@@ -757,8 +758,8 @@ void llvm::scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS) {
bool Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
if (Again) {
- DEBUG(dbgs() << "Warning: Required two scavenging passes for block "
- << MBB.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Warning: Required two scavenging passes for block "
+ << MBB.getName() << '\n');
Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
// The target required a 2nd run (because it created new vregs while
// spilling). Refuse to do another pass to keep compiletime in check.
diff --git a/lib/CodeGen/RenameIndependentSubregs.cpp b/lib/CodeGen/RenameIndependentSubregs.cpp
index e25e53a24b5c..7305091227d6 100644
--- a/lib/CodeGen/RenameIndependentSubregs.cpp
+++ b/lib/CodeGen/RenameIndependentSubregs.cpp
@@ -134,17 +134,17 @@ bool RenameIndependentSubregs::renameComponents(LiveInterval &LI) const {
const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
SmallVector<LiveInterval*, 4> Intervals;
Intervals.push_back(&LI);
- DEBUG(dbgs() << printReg(Reg) << ": Found " << Classes.getNumClasses()
- << " equivalence classes.\n");
- DEBUG(dbgs() << printReg(Reg) << ": Splitting into newly created:");
+ LLVM_DEBUG(dbgs() << printReg(Reg) << ": Found " << Classes.getNumClasses()
+ << " equivalence classes.\n");
+ LLVM_DEBUG(dbgs() << printReg(Reg) << ": Splitting into newly created:");
for (unsigned I = 1, NumClasses = Classes.getNumClasses(); I < NumClasses;
++I) {
unsigned NewVReg = MRI->createVirtualRegister(RegClass);
LiveInterval &NewLI = LIS->createEmptyInterval(NewVReg);
Intervals.push_back(&NewLI);
- DEBUG(dbgs() << ' ' << printReg(NewVReg));
+ LLVM_DEBUG(dbgs() << ' ' << printReg(NewVReg));
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
rewriteOperands(Classes, SubRangeInfos, Intervals);
distribute(Classes, SubRangeInfos, Intervals);
@@ -376,8 +376,8 @@ bool RenameIndependentSubregs::runOnMachineFunction(MachineFunction &MF) {
if (!MRI->subRegLivenessEnabled())
return false;
- DEBUG(dbgs() << "Renaming independent subregister live ranges in "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Renaming independent subregister live ranges in "
+ << MF.getName() << '\n');
LIS = &getAnalysis<LiveIntervals>();
TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/CodeGen/ResetMachineFunctionPass.cpp b/lib/CodeGen/ResetMachineFunctionPass.cpp
index 72b631baf94a..dbc4581c7a94 100644
--- a/lib/CodeGen/ResetMachineFunctionPass.cpp
+++ b/lib/CodeGen/ResetMachineFunctionPass.cpp
@@ -55,7 +55,7 @@ namespace {
MachineFunctionProperties::Property::FailedISel)) {
if (AbortOnFailedISel)
report_fatal_error("Instruction selection failed");
- DEBUG(dbgs() << "Resetting: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Resetting: " << MF.getName() << '\n');
++NumFunctionsReset;
MF.reset();
if (EmitFallbackDiag) {
diff --git a/lib/CodeGen/SafeStack.cpp b/lib/CodeGen/SafeStack.cpp
index 3475bae4990c..1e8de15efeae 100644
--- a/lib/CodeGen/SafeStack.cpp
+++ b/lib/CodeGen/SafeStack.cpp
@@ -242,16 +242,17 @@ bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
bool Safe = AllocaRange.contains(AccessRange);
- DEBUG(dbgs() << "[SafeStack] "
- << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
- << *AllocaPtr << "\n"
- << " Access " << *Addr << "\n"
- << " SCEV " << *Expr
- << " U: " << SE.getUnsignedRange(Expr)
- << ", S: " << SE.getSignedRange(Expr) << "\n"
- << " Range " << AccessRange << "\n"
- << " AllocaRange " << AllocaRange << "\n"
- << " " << (Safe ? "safe" : "unsafe") << "\n");
+ LLVM_DEBUG(
+ dbgs() << "[SafeStack] "
+ << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
+ << *AllocaPtr << "\n"
+ << " Access " << *Addr << "\n"
+ << " SCEV " << *Expr
+ << " U: " << SE.getUnsignedRange(Expr)
+ << ", S: " << SE.getSignedRange(Expr) << "\n"
+ << " Range " << AccessRange << "\n"
+ << " AllocaRange " << AllocaRange << "\n"
+ << " " << (Safe ? "safe" : "unsafe") << "\n");
return Safe;
}
@@ -298,8 +299,9 @@ bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
case Instruction::Store:
if (V == I->getOperand(0)) {
// Stored the pointer - conservatively assume it may be unsafe.
- DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
- << "\n store of address: " << *I << "\n");
+ LLVM_DEBUG(dbgs()
+ << "[SafeStack] Unsafe alloca: " << *AllocaPtr
+ << "\n store of address: " << *I << "\n");
return false;
}
@@ -324,9 +326,9 @@ bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
- DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
- << "\n unsafe memintrinsic: " << *I
- << "\n");
+ LLVM_DEBUG(dbgs()
+ << "[SafeStack] Unsafe alloca: " << *AllocaPtr
+ << "\n unsafe memintrinsic: " << *I << "\n");
return false;
}
continue;
@@ -344,8 +346,8 @@ bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
if (A->get() == V)
if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
CS.doesNotAccessMemory()))) {
- DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
- << "\n unsafe call: " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
+ << "\n unsafe call: " << *I << "\n");
return false;
}
continue;
@@ -829,7 +831,7 @@ bool SafeStack::run() {
TryInlinePointerAddress();
- DEBUG(dbgs() << "[SafeStack] safestack applied\n");
+ LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
return true;
}
@@ -850,17 +852,17 @@ public:
}
bool runOnFunction(Function &F) override {
- DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
if (!F.hasFnAttribute(Attribute::SafeStack)) {
- DEBUG(dbgs() << "[SafeStack] safestack is not requested"
- " for this function\n");
+ LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
+ " for this function\n");
return false;
}
if (F.isDeclaration()) {
- DEBUG(dbgs() << "[SafeStack] function definition"
- " is not available\n");
+ LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
+ " is not available\n");
return false;
}
diff --git a/lib/CodeGen/SafeStackColoring.cpp b/lib/CodeGen/SafeStackColoring.cpp
index 35b0736845a1..329458778a98 100644
--- a/lib/CodeGen/SafeStackColoring.cpp
+++ b/lib/CodeGen/SafeStackColoring.cpp
@@ -102,10 +102,10 @@ void StackColoring::collectMarkers() {
// For each basic block, compute
// * the list of markers in the instruction order
// * the sets of allocas whose lifetime starts or ends in this BB
- DEBUG(dbgs() << "Instructions:\n");
+ LLVM_DEBUG(dbgs() << "Instructions:\n");
unsigned InstNo = 0;
for (BasicBlock *BB : depth_first(&F)) {
- DEBUG(dbgs() << " " << InstNo << ": BB " << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << " " << InstNo << ": BB " << BB->getName() << "\n");
unsigned BBStart = InstNo++;
BlockLifetimeInfo &BlockInfo = BlockLiveness[BB];
@@ -122,9 +122,9 @@ void StackColoring::collectMarkers() {
}
auto ProcessMarker = [&](Instruction *I, const Marker &M) {
- DEBUG(dbgs() << " " << InstNo << ": "
- << (M.IsStart ? "start " : "end ") << M.AllocaNo << ", "
- << *I << "\n");
+ LLVM_DEBUG(dbgs() << " " << InstNo << ": "
+ << (M.IsStart ? "start " : "end ") << M.AllocaNo
+ << ", " << *I << "\n");
BBMarkers[BB].push_back({InstNo, M});
@@ -281,7 +281,7 @@ LLVM_DUMP_METHOD void StackColoring::dumpLiveRanges() {
#endif
void StackColoring::run() {
- DEBUG(dumpAllocas());
+ LLVM_DEBUG(dumpAllocas());
for (unsigned I = 0; I < NumAllocas; ++I)
AllocaNumbering[Allocas[I]] = I;
@@ -304,7 +304,7 @@ void StackColoring::run() {
LiveRanges[I] = getFullLiveRange();
calculateLocalLiveness();
- DEBUG(dumpBlockLiveness());
+ LLVM_DEBUG(dumpBlockLiveness());
calculateLiveIntervals();
- DEBUG(dumpLiveRanges());
+ LLVM_DEBUG(dumpLiveRanges());
}
diff --git a/lib/CodeGen/SafeStackLayout.cpp b/lib/CodeGen/SafeStackLayout.cpp
index 68ccdc277662..07b6a5d1883b 100644
--- a/lib/CodeGen/SafeStackLayout.cpp
+++ b/lib/CodeGen/SafeStackLayout.cpp
@@ -63,30 +63,30 @@ void StackLayout::layoutObject(StackObject &Obj) {
return;
}
- DEBUG(dbgs() << "Layout: size " << Obj.Size << ", align " << Obj.Alignment
- << ", range " << Obj.Range << "\n");
+ LLVM_DEBUG(dbgs() << "Layout: size " << Obj.Size << ", align "
+ << Obj.Alignment << ", range " << Obj.Range << "\n");
assert(Obj.Alignment <= MaxAlignment);
unsigned Start = AdjustStackOffset(0, Obj.Size, Obj.Alignment);
unsigned End = Start + Obj.Size;
- DEBUG(dbgs() << " First candidate: " << Start << " .. " << End << "\n");
+ LLVM_DEBUG(dbgs() << " First candidate: " << Start << " .. " << End << "\n");
for (const StackRegion &R : Regions) {
- DEBUG(dbgs() << " Examining region: " << R.Start << " .. " << R.End
- << ", range " << R.Range << "\n");
+ LLVM_DEBUG(dbgs() << " Examining region: " << R.Start << " .. " << R.End
+ << ", range " << R.Range << "\n");
assert(End >= R.Start);
if (Start >= R.End) {
- DEBUG(dbgs() << " Does not intersect, skip.\n");
+ LLVM_DEBUG(dbgs() << " Does not intersect, skip.\n");
continue;
}
if (Obj.Range.Overlaps(R.Range)) {
// Find the next appropriate location.
Start = AdjustStackOffset(R.End, Obj.Size, Obj.Alignment);
End = Start + Obj.Size;
- DEBUG(dbgs() << " Overlaps. Next candidate: " << Start << " .. " << End
- << "\n");
+ LLVM_DEBUG(dbgs() << " Overlaps. Next candidate: " << Start << " .. "
+ << End << "\n");
continue;
}
if (End <= R.End) {
- DEBUG(dbgs() << " Reusing region(s).\n");
+ LLVM_DEBUG(dbgs() << " Reusing region(s).\n");
break;
}
}
@@ -95,13 +95,13 @@ void StackLayout::layoutObject(StackObject &Obj) {
if (End > LastRegionEnd) {
// Insert a new region at the end. Maybe two.
if (Start > LastRegionEnd) {
- DEBUG(dbgs() << " Creating gap region: " << LastRegionEnd << " .. "
- << Start << "\n");
+ LLVM_DEBUG(dbgs() << " Creating gap region: " << LastRegionEnd << " .. "
+ << Start << "\n");
Regions.emplace_back(LastRegionEnd, Start, StackColoring::LiveRange());
LastRegionEnd = Start;
}
- DEBUG(dbgs() << " Creating new region: " << LastRegionEnd << " .. " << End
- << ", range " << Obj.Range << "\n");
+ LLVM_DEBUG(dbgs() << " Creating new region: " << LastRegionEnd << " .. "
+ << End << ", range " << Obj.Range << "\n");
Regions.emplace_back(LastRegionEnd, End, Obj.Range);
LastRegionEnd = End;
}
@@ -150,5 +150,5 @@ void StackLayout::computeLayout() {
for (auto &Obj : StackObjects)
layoutObject(Obj);
- DEBUG(print(dbgs()));
+ LLVM_DEBUG(print(dbgs()));
}
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index d08501fa3c45..32c0181a4790 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -849,8 +849,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
BarrierChain->addPredBarrier(SU);
BarrierChain = SU;
- DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
- << BarrierChain->NodeNum << ").\n";);
+ LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
+ << BarrierChain->NodeNum << ").\n";);
// Add dependencies against everything below it and clear maps.
addBarrierChain(Stores);
@@ -938,11 +938,12 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// Reduce maps if they grow huge.
if (Stores.size() + Loads.size() >= HugeRegion) {
- DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";);
+ LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";);
reduceHugeMemNodeMaps(Stores, Loads, getReductionSize());
}
if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) {
- DEBUG(dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";);
+ LLVM_DEBUG(
+ dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";);
reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize());
}
}
@@ -982,10 +983,8 @@ void ScheduleDAGInstrs::Value2SUsMap::dump() {
void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
Value2SUsMap &loads, unsigned N) {
- DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n";
- stores.dump();
- dbgs() << "Loading SUnits:\n";
- loads.dump());
+ LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump();
+ dbgs() << "Loading SUnits:\n"; loads.dump());
// Insert all SU's NodeNums into a vector and sort it.
std::vector<unsigned> NodeNums;
@@ -1011,12 +1010,12 @@ void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
BarrierChain->addPredBarrier(newBarrierChain);
BarrierChain = newBarrierChain;
- DEBUG(dbgs() << "Inserting new barrier chain: SU("
- << BarrierChain->NodeNum << ").\n";);
+ LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU("
+ << BarrierChain->NodeNum << ").\n";);
}
else
- DEBUG(dbgs() << "Keeping old barrier chain: SU("
- << BarrierChain->NodeNum << ").\n";);
+ LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU("
+ << BarrierChain->NodeNum << ").\n";);
}
else
BarrierChain = newBarrierChain;
@@ -1024,10 +1023,8 @@ void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
insertBarrierChain(stores);
insertBarrierChain(loads);
- DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n";
- stores.dump();
- dbgs() << "Loading SUnits:\n";
- loads.dump());
+ LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump();
+ dbgs() << "Loading SUnits:\n"; loads.dump());
}
static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
@@ -1048,7 +1045,7 @@ static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
}
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
- DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');
+ LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');
LiveRegs.init(*TRI);
LiveRegs.addLiveOuts(MBB);
@@ -1249,11 +1246,11 @@ public:
}
R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
- DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
+ LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
- DEBUG(dbgs() << " SU(" << Idx << ") in tree "
- << R.DFSNodeData[Idx].SubtreeID << '\n');
+ LLVM_DEBUG(dbgs() << " SU(" << Idx << ") in tree "
+ << R.DFSNodeData[Idx].SubtreeID << '\n');
}
for (const std::pair<const SUnit*, const SUnit*> &P : ConnectionPairs) {
unsigned PredTree = SubtreeClasses[P.first->NodeNum];
@@ -1408,8 +1405,8 @@ void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
for (const Connection &C : SubtreeConnections[SubtreeID]) {
SubtreeConnectLevels[C.TreeID] =
std::max(SubtreeConnectLevels[C.TreeID], C.Level);
- DEBUG(dbgs() << " Tree: " << C.TreeID
- << " @" << SubtreeConnectLevels[C.TreeID] << '\n');
+ LLVM_DEBUG(dbgs() << " Tree: " << C.TreeID << " @"
+ << SubtreeConnectLevels[C.TreeID] << '\n');
}
}
diff --git a/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index 1364c9d8de8a..b8bfe69a76e1 100644
--- a/lib/CodeGen/ScoreboardHazardRecognizer.cpp
+++ b/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -69,12 +69,12 @@ ScoreboardHazardRecognizer::ScoreboardHazardRecognizer(
// If MaxLookAhead is not set above, then we are not enabled.
if (!isEnabled())
- DEBUG(dbgs() << "Disabled scoreboard hazard recognizer\n");
+ LLVM_DEBUG(dbgs() << "Disabled scoreboard hazard recognizer\n");
else {
// A nonempty itinerary must have a SchedModel.
IssueWidth = ItinData->SchedModel.IssueWidth;
- DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
- << ScoreboardDepth << '\n');
+ LLVM_DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
+ << ScoreboardDepth << '\n');
}
}
@@ -156,9 +156,9 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
}
if (!freeUnits) {
- DEBUG(dbgs() << "*** Hazard in cycle +" << StageCycle << ", ");
- DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
- DEBUG(DAG->dumpNode(SU));
+ LLVM_DEBUG(dbgs() << "*** Hazard in cycle +" << StageCycle << ", ");
+ LLVM_DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
+ LLVM_DEBUG(DAG->dumpNode(SU));
return Hazard;
}
}
@@ -224,8 +224,8 @@ void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
cycle += IS->getNextCycles();
}
- DEBUG(ReservedScoreboard.dump());
- DEBUG(RequiredScoreboard.dump());
+ LLVM_DEBUG(ReservedScoreboard.dump());
+ LLVM_DEBUG(RequiredScoreboard.dump());
}
void ScoreboardHazardRecognizer::AdvanceCycle() {
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d03c6ffa6bb5..ccbbc056dd0a 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -972,11 +972,9 @@ SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
bool AddTo) {
assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
++NodesCombined;
- DEBUG(dbgs() << "\nReplacing.1 ";
- N->dump(&DAG);
- dbgs() << "\nWith: ";
- To[0].getNode()->dump(&DAG);
- dbgs() << " and " << NumTo-1 << " other values\n");
+ LLVM_DEBUG(dbgs() << "\nReplacing.1 "; N->dump(&DAG); dbgs() << "\nWith: ";
+ To[0].getNode()->dump(&DAG);
+ dbgs() << " and " << NumTo - 1 << " other values\n");
for (unsigned i = 0, e = NumTo; i != e; ++i)
assert((!To[i].getNode() ||
N->getValueType(i) == To[i].getValueType()) &&
@@ -1033,11 +1031,9 @@ bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
// Replace the old value with the new one.
++NodesCombined;
- DEBUG(dbgs() << "\nReplacing.2 ";
- TLO.Old.getNode()->dump(&DAG);
- dbgs() << "\nWith: ";
- TLO.New.getNode()->dump(&DAG);
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
+ dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
+ dbgs() << '\n');
CommitTargetLoweringOpt(TLO);
return true;
@@ -1058,8 +1054,9 @@ bool DAGCombiner::SimplifyDemandedVectorElts(SDValue Op,
// Replace the old value with the new one.
++NodesCombined;
- DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
- dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
+ dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
+ dbgs() << '\n');
CommitTargetLoweringOpt(TLO);
return true;
@@ -1070,11 +1067,8 @@ void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
EVT VT = Load->getValueType(0);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0));
- DEBUG(dbgs() << "\nReplacing.9 ";
- Load->dump(&DAG);
- dbgs() << "\nWith: ";
- Trunc.getNode()->dump(&DAG);
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "\nReplacing.9 "; Load->dump(&DAG); dbgs() << "\nWith: ";
+ Trunc.getNode()->dump(&DAG); dbgs() << '\n');
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
@@ -1173,7 +1167,7 @@ SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
- DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
bool Replace0 = false;
SDValue N0 = Op.getOperand(0);
@@ -1238,7 +1232,7 @@ SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
assert(PVT != VT && "Don't know what type to promote to!");
- DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
bool Replace = false;
SDValue N0 = Op.getOperand(0);
@@ -1290,8 +1284,7 @@ SDValue DAGCombiner::PromoteExtend(SDValue Op) {
// fold (aext (aext x)) -> (aext x)
// fold (aext (zext x)) -> (zext x)
// fold (aext (sext x)) -> (sext x)
- DEBUG(dbgs() << "\nPromoting ";
- Op.getNode()->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
}
return SDValue();
@@ -1331,11 +1324,8 @@ bool DAGCombiner::PromoteLoad(SDValue Op) {
MemVT, LD->getMemOperand());
SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
- DEBUG(dbgs() << "\nPromoting ";
- N->dump(&DAG);
- dbgs() << "\nTo: ";
- Result.getNode()->dump(&DAG);
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "\nPromoting "; N->dump(&DAG); dbgs() << "\nTo: ";
+ Result.getNode()->dump(&DAG); dbgs() << '\n');
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
@@ -1430,7 +1420,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
continue;
}
- DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));
// Add any operands of the new node which have not yet been combined to the
// worklist as well. Because the worklist uniques things already, this
@@ -1458,8 +1448,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
RV.getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned new node!");
- DEBUG(dbgs() << " ... into: ";
- RV.getNode()->dump(&DAG));
+ LLVM_DEBUG(dbgs() << " ... into: "; RV.getNode()->dump(&DAG));
if (N->getNumValues() == RV.getNode()->getNumValues())
DAG.ReplaceAllUsesWith(N, RV.getNode());
@@ -3957,13 +3946,13 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG) {
if (Loads.size() == 0)
return false;
- DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
+ LLVM_DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
SDValue MaskOp = N->getOperand(1);
// If it exists, fixup the single node we allow in the tree that needs
// masking.
if (FixupNode) {
- DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
+ LLVM_DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
FixupNode->getValueType(0),
SDValue(FixupNode, 0), MaskOp);
@@ -3988,7 +3977,7 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG) {
// Create narrow loads.
for (auto *Load : Loads) {
- DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
+ LLVM_DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
SDValue(Load, 0), MaskOp);
DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
@@ -11774,11 +11763,8 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
BasePtr, Offset, AM);
++PreIndexedNodes;
++NodesCombined;
- DEBUG(dbgs() << "\nReplacing.4 ";
- N->dump(&DAG);
- dbgs() << "\nWith: ";
- Result.getNode()->dump(&DAG);
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "\nReplacing.4 "; N->dump(&DAG); dbgs() << "\nWith: ";
+ Result.getNode()->dump(&DAG); dbgs() << '\n');
WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
@@ -11943,11 +11929,9 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
BasePtr, Offset, AM);
++PostIndexedNodes;
++NodesCombined;
- DEBUG(dbgs() << "\nReplacing.5 ";
- N->dump(&DAG);
- dbgs() << "\nWith: ";
- Result.getNode()->dump(&DAG);
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "\nReplacing.5 "; N->dump(&DAG);
+ dbgs() << "\nWith: "; Result.getNode()->dump(&DAG);
+ dbgs() << '\n');
WorklistRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
@@ -12013,11 +11997,9 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// v3 = add v2, c
// Now we replace use of chain2 with chain1. This makes the second load
// isomorphic to the one we are deleting, and thus makes this load live.
- DEBUG(dbgs() << "\nReplacing.6 ";
- N->dump(&DAG);
- dbgs() << "\nWith chain: ";
- Chain.getNode()->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\nReplacing.6 "; N->dump(&DAG);
+ dbgs() << "\nWith chain: "; Chain.getNode()->dump(&DAG);
+ dbgs() << "\n");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
AddUsersToWorklist(Chain.getNode());
@@ -12048,11 +12030,9 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
AddUsersToWorklist(N);
} else
Index = DAG.getUNDEF(N->getValueType(1));
- DEBUG(dbgs() << "\nReplacing.7 ";
- N->dump(&DAG);
- dbgs() << "\nWith: ";
- Undef.getNode()->dump(&DAG);
- dbgs() << " and 2 other values\n");
+ LLVM_DEBUG(dbgs() << "\nReplacing.7 "; N->dump(&DAG);
+ dbgs() << "\nWith: "; Undef.getNode()->dump(&DAG);
+ dbgs() << " and 2 other values\n");
WorklistRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 166e7b1d416b..99b223b13322 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -261,7 +261,8 @@ void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
if (EmitStartPt == &LocalMI)
EmitStartPt = EmitStartPt->getPrevNode();
- DEBUG(dbgs() << "removing dead local value materialization " << LocalMI);
+ LLVM_DEBUG(dbgs() << "removing dead local value materialization "
+ << LocalMI);
OrderMap.Orders.erase(&LocalMI);
LocalMI.eraseFromParent();
return;
@@ -312,7 +313,7 @@ void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
}
// Sink LocalMI before SinkPos and assign it the same DebugLoc.
- DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
+ LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
FuncInfo.MBB->remove(&LocalMI);
FuncInfo.MBB->insert(SinkPos, &LocalMI);
if (SinkPos != FuncInfo.MBB->end())
@@ -1329,13 +1330,13 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
assert(DI->getVariable() && "Missing variable");
if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
- DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
const Value *Address = DI->getAddress();
if (!Address || isa<UndefValue>(Address)) {
- DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
@@ -1387,7 +1388,7 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
- DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
return true;
}
@@ -1430,7 +1431,7 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
- DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
return true;
}
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index f1d92dc7270f..798b924244a7 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -485,7 +485,7 @@ int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
auto I = ByValArgFrameIndexMap.find(A);
if (I != ByValArgFrameIndexMap.end())
return I->second;
- DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
+ LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
return INT_MAX;
}
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 86c6c0542646..71bb3c7d024a 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -200,8 +200,8 @@ public:
}
void ReplaceNode(SDNode *Old, SDNode *New) {
- DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
- dbgs() << " with: "; New->dump(&DAG));
+ LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
+ dbgs() << " with: "; New->dump(&DAG));
assert(Old->getNumValues() == New->getNumValues() &&
"Replacing one node with another that produces a different number "
@@ -213,8 +213,8 @@ public:
}
void ReplaceNode(SDValue Old, SDValue New) {
- DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
- dbgs() << " with: "; New->dump(&DAG));
+ LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
+ dbgs() << " with: "; New->dump(&DAG));
DAG.ReplaceAllUsesWith(Old, New);
if (UpdatedNodes)
@@ -223,13 +223,12 @@ public:
}
void ReplaceNode(SDNode *Old, const SDValue *New) {
- DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG));
+ LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG));
DAG.ReplaceAllUsesWith(Old, New);
for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i) {
- DEBUG(dbgs() << (i == 0 ? " with: "
- : " and: ");
- New[i]->dump(&DAG));
+ LLVM_DEBUG(dbgs() << (i == 0 ? " with: " : " and: ");
+ New[i]->dump(&DAG));
if (UpdatedNodes)
UpdatedNodes->insert(New[i].getNode());
}
@@ -408,7 +407,7 @@ SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
}
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
- DEBUG(dbgs() << "Optimizing float store operations\n");
+ LLVM_DEBUG(dbgs() << "Optimizing float store operations\n");
// Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
// FIXME: We shouldn't do this for TargetConstantFP's.
// FIXME: move this to the DAG Combiner! Note that we can't regress due
@@ -477,7 +476,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
AAMDNodes AAInfo = ST->getAAInfo();
if (!ST->isTruncatingStore()) {
- DEBUG(dbgs() << "Legalizing store operation\n");
+ LLVM_DEBUG(dbgs() << "Legalizing store operation\n");
if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
ReplaceNode(ST, OptStore);
return;
@@ -495,15 +494,15 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
unsigned Align = ST->getAlignment();
const DataLayout &DL = DAG.getDataLayout();
if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
- DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
+ LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
SDValue Result = TLI.expandUnalignedStore(ST, DAG);
ReplaceNode(SDValue(ST, 0), Result);
} else
- DEBUG(dbgs() << "Legal store\n");
+ LLVM_DEBUG(dbgs() << "Legal store\n");
break;
}
case TargetLowering::Custom: {
- DEBUG(dbgs() << "Trying custom lowering\n");
+ LLVM_DEBUG(dbgs() << "Trying custom lowering\n");
SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Res && Res != SDValue(Node, 0))
ReplaceNode(SDValue(Node, 0), Res);
@@ -524,7 +523,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
return;
}
- DEBUG(dbgs() << "Legalizing truncating store operations\n");
+ LLVM_DEBUG(dbgs() << "Legalizing truncating store operations\n");
SDValue Value = ST->getValue();
EVT StVT = ST->getMemoryVT();
unsigned StWidth = StVT.getSizeInBits();
@@ -656,7 +655,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD) {
- DEBUG(dbgs() << "Legalizing non-extending load operation\n");
+ LLVM_DEBUG(dbgs() << "Legalizing non-extending load operation\n");
MVT VT = Node->getSimpleValueType(0);
SDValue RVal = SDValue(Node, 0);
SDValue RChain = SDValue(Node, 1);
@@ -706,7 +705,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
return;
}
- DEBUG(dbgs() << "Legalizing extending load operation\n");
+ LLVM_DEBUG(dbgs() << "Legalizing extending load operation\n");
EVT SrcVT = LD->getMemoryVT();
unsigned SrcWidth = SrcVT.getSizeInBits();
unsigned Alignment = LD->getAlignment();
@@ -979,7 +978,7 @@ getStrictFPOpcodeAction(const TargetLowering &TLI, unsigned Opcode, EVT VT) {
/// Return a legal replacement for the given operation, with all legal operands.
void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
- DEBUG(dbgs() << "\nLegalizing: "; Node->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nLegalizing: "; Node->dump(&DAG));
// Allow illegal target nodes and illegal registers.
if (Node->getOpcode() == ISD::TargetConstant ||
@@ -1202,10 +1201,10 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
}
switch (Action) {
case TargetLowering::Legal:
- DEBUG(dbgs() << "Legal node: nothing to do\n");
+ LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
return;
case TargetLowering::Custom:
- DEBUG(dbgs() << "Trying custom legalization\n");
+ LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
// FIXME: The handling for custom lowering with multiple results is
// a complete mess.
if (SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG)) {
@@ -1213,7 +1212,7 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
return;
if (Node->getNumValues() == 1) {
- DEBUG(dbgs() << "Successfully custom legalized node\n");
+ LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
// We can just directly replace this node with the lowered value.
ReplaceNode(SDValue(Node, 0), Res);
return;
@@ -1222,11 +1221,11 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
SmallVector<SDValue, 8> ResultVals;
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
ResultVals.push_back(Res.getValue(i));
- DEBUG(dbgs() << "Successfully custom legalized node\n");
+ LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
ReplaceNode(Node, ResultVals.data());
return;
}
- DEBUG(dbgs() << "Could not custom legalize node\n");
+ LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
LLVM_FALLTHROUGH;
case TargetLowering::Expand:
if (ExpandNode(Node))
@@ -2041,12 +2040,12 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
if (!CallInfo.second.getNode()) {
- DEBUG(dbgs() << "Created tailcall: "; DAG.getRoot().dump());
+ LLVM_DEBUG(dbgs() << "Created tailcall: "; DAG.getRoot().dump());
// It's a tailcall, return the chain (which is the DAG root).
return DAG.getRoot();
}
- DEBUG(dbgs() << "Created libcall: "; CallInfo.first.dump());
+ LLVM_DEBUG(dbgs() << "Created libcall: "; CallInfo.first.dump());
return CallInfo.first;
}
@@ -2332,10 +2331,10 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, SDValue Op0,
EVT DestVT,
const SDLoc &dl) {
// TODO: Should any fast-math-flags be set for the created nodes?
- DEBUG(dbgs() << "Legalizing INT_TO_FP\n");
+ LLVM_DEBUG(dbgs() << "Legalizing INT_TO_FP\n");
if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) {
- DEBUG(dbgs() << "32-bit [signed|unsigned] integer to float/double "
- "expansion\n");
+ LLVM_DEBUG(dbgs() << "32-bit [signed|unsigned] integer to float/double "
+ "expansion\n");
// Get the stack frame index of a 8 byte buffer.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);
@@ -2400,7 +2399,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, SDValue Op0,
// and in all alternate rounding modes.
// TODO: Generalize this for use with other types.
if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) {
- DEBUG(dbgs() << "Converting unsigned i64 to f64\n");
+ LLVM_DEBUG(dbgs() << "Converting unsigned i64 to f64\n");
SDValue TwoP52 =
DAG.getConstant(UINT64_C(0x4330000000000000), dl, MVT::i64);
SDValue TwoP84PlusTwoP52 =
@@ -2423,7 +2422,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, SDValue Op0,
// TODO: Generalize this for use with other types.
if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) {
- DEBUG(dbgs() << "Converting unsigned i64 to f32\n");
+ LLVM_DEBUG(dbgs() << "Converting unsigned i64 to f32\n");
// For unsigned conversions, convert them to signed conversions using the
// algorithm from the x86_64 __floatundidf in compiler_rt.
if (!isSigned) {
@@ -2858,7 +2857,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
}
bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
- DEBUG(dbgs() << "Trying to expand node\n");
+ LLVM_DEBUG(dbgs() << "Trying to expand node\n");
SmallVector<SDValue, 8> Results;
SDLoc dl(Node);
SDValue Tmp1, Tmp2, Tmp3, Tmp4;
@@ -3316,7 +3315,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
}
break;
case ISD::FP_TO_FP16:
- DEBUG(dbgs() << "Legalizing FP_TO_FP16\n");
+ LLVM_DEBUG(dbgs() << "Legalizing FP_TO_FP16\n");
if (!TLI.useSoftFloat() && TM.Options.UnsafeFPMath) {
SDValue Op = Node->getOperand(0);
MVT SVT = Op.getSimpleValueType();
@@ -3927,17 +3926,17 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// Replace the original node with the legalized result.
if (Results.empty()) {
- DEBUG(dbgs() << "Cannot expand node\n");
+ LLVM_DEBUG(dbgs() << "Cannot expand node\n");
return false;
}
- DEBUG(dbgs() << "Succesfully expanded node\n");
+ LLVM_DEBUG(dbgs() << "Succesfully expanded node\n");
ReplaceNode(Node, Results.data());
return true;
}
void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
- DEBUG(dbgs() << "Trying to convert node to libcall\n");
+ LLVM_DEBUG(dbgs() << "Trying to convert node to libcall\n");
SmallVector<SDValue, 8> Results;
SDLoc dl(Node);
// FIXME: Check flags on the node to see if we can use a finite call.
@@ -4237,10 +4236,10 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
// Replace the original node with the legalized result.
if (!Results.empty()) {
- DEBUG(dbgs() << "Successfully converted node to libcall\n");
+ LLVM_DEBUG(dbgs() << "Successfully converted node to libcall\n");
ReplaceNode(Node, Results.data());
} else
- DEBUG(dbgs() << "Could not convert node to libcall\n");
+ LLVM_DEBUG(dbgs() << "Could not convert node to libcall\n");
}
// Determine the vector type to use in place of an original scalar element when
@@ -4254,7 +4253,7 @@ static MVT getPromotedVectorElementType(const TargetLowering &TLI,
}
void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
- DEBUG(dbgs() << "Trying to promote node\n");
+ LLVM_DEBUG(dbgs() << "Trying to promote node\n");
SmallVector<SDValue, 8> Results;
MVT OVT = Node->getSimpleValueType(0);
if (Node->getOpcode() == ISD::UINT_TO_FP ||
@@ -4692,10 +4691,10 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
// Replace the original node with the legalized result.
if (!Results.empty()) {
- DEBUG(dbgs() << "Successfully promoted node\n");
+ LLVM_DEBUG(dbgs() << "Successfully promoted node\n");
ReplaceNode(Node, Results.data());
} else
- DEBUG(dbgs() << "Could not promote node\n");
+ LLVM_DEBUG(dbgs() << "Could not promote node\n");
}
/// This is the entry point for the file.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 3f17d4d605e6..67661cece5ef 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -47,8 +47,8 @@ static RTLIB::Libcall GetFPLibCall(EVT VT,
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Soften float result " << ResNo << ": "; N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Soften float result " << ResNo << ": "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue R = SDValue();
switch (N->getOpcode()) {
@@ -738,8 +738,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
- DEBUG(dbgs() << "Soften float operand " << OpNo << ": "; N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Soften float operand " << OpNo << ": "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue Res = SDValue();
switch (N->getOpcode()) {
@@ -1039,7 +1039,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
/// have invalid operands or may have other results that need promotion, we just
/// know that (at least) one result needs expansion.
void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Expand float result: "; N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Expand float result: "; N->dump(&DAG); dbgs() << "\n");
SDValue Lo, Hi;
Lo = Hi = SDValue();
@@ -1538,7 +1538,7 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
/// types of the node are known to be legal, but other operands of the node may
/// need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
- DEBUG(dbgs() << "Expand float operand: "; N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Expand float operand: "; N->dump(&DAG); dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom expand this node.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 8ed0f1b6e963..e6d8b0491ecb 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -36,12 +36,13 @@ using namespace llvm;
/// may also have invalid operands or may have other results that need
/// expansion, we just know that (at least) one result needs promotion.
void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Promote integer result: "; N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Promote integer result: "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom expand this node.
if (CustomLowerNode(N, N->getValueType(ResNo), true)) {
- DEBUG(dbgs() << "Node has been custom expanded, done\n");
+ LLVM_DEBUG(dbgs() << "Node has been custom expanded, done\n");
return;
}
@@ -897,11 +898,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
/// result types of the node are known to be legal, but other operands of the
/// node may need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
- DEBUG(dbgs() << "Promote integer operand: "; N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Promote integer operand: "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue Res = SDValue();
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false)) {
- DEBUG(dbgs() << "Node has been custom lowered, done\n");
+ LLVM_DEBUG(dbgs() << "Node has been custom lowered, done\n");
return false;
}
@@ -1349,7 +1351,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_ADDSUBCARRY(SDNode *N, unsigned OpNo) {
/// have invalid operands or may have other results that need promotion, we just
/// know that (at least) one result needs expansion.
void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Expand integer result: "; N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Expand integer result: "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue Lo, Hi;
Lo = Hi = SDValue();
@@ -2887,7 +2890,8 @@ void DAGTypeLegalizer::ExpandIntRes_ATOMIC_LOAD(SDNode *N,
/// result types of the node are known to be legal, but other operands of the
/// node may need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
- DEBUG(dbgs() << "Expand integer operand: "; N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Expand integer operand: "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue Res = SDValue();
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index e2cee0717cb6..20f9a7e65149 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -224,9 +224,9 @@ bool DAGTypeLegalizer::run() {
assert(N->getNodeId() == ReadyToProcess &&
"Node should be ready if on worklist!");
- DEBUG(dbgs() << "Legalizing node: "; N->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "Legalizing node: "; N->dump(&DAG));
if (IgnoreNodeResults(N)) {
- DEBUG(dbgs() << "Ignoring node results\n");
+ LLVM_DEBUG(dbgs() << "Ignoring node results\n");
goto ScanOperands;
}
@@ -234,11 +234,11 @@ bool DAGTypeLegalizer::run() {
// types are illegal.
for (unsigned i = 0, NumResults = N->getNumValues(); i < NumResults; ++i) {
EVT ResultVT = N->getValueType(i);
- DEBUG(dbgs() << "Analyzing result type: " <<
- ResultVT.getEVTString() << "\n");
+ LLVM_DEBUG(dbgs() << "Analyzing result type: " << ResultVT.getEVTString()
+ << "\n");
switch (getTypeAction(ResultVT)) {
case TargetLowering::TypeLegal:
- DEBUG(dbgs() << "Legal result type\n");
+ LLVM_DEBUG(dbgs() << "Legal result type\n");
break;
// The following calls must take care of *all* of the node's results,
// not just the illegal result they were passed (this includes results
@@ -296,11 +296,11 @@ ScanOperands:
continue;
const auto Op = N->getOperand(i);
- DEBUG(dbgs() << "Analyzing operand: "; Op.dump(&DAG));
+ LLVM_DEBUG(dbgs() << "Analyzing operand: "; Op.dump(&DAG));
EVT OpVT = Op.getValueType();
switch (getTypeAction(OpVT)) {
case TargetLowering::TypeLegal:
- DEBUG(dbgs() << "Legal operand\n");
+ LLVM_DEBUG(dbgs() << "Legal operand\n");
continue;
// The following calls must either replace all of the node's results
// using ReplaceValueWith, and return "false"; or update the node's
@@ -370,7 +370,8 @@ ScanOperands:
}
if (i == NumOperands) {
- DEBUG(dbgs() << "Legally typed node: "; N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Legally typed node: "; N->dump(&DAG);
+ dbgs() << "\n");
}
}
NodeDone:
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 724a909a210d..41fc8c0509e2 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -229,7 +229,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
ISD::LoadExtType ExtType = LD->getExtensionType();
if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD) {
- DEBUG(dbgs() << "\nLegalizing extending vector load: "; Node->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nLegalizing extending vector load: ";
+ Node->dump(&DAG));
switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
LD->getMemoryVT())) {
default: llvm_unreachable("This action is not supported yet!");
@@ -261,8 +262,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
EVT StVT = ST->getMemoryVT();
MVT ValVT = ST->getValue().getSimpleValueType();
if (StVT.isVector() && ST->isTruncatingStore()) {
- DEBUG(dbgs() << "\nLegalizing truncating vector store: ";
- Node->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nLegalizing truncating vector store: ";
+ Node->dump(&DAG));
switch (TLI.getTruncStoreAction(ValVT, StVT)) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
@@ -384,7 +385,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
break;
}
- DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));
switch (TLI.getOperationAction(Node->getOpcode(), QueryType)) {
default: llvm_unreachable("This action is not supported yet!");
@@ -393,16 +394,16 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Changed = true;
break;
case TargetLowering::Legal:
- DEBUG(dbgs() << "Legal node: nothing to do\n");
+ LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
break;
case TargetLowering::Custom: {
- DEBUG(dbgs() << "Trying custom legalization\n");
+ LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
if (SDValue Tmp1 = TLI.LowerOperation(Op, DAG)) {
- DEBUG(dbgs() << "Successfully custom legalized node\n");
+ LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
Result = Tmp1;
break;
}
- DEBUG(dbgs() << "Could not custom legalize node\n");
+ LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
LLVM_FALLTHROUGH;
}
case TargetLowering::Expand:
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index b15dcbf1a786..b975d8e314b0 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -33,9 +33,8 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Scalarize node result " << ResNo << ": ";
- N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Scalarize node result " << ResNo << ": "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue R = SDValue();
switch (N->getOpcode()) {
@@ -443,9 +442,8 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
- DEBUG(dbgs() << "Scalarize node operand " << OpNo << ": ";
- N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Scalarize node operand " << OpNo << ": "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue Res = SDValue();
if (!Res.getNode()) {
@@ -628,9 +626,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo) {
/// invalid operands or may have other results that need legalization, we just
/// know that (at least) one result needs vector splitting.
void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Split node result: ";
- N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Split node result: "; N->dump(&DAG); dbgs() << "\n");
SDValue Lo, Hi;
// See if the target wants to custom expand this node.
@@ -1376,8 +1372,8 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
std::tie(SplitLoVT, SplitHiVT) = DAG.GetSplitDestVTs(NewSrcVT);
if (TLI.isTypeLegal(SrcVT) && !TLI.isTypeLegal(SplitSrcVT) &&
TLI.isTypeLegal(NewSrcVT) && TLI.isTypeLegal(SplitLoVT)) {
- DEBUG(dbgs() << "Split vector extend via incremental extend:";
- N->dump(&DAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Split vector extend via incremental extend:";
+ N->dump(&DAG); dbgs() << "\n");
// Extend the source vector by one step.
SDValue NewSrc =
DAG.getNode(N->getOpcode(), dl, NewSrcVT, N->getOperand(0));
@@ -1512,9 +1508,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
/// the node are known to be legal, but other operands of the node may need
/// legalization as well as the specified one.
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
- DEBUG(dbgs() << "Split node operand: ";
- N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Split node operand: "; N->dump(&DAG); dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom split this node.
@@ -2183,9 +2177,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_FCOPYSIGN(SDNode *N) {
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
- DEBUG(dbgs() << "Widen node result " << ResNo << ": ";
- N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Widen node result " << ResNo << ": "; N->dump(&DAG);
+ dbgs() << "\n");
// See if the target wants to custom widen this node.
if (CustomWidenLowerNode(N, N->getValueType(ResNo)))
@@ -3329,9 +3322,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_SETCC(SDNode *N) {
// Widen Vector Operand
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
- DEBUG(dbgs() << "Widen node operand " << OpNo << ": ";
- N->dump(&DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Widen node operand " << OpNo << ": "; N->dump(&DAG);
+ dbgs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom widen this node.
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index f55d85f01e7c..3944d7df286d 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -116,7 +116,7 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
- DEBUG(dbgs() << "********** List Scheduling **********\n");
+ LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
NumLiveRegs = 0;
LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
@@ -125,8 +125,8 @@ void ScheduleDAGFast::Schedule() {
// Build the scheduling graph.
BuildSchedGraph(nullptr);
- DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
- SUnits[su].dumpAll(this));
+ LLVM_DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) SUnits[su]
+ .dumpAll(this));
// Execute the actual scheduling loop.
ListScheduleBottomUp();
@@ -181,8 +181,8 @@ void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
- DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
- DEBUG(SU->dump(this));
+ LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
+ LLVM_DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
SU->setHeightToAtLeast(CurCycle);
@@ -237,7 +237,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return nullptr;
- DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
N = NewNodes[1];
@@ -347,7 +347,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
SU = NewSU;
}
- DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
NewSU = Clone(SU);
// New SUnit has the exact same predecessors.
@@ -593,14 +593,14 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
// Issue copies, these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
- DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
- << " to SU #" << Copies.front()->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
+ << " to SU #" << Copies.front()->NodeNum << "\n");
AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
NewDef = Copies.back();
}
- DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
- << " to SU #" << TrySU->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
+ << " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
AddPred(NewDef, SDep(TrySU, SDep::Artificial));
TrySU->isAvailable = false;
@@ -667,8 +667,8 @@ void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
// These nodes do not need to be translated into MIs.
return;
- DEBUG(dbgs() << "\n*** Scheduling: ");
- DEBUG(N->dump(DAG));
+ LLVM_DEBUG(dbgs() << "\n*** Scheduling: ");
+ LLVM_DEBUG(N->dump(DAG));
Sequence.push_back(N);
unsigned NumOps = N->getNumOperands();
@@ -714,7 +714,7 @@ static SDNode *findGluedUser(SDNode *N) {
}
void ScheduleDAGLinearize::Schedule() {
- DEBUG(dbgs() << "********** DAG Linearization **********\n");
+ LLVM_DEBUG(dbgs() << "********** DAG Linearization **********\n");
SmallVector<SDNode*, 8> Glues;
unsigned DAGSize = 0;
@@ -764,15 +764,13 @@ ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
InstrEmitter Emitter(BB, InsertPos);
DenseMap<SDValue, unsigned> VRBaseMap;
- DEBUG({
- dbgs() << "\n*** Final schedule ***\n";
- });
+ LLVM_DEBUG({ dbgs() << "\n*** Final schedule ***\n"; });
unsigned NumNodes = Sequence.size();
MachineBasicBlock *BB = Emitter.getBlock();
for (unsigned i = 0; i != NumNodes; ++i) {
SDNode *N = Sequence[NumNodes-i-1];
- DEBUG(N->dump(DAG));
+ LLVM_DEBUG(N->dump(DAG));
Emitter.EmitNode(N, false, false, VRBaseMap);
// Emit any debug values associated with the node.
@@ -788,7 +786,7 @@ ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
}
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
InsertPos = Emitter.getInsertPos();
return Emitter.getBlock();
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 617f84d11ef1..73f855f8f84c 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -347,8 +347,8 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
- DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB)
- << " '" << BB->getName() << "' **********\n");
+ LLVM_DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB)
+ << " '" << BB->getName() << "' **********\n");
CurCycle = 0;
IssueCount = 0;
@@ -365,8 +365,7 @@ void ScheduleDAGRRList::Schedule() {
// Build the scheduling graph.
BuildSchedGraph(nullptr);
- DEBUG(for (SUnit &SU : SUnits)
- SU.dumpAll(this));
+ LLVM_DEBUG(for (SUnit &SU : SUnits) SU.dumpAll(this));
Topo.InitDAGTopologicalSorting();
AvailableQueue->initNodes(SUnits);
@@ -378,11 +377,11 @@ void ScheduleDAGRRList::Schedule() {
AvailableQueue->releaseState();
- DEBUG({
- dbgs() << "*** Final schedule ***\n";
- dumpSchedule();
- dbgs() << '\n';
- });
+ LLVM_DEBUG({
+ dbgs() << "*** Final schedule ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
}
//===----------------------------------------------------------------------===//
@@ -729,13 +728,13 @@ static void resetVRegCycle(SUnit *SU);
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
- DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
- DEBUG(SU->dump(this));
+ LLVM_DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
+ LLVM_DEBUG(SU->dump(this));
#ifndef NDEBUG
if (CurCycle < SU->getHeight())
- DEBUG(dbgs() << " Height [" << SU->getHeight()
- << "] pipeline stall!\n");
+ LLVM_DEBUG(dbgs() << " Height [" << SU->getHeight()
+ << "] pipeline stall!\n");
#endif
// FIXME: Do not modify node height. It may interfere with
@@ -828,8 +827,8 @@ void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
- DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
- DEBUG(SU->dump(this));
+ LLVM_DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
+ LLVM_DEBUG(SU->dump(this));
for (SDep &Pred : SU->Preds) {
CapturePred(&Pred);
@@ -1011,7 +1010,7 @@ SUnit *ScheduleDAGRRList::TryUnfoldSU(SUnit *SU) {
computeLatency(LoadSU);
}
- DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
// Now that we are committed to unfolding replace DAG Uses.
for (unsigned i = 0; i != NumVals; ++i)
@@ -1118,12 +1117,13 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
if (!N)
return nullptr;
- DEBUG(dbgs() << "Considering duplicating the SU\n");
- DEBUG(SU->dump(this));
+ LLVM_DEBUG(dbgs() << "Considering duplicating the SU\n");
+ LLVM_DEBUG(SU->dump(this));
if (N->getGluedNode() &&
!TII->canCopyGluedNodeDuringSchedule(N)) {
- DEBUG(dbgs()
+ LLVM_DEBUG(
+ dbgs()
<< "Giving up because it has incoming glue and the target does not "
"want to copy it\n");
return nullptr;
@@ -1134,7 +1134,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
MVT VT = N->getSimpleValueType(i);
if (VT == MVT::Glue) {
- DEBUG(dbgs() << "Giving up because it has outgoing glue\n");
+ LLVM_DEBUG(dbgs() << "Giving up because it has outgoing glue\n");
return nullptr;
} else if (VT == MVT::Other)
TryUnfold = true;
@@ -1142,8 +1142,9 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
for (const SDValue &Op : N->op_values()) {
MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
if (VT == MVT::Glue && !TII->canCopyGluedNodeDuringSchedule(N)) {
- DEBUG(dbgs() << "Giving up because it one of the operands is glue and "
- "the target does not want to copy it\n");
+ LLVM_DEBUG(
+ dbgs() << "Giving up because it one of the operands is glue and "
+ "the target does not want to copy it\n");
return nullptr;
}
}
@@ -1160,7 +1161,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
return SU;
}
- DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
NewSU = CreateClone(SU);
// New SUnit has the exact same predecessors.
@@ -1421,7 +1422,7 @@ void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
// Furthermore, it may have been made available again, in which case it is
// now already in the AvailableQueue.
if (SU->isAvailable && !SU->NodeQueueId) {
- DEBUG(dbgs() << " Repushing SU #" << SU->NodeNum << '\n');
+ LLVM_DEBUG(dbgs() << " Repushing SU #" << SU->NodeNum << '\n');
AvailableQueue->push(SU);
}
if (i < Interferences.size())
@@ -1442,12 +1443,10 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
SmallVector<unsigned, 4> LRegs;
if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
break;
- DEBUG(dbgs() << " Interfering reg ";
- if (LRegs[0] == TRI->getNumRegs())
- dbgs() << "CallResource";
- else
- dbgs() << printReg(LRegs[0], TRI);
- dbgs() << " SU #" << CurSU->NodeNum << '\n');
+ LLVM_DEBUG(dbgs() << " Interfering reg ";
+ if (LRegs[0] == TRI->getNumRegs()) dbgs() << "CallResource";
+ else dbgs() << printReg(LRegs[0], TRI);
+ dbgs() << " SU #" << CurSU->NodeNum << '\n');
std::pair<LRegsMapT::iterator, bool> LRegsPair =
LRegsMap.insert(std::make_pair(CurSU, LRegs));
if (LRegsPair.second) {
@@ -1493,17 +1492,17 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
if (!BtSU->isPending)
AvailableQueue->remove(BtSU);
}
- DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
- << TrySU->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum
+ << ") to SU(" << TrySU->NodeNum << ")\n");
AddPred(TrySU, SDep(BtSU, SDep::Artificial));
// If one or more successors has been unscheduled, then the current
// node is no longer available.
if (!TrySU->isAvailable || !TrySU->NodeQueueId) {
- DEBUG(dbgs() << "TrySU not available; choosing node from queue\n");
+ LLVM_DEBUG(dbgs() << "TrySU not available; choosing node from queue\n");
CurSU = AvailableQueue->pop();
} else {
- DEBUG(dbgs() << "TrySU available\n");
+ LLVM_DEBUG(dbgs() << "TrySU available\n");
// Available and in AvailableQueue
AvailableQueue->remove(TrySU);
CurSU = TrySU;
@@ -1547,14 +1546,14 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// Issue copies, these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
- DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
- << " to SU #" << Copies.front()->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
+ << " to SU #" << Copies.front()->NodeNum << "\n");
AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
NewDef = Copies.back();
}
- DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
- << " to SU #" << TrySU->NodeNum << "\n");
+ LLVM_DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
+ << " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
AddPred(NewDef, SDep(TrySU, SDep::Artificial));
TrySU->isAvailable = false;
@@ -1582,8 +1581,8 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
// priority. If it is not ready put it back. Schedule the node.
Sequence.reserve(SUnits.size());
while (!AvailableQueue->empty() || !Interferences.empty()) {
- DEBUG(dbgs() << "\nExamining Available:\n";
- AvailableQueue->dump(this));
+ LLVM_DEBUG(dbgs() << "\nExamining Available:\n";
+ AvailableQueue->dump(this));
// Pick the best node to schedule taking all constraints into
// consideration.
@@ -2046,8 +2045,8 @@ LLVM_DUMP_METHOD void RegReductionPQBase::dumpRegPressure() const {
unsigned Id = RC->getID();
unsigned RP = RegPressure[Id];
if (!RP) continue;
- DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
- << RegLimit[Id] << '\n');
+ LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
+ << RegLimit[Id] << '\n');
}
}
#endif
@@ -2199,14 +2198,15 @@ void RegReductionPQBase::scheduledNode(SUnit *SU) {
if (RegPressure[RCId] < Cost) {
// Register pressure tracking is imprecise. This can happen. But we try
// hard not to let it happen because it likely results in poor scheduling.
- DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
+ LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum
+ << ") has too many regdefs\n");
RegPressure[RCId] = 0;
}
else {
RegPressure[RCId] -= Cost;
}
}
- DEBUG(dumpRegPressure());
+ LLVM_DEBUG(dumpRegPressure());
}
void RegReductionPQBase::unscheduledNode(SUnit *SU) {
@@ -2286,7 +2286,7 @@ void RegReductionPQBase::unscheduledNode(SUnit *SU) {
}
}
- DEBUG(dumpRegPressure());
+ LLVM_DEBUG(dumpRegPressure());
}
//===----------------------------------------------------------------------===//
@@ -2381,7 +2381,7 @@ static void initVRegCycle(SUnit *SU) {
if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
return;
- DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
SU->isVRegCycle = true;
@@ -2419,7 +2419,7 @@ static bool hasVRegCycleUse(const SUnit *SU) {
if (Pred.isCtrl()) continue; // ignore chain preds
if (Pred.getSUnit()->isVRegCycle &&
Pred.getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
- DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
return true;
}
}
@@ -2479,9 +2479,9 @@ static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
int LDepth = left->getDepth() - LPenalty;
int RDepth = right->getDepth() - RPenalty;
if (LDepth != RDepth) {
- DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
- << ") depth " << LDepth << " vs SU (" << right->NodeNum
- << ") depth " << RDepth << "\n");
+ LLVM_DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
+ << ") depth " << LDepth << " vs SU (" << right->NodeNum
+ << ") depth " << RDepth << "\n");
return LDepth < RDepth ? 1 : -1;
}
if (left->Latency != right->Latency)
@@ -2503,9 +2503,9 @@ static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
static const char *const PhysRegMsg[] = { " has no physreg",
" defines a physreg" };
#endif
- DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
- << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
- << PhysRegMsg[RHasPhysReg] << "\n");
+ LLVM_DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
+ << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum
+ << ") " << PhysRegMsg[RHasPhysReg] << "\n");
return LHasPhysReg < RHasPhysReg;
}
}
@@ -2649,13 +2649,13 @@ bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
// Avoid causing spills. If register pressure is high, schedule for
// register pressure reduction.
if (LHigh && !RHigh) {
- DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
- << right->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
+ << right->NodeNum << ")\n");
return true;
}
else if (!LHigh && RHigh) {
- DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
- << left->NodeNum << ")\n");
+ LLVM_DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
+ << left->NodeNum << ")\n");
return false;
}
if (!LHigh && !RHigh) {
@@ -2717,8 +2717,9 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
}
if (!DisableSchedRegPressure && LPDiff != RPDiff) {
- DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
- << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
+ LLVM_DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum
+ << "): " << LPDiff << " != SU(" << right->NodeNum
+ << "): " << RPDiff << "\n");
return LPDiff > RPDiff;
}
@@ -2730,8 +2731,9 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
}
if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
- DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
- << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
+ LLVM_DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
+ << " != SU(" << right->NodeNum << "): " << RLiveUses
+ << "\n");
return LLiveUses < RLiveUses;
}
@@ -2745,9 +2747,9 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
if (!DisableSchedCriticalPath) {
int spread = (int)left->getDepth() - (int)right->getDepth();
if (std::abs(spread) > MaxReorderWindow) {
- DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
- << left->getDepth() << " != SU(" << right->NodeNum << "): "
- << right->getDepth() << "\n");
+ LLVM_DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
+ << left->getDepth() << " != SU(" << right->NodeNum
+ << "): " << right->getDepth() << "\n");
return left->getDepth() < right->getDepth();
}
}
@@ -2968,9 +2970,10 @@ void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
// Ok, the transformation is safe and the heuristics suggest it is
// profitable. Update the graph.
- DEBUG(dbgs() << " Prescheduling SU #" << SU.NodeNum
- << " next to PredSU #" << PredSU->NodeNum
- << " to guide scheduling in the presence of multiple uses\n");
+ LLVM_DEBUG(
+ dbgs() << " Prescheduling SU #" << SU.NodeNum << " next to PredSU #"
+ << PredSU->NodeNum
+ << " to guide scheduling in the presence of multiple uses\n");
for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
SDep Edge = PredSU->Succs[i];
assert(!Edge.isAssignedRegDep());
@@ -3059,8 +3062,9 @@ void RegReductionPQBase::AddPseudoTwoAddrDeps() {
(isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
(!SU.isCommutable && SuccSU->isCommutable)) &&
!scheduleDAG->IsReachable(SuccSU, &SU)) {
- DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
- << SU.NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
+ LLVM_DEBUG(dbgs()
+ << " Adding a pseudo-two-addr edge from SU #"
+ << SU.NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
scheduleDAG->AddPred(&SU, SDep(SuccSU, SDep::Artificial));
}
}
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
index 07b46b9183ab..84055f8ecc1a 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
@@ -93,8 +93,8 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGVLIW::Schedule() {
- DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB)
- << " '" << BB->getName() << "' **********\n");
+ LLVM_DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB)
+ << " '" << BB->getName() << "' **********\n");
// Build the scheduling graph.
BuildSchedGraph(AA);
@@ -151,8 +151,8 @@ void ScheduleDAGVLIW::releaseSuccessors(SUnit *SU) {
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGVLIW::scheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
- DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
- DEBUG(SU->dump(this));
+ LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
+ LLVM_DEBUG(SU->dump(this));
Sequence.push_back(SU);
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
@@ -246,7 +246,7 @@ void ScheduleDAGVLIW::listScheduleTopDown() {
} else if (!HasNoopHazards) {
// Otherwise, we have a pipeline stall, but no other problem, just advance
// the current cycle and try again.
- DEBUG(dbgs() << "*** Advancing cycle, no work to do\n");
+ LLVM_DEBUG(dbgs() << "*** Advancing cycle, no work to do\n");
HazardRec->AdvanceCycle();
++NumStalls;
++CurCycle;
@@ -254,7 +254,7 @@ void ScheduleDAGVLIW::listScheduleTopDown() {
// Otherwise, we have no instructions to issue and we have instructions
// that will fault if we don't do this right. This is the case for
// processors without pipeline interlocks and other cases.
- DEBUG(dbgs() << "*** Emitting noop\n");
+ LLVM_DEBUG(dbgs() << "*** Emitting noop\n");
HazardRec->EmitNoop();
Sequence.push_back(nullptr); // NULL here means noop
++NumNoops;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 688eb78d67ac..f486a90380ed 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -90,10 +90,7 @@ void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
#define DEBUG_TYPE "selectiondag"
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
- DEBUG(
- dbgs() << Msg;
- V.getNode()->dump(G);
- );
+ LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}
//===----------------------------------------------------------------------===//
@@ -7410,8 +7407,9 @@ void SelectionDAG::salvageDebugInfo(SDNode &N) {
DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
ClonedDVs.push_back(Clone);
DV->setIsInvalidated();
- DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
- dbgs() << " into " << *DIExpr << '\n');
+ LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
+ N0.getNode()->dumprFull(this);
+ dbgs() << " into " << *DIExpr << '\n');
}
}
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 57eadbb44101..c79a885ece54 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1102,7 +1102,8 @@ void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
DIExpression *DanglingExpr = DI->getExpression();
if (DanglingVariable == Variable &&
Expr->fragmentsOverlap(DanglingExpr)) {
- DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Dropping dangling debug info for " << *DI << "\n");
DDI = DanglingDebugInfo();
}
}
@@ -1127,24 +1128,24 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
SDDbgValue *SDV;
if (Val.getNode()) {
if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
- DEBUG(dbgs() << "Resolve dangling debug info [order=" << DbgSDNodeOrder
- << "] for:\n " << *DI << "\n");
- DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
+ LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
+ << DbgSDNodeOrder << "] for:\n " << *DI << "\n");
+ LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
// Increase the SDNodeOrder for the DbgValue here to make sure it is
// inserted after the definition of Val when emitting the instructions
// after ISel. An alternative could be to teach
// ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
- DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder)
- dbgs() << "changing SDNodeOrder from " << DbgSDNodeOrder
- << " to " << ValSDNodeOrder << "\n");
+ LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
+ << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
+ << ValSDNodeOrder << "\n");
SDV = getDbgValue(Val, Variable, Expr, dl,
std::max(DbgSDNodeOrder, ValSDNodeOrder));
DAG.AddDbgValue(SDV, Val.getNode(), false);
} else
- DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
- << "in EmitFuncArgumentDbgValue\n");
+ LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
+ << "in EmitFuncArgumentDbgValue\n");
} else
- DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
}
DanglingDebugInfoMap[V].clear();
}
@@ -2750,7 +2751,7 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
}
if (isVectorReductionOp(&I)) {
Flags.setVectorReduction(true);
- DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
+ LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
}
if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
Flags.copyFMF(*FPOp);
@@ -5142,7 +5143,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
const Value *Address = DI.getVariableLocation();
if (!Address || isa<UndefValue>(Address) ||
(Address->use_empty() && !isa<Argument>(Address))) {
- DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return nullptr;
}
@@ -5204,7 +5205,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// virtual register info from the FuncInfo.ValueMap.
if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
N)) {
- DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
return nullptr;
@@ -5307,8 +5308,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return nullptr;
}
- DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
- DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
+ LLVM_DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
return nullptr;
}
@@ -8629,7 +8630,8 @@ findArgumentCopyElisionCandidates(const DataLayout &DL,
continue;
}
- DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI << '\n');
+ LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
+ << '\n');
// Mark this alloca and store for argument copy elision.
*Info = StaticAllocaInfo::Elidable;
@@ -8670,8 +8672,9 @@ static void tryToElideArgumentCopy(
int OldIndex = AllocaIndex;
MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
- DEBUG(dbgs() << " argument copy elision failed due to bad fixed stack "
- "object size\n");
+ LLVM_DEBUG(
+ dbgs() << " argument copy elision failed due to bad fixed stack "
+ "object size\n");
return;
}
unsigned RequiredAlignment = AI->getAlignment();
@@ -8680,16 +8683,16 @@ static void tryToElideArgumentCopy(
AI->getAllocatedType());
}
if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
- DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
- "greater than stack argument alignment ("
- << RequiredAlignment << " vs "
- << MFI.getObjectAlignment(FixedIndex) << ")\n");
+ LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
+ "greater than stack argument alignment ("
+ << RequiredAlignment << " vs "
+ << MFI.getObjectAlignment(FixedIndex) << ")\n");
return;
}
// Perform the elision. Delete the old stack object and replace its only use
// in the variable info map. Mark the stack object as mutable.
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
<< " Replacing frame index " << OldIndex << " with " << FixedIndex
<< '\n';
@@ -8861,14 +8864,14 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
"LowerFormalArguments didn't return a valid chain!");
assert(InVals.size() == Ins.size() &&
"LowerFormalArguments didn't emit the correct number of values!");
- DEBUG({
- for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- assert(InVals[i].getNode() &&
- "LowerFormalArguments emitted a null value!");
- assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
- "LowerFormalArguments emitted a value with the wrong type!");
- }
- });
+ LLVM_DEBUG({
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ assert(InVals[i].getNode() &&
+ "LowerFormalArguments emitted a null value!");
+ assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
+ "LowerFormalArguments emitted a value with the wrong type!");
+ }
+ });
// Update the DAG with the new chain value resulting from argument lowering.
DAG.setRoot(NewRoot);
@@ -10012,8 +10015,8 @@ MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
if (!SwitchPeeled)
return SwitchMBB;
- DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: " << TopCaseProb
- << "\n");
+ LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
+ << TopCaseProb << "\n");
// Record the MBB for the peeled switch statement.
MachineFunction::iterator BBI(SwitchMBB);
@@ -10030,10 +10033,11 @@ MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
Clusters.erase(PeeledCaseIt);
for (CaseCluster &CC : Clusters) {
- DEBUG(dbgs() << "Scale the probablity for one cluster, before scaling: "
- << CC.Prob << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Scale the probablity for one cluster, before scaling: "
+ << CC.Prob << "\n");
CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
- DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
+ LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
}
PeeledCaseProb = TopCaseProb;
return PeeledSwitchMBB;
@@ -10112,11 +10116,13 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
findJumpTables(Clusters, &SI, DefaultMBB);
findBitTestClusters(Clusters, &SI);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Case clusters: ";
for (const CaseCluster &C : Clusters) {
- if (C.Kind == CC_JumpTable) dbgs() << "JT:";
- if (C.Kind == CC_BitTests) dbgs() << "BT:";
+ if (C.Kind == CC_JumpTable)
+ dbgs() << "JT:";
+ if (C.Kind == CC_BitTests)
+ dbgs() << "BT:";
C.Low->getValue().print(dbgs(), true);
if (C.Low != C.High) {
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index f8ccb9cbbab0..f9e4bc6196b9 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -212,26 +212,27 @@ namespace llvm {
return;
IS.OptLevel = NewOptLevel;
IS.TM.setOptLevel(NewOptLevel);
- DEBUG(dbgs() << "\nChanging optimization level for Function "
- << IS.MF->getFunction().getName() << "\n");
- DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
- << " ; After: -O" << NewOptLevel << "\n");
+ LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
+ << IS.MF->getFunction().getName() << "\n");
+ LLVM_DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel << " ; After: -O"
+ << NewOptLevel << "\n");
SavedFastISel = IS.TM.Options.EnableFastISel;
if (NewOptLevel == CodeGenOpt::None) {
IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
- DEBUG(dbgs() << "\tFastISel is "
- << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
- << "\n");
+ LLVM_DEBUG(
+ dbgs() << "\tFastISel is "
+ << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
+ << "\n");
}
}
~OptLevelChanger() {
if (IS.OptLevel == SavedOptLevel)
return;
- DEBUG(dbgs() << "\nRestoring optimization level for Function "
- << IS.MF->getFunction().getName() << "\n");
- DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
- << " ; After: -O" << SavedOptLevel << "\n");
+ LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
+ << IS.MF->getFunction().getName() << "\n");
+ LLVM_DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel << " ; After: -O"
+ << SavedOptLevel << "\n");
IS.OptLevel = SavedOptLevel;
IS.TM.setOptLevel(SavedOptLevel);
IS.TM.setFastISel(SavedFastISel);
@@ -412,7 +413,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
- DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
SplitCriticalSideEffectEdges(const_cast<Function &>(Fn), DT, LI);
@@ -516,8 +517,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// FIXME: VR def may not be in entry block.
Def->getParent()->insert(std::next(InsertPos), MI);
} else
- DEBUG(dbgs() << "Dropping debug info for dead vreg"
- << TargetRegisterInfo::virtReg2Index(Reg) << "\n");
+ LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
+ << TargetRegisterInfo::virtReg2Index(Reg) << "\n");
}
// If Reg is live-in then update debug info to track its copy in a vreg.
@@ -624,8 +625,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// at this point.
FuncInfo->clear();
- DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
- DEBUG(MF->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
+ LLVM_DEBUG(MF->print(dbgs()));
return true;
}
@@ -735,9 +736,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
BlockName =
(MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
}
- DEBUG(dbgs() << "Initial selection DAG: " << printMBBReference(*FuncInfo->MBB)
- << " '" << BlockName << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Initial selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (ViewDAGCombine1 && MatchFilterBB)
CurDAG->viewGraph("dag-combine1 input for " + BlockName);
@@ -752,10 +754,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
- DEBUG(dbgs() << "Optimized lowered selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Optimized lowered selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
@@ -772,10 +774,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
- DEBUG(dbgs() << "Type-legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
// Only allow creation of legal node types.
CurDAG->NewNodesMustHaveLegalTypes = true;
@@ -794,10 +796,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
- DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
}
{
@@ -807,10 +809,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
}
if (Changed) {
- DEBUG(dbgs() << "Vector-legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Vector-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
{
NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
@@ -818,10 +820,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->LegalizeTypes();
}
- DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (ViewDAGCombineLT && MatchFilterBB)
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
@@ -833,10 +835,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
@@ -854,10 +856,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
- DEBUG(dbgs() << "Legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (ViewDAGCombine2 && MatchFilterBB)
CurDAG->viewGraph("dag-combine2 input for " + BlockName);
@@ -872,10 +874,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
if (TTI.hasBranchDivergence())
CurDAG->VerifyDAGDiverence();
- DEBUG(dbgs() << "Optimized legalized selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Optimized legalized selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
@@ -891,10 +893,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
DoInstructionSelection();
}
- DEBUG(dbgs() << "Selected selection DAG: "
- << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
- << "'\n";
- CurDAG->dump());
+ LLVM_DEBUG(dbgs() << "Selected selection DAG: "
+ << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
+ << "'\n";
+ CurDAG->dump());
if (ViewSchedDAGs && MatchFilterBB)
CurDAG->viewGraph("scheduler input for " + BlockName);
@@ -1013,9 +1015,9 @@ int SelectionDAGISel::getUninvalidatedNodeId(SDNode *N) {
}
void SelectionDAGISel::DoInstructionSelection() {
- DEBUG(dbgs() << "===== Instruction selection begins: "
- << printMBBReference(*FuncInfo->MBB) << " '"
- << FuncInfo->MBB->getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
+ << printMBBReference(*FuncInfo->MBB) << " '"
+ << FuncInfo->MBB->getName() << "'\n");
PreprocessISelDAG();
@@ -1087,8 +1089,8 @@ void SelectionDAGISel::DoInstructionSelection() {
if (Node->isStrictFPOpcode())
Node = CurDAG->mutateStrictFPToFP(Node);
- DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
- Node->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
+ Node->dump(CurDAG));
Select(Node);
}
@@ -1096,7 +1098,7 @@ void SelectionDAGISel::DoInstructionSelection() {
CurDAG->setRoot(Dummy.getValue());
}
- DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
+ LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
PostprocessISelDAG();
}
@@ -1486,7 +1488,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = nullptr;
if (TM.Options.EnableFastISel) {
- DEBUG(dbgs() << "Enabling fast-isel\n");
+ LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
}
@@ -1837,12 +1839,12 @@ FindSplitPointForStackProtector(MachineBasicBlock *BB) {
void
SelectionDAGISel::FinishBasicBlock() {
- DEBUG(dbgs() << "Total amount of phi nodes to update: "
- << FuncInfo->PHINodesToUpdate.size() << "\n";
- for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
- dbgs() << "Node " << i << " : ("
- << FuncInfo->PHINodesToUpdate[i].first
- << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
+ LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
+ << FuncInfo->PHINodesToUpdate.size() << "\n";
+ for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
+ ++i) dbgs()
+ << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
+ << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
// Next, now that we know what the last MBB the LLVM BB expanded is, update
// PHI nodes in successors.
@@ -2473,7 +2475,7 @@ void SelectionDAGISel::UpdateChains(
if (!NowDeadNodes.empty())
CurDAG->RemoveDeadNodes(NowDeadNodes);
- DEBUG(dbgs() << "ISEL: Match complete!\n");
+ LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
}
/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
@@ -2961,7 +2963,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
// update the chain results when the pattern is complete.
SmallVector<SDNode*, 3> ChainNodesMatched;
- DEBUG(dbgs() << "ISEL: Starting pattern match\n");
+ LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
// Determine where to start the interpreter. Normally we start at opcode #0,
// but if the state machine starts with an OPC_SwitchOpcode, then we
@@ -2973,7 +2975,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
// Already computed the OpcodeOffset table, just index into it.
if (N.getOpcode() < OpcodeOffset.size())
MatcherIndex = OpcodeOffset[N.getOpcode()];
- DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
+ LLVM_DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
} else if (MatcherTable[0] == OPC_SwitchOpcode) {
// Otherwise, the table isn't computed, but the state machine does start
@@ -3040,9 +3042,10 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
if (!Result)
break;
- DEBUG(dbgs() << " Skipped scope entry (due to false predicate) at "
- << "index " << MatcherIndexOfPredicate
- << ", continuing at " << FailIndex << "\n");
+ LLVM_DEBUG(
+ dbgs() << " Skipped scope entry (due to false predicate) at "
+ << "index " << MatcherIndexOfPredicate << ", continuing at "
+ << FailIndex << "\n");
++NumDAGIselRetries;
// Otherwise, we know that this case of the Scope is guaranteed to fail,
@@ -3091,11 +3094,8 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
if (auto *MN = dyn_cast<MemSDNode>(N))
MatchedMemRefs.push_back(MN->getMemOperand());
else {
- DEBUG(
- dbgs() << "Expected MemSDNode ";
- N->dump(CurDAG);
- dbgs() << '\n'
- );
+ LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
+ dbgs() << '\n');
}
continue;
@@ -3216,8 +3216,8 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
if (CaseSize == 0) break;
// Otherwise, execute the case we found.
- DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart
- << " to " << MatcherIndex << "\n");
+ LLVM_DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to "
+ << MatcherIndex << "\n");
continue;
}
@@ -3248,8 +3248,9 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
if (CaseSize == 0) break;
// Otherwise, execute the case we found.
- DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
- << "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
+ LLVM_DEBUG(dbgs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
+ << "] from " << SwitchStart << " to " << MatcherIndex
+ << '\n');
continue;
}
case OPC_CheckChild0Type: case OPC_CheckChild1Type:
@@ -3629,14 +3630,11 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
Res->setMemRefs(MemRefs, MemRefs + NumMemRefs);
}
- DEBUG(
- if (!MatchedMemRefs.empty() && Res->memoperands_empty())
- dbgs() << " Dropping mem operands\n";
- dbgs() << " "
- << (IsMorphNodeTo ? "Morphed" : "Created")
- << " node: ";
- Res->dump(CurDAG);
- );
+ LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
+ << " Dropping mem operands\n";
+ dbgs() << " " << (IsMorphNodeTo ? "Morphed" : "Created")
+ << " node: ";
+ Res->dump(CurDAG););
// If this was a MorphNodeTo then we're completely done!
if (IsMorphNodeTo) {
@@ -3698,7 +3696,8 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
// If the code reached this point, then the match failed. See if there is
// another child to try in the current 'Scope', otherwise pop it until we
// find a case to check.
- DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
+ LLVM_DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex
+ << "\n");
++NumDAGIselRetries;
while (true) {
if (MatchScopes.empty()) {
@@ -3718,7 +3717,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
MatcherIndex = LastScope.FailIndex;
- DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
+ LLVM_DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
InputChain = LastScope.InputChain;
InputGlue = LastScope.InputGlue;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index be4ab094bf49..3b19bff4743d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -229,7 +229,7 @@ bool SelectionDAG::setSubgraphColorHelper(SDNode *N, const char *Color, DenseSet
if (level >= 20) {
if (!printed) {
printed = true;
- DEBUG(dbgs() << "setSubgraphColor hit max level\n");
+ LLVM_DEBUG(dbgs() << "setSubgraphColor hit max level\n");
}
return true;
}
diff --git a/lib/CodeGen/ShrinkWrap.cpp b/lib/CodeGen/ShrinkWrap.cpp
index d4fbe0a8df07..7647df0d340d 100644
--- a/lib/CodeGen/ShrinkWrap.cpp
+++ b/lib/CodeGen/ShrinkWrap.cpp
@@ -254,7 +254,7 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
RegScavenger *RS) const {
if (MI.getOpcode() == FrameSetupOpcode ||
MI.getOpcode() == FrameDestroyOpcode) {
- DEBUG(dbgs() << "Frame instruction: " << MI << '\n');
+ LLVM_DEBUG(dbgs() << "Frame instruction: " << MI << '\n');
return true;
}
for (const MachineOperand &MO : MI.operands()) {
@@ -286,8 +286,8 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
}
// Skip FrameIndex operands in DBG_VALUE instructions.
if (UseOrDefCSR || (MO.isFI() && !MI.isDebugValue())) {
- DEBUG(dbgs() << "Use or define CSR(" << UseOrDefCSR << ") or FI("
- << MO.isFI() << "): " << MI << '\n');
+ LLVM_DEBUG(dbgs() << "Use or define CSR(" << UseOrDefCSR << ") or FI("
+ << MO.isFI() << "): " << MI << '\n');
return true;
}
}
@@ -318,7 +318,7 @@ void ShrinkWrap::updateSaveRestorePoints(MachineBasicBlock &MBB,
Save = MDT->findNearestCommonDominator(Save, &MBB);
if (!Save) {
- DEBUG(dbgs() << "Found a block that is not reachable from Entry\n");
+ LLVM_DEBUG(dbgs() << "Found a block that is not reachable from Entry\n");
return;
}
@@ -352,7 +352,8 @@ void ShrinkWrap::updateSaveRestorePoints(MachineBasicBlock &MBB,
}
if (!Restore) {
- DEBUG(dbgs() << "Restore point needs to be spanned on several blocks\n");
+ LLVM_DEBUG(
+ dbgs() << "Restore point needs to be spanned on several blocks\n");
return;
}
@@ -435,7 +436,7 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF))
return false;
- DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
init(MF);
@@ -447,7 +448,7 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
// results. Moreover, we may miss that the prologue and
// epilogue are not in the same loop, leading to unbalanced
// construction/deconstruction of the stack frame.
- DEBUG(dbgs() << "Irreducible CFGs are not supported yet\n");
+ LLVM_DEBUG(dbgs() << "Irreducible CFGs are not supported yet\n");
return false;
}
@@ -456,11 +457,11 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr);
for (MachineBasicBlock &MBB : MF) {
- DEBUG(dbgs() << "Look into: " << MBB.getNumber() << ' ' << MBB.getName()
- << '\n');
+ LLVM_DEBUG(dbgs() << "Look into: " << MBB.getNumber() << ' '
+ << MBB.getName() << '\n');
if (MBB.isEHFuncletEntry()) {
- DEBUG(dbgs() << "EH Funclets are not supported yet.\n");
+ LLVM_DEBUG(dbgs() << "EH Funclets are not supported yet.\n");
return false;
}
@@ -474,7 +475,7 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
// basic block can jump out from the middle.
updateSaveRestorePoints(MBB, RS.get());
if (!ArePointsInteresting()) {
- DEBUG(dbgs() << "EHPad prevents shrink-wrapping\n");
+ LLVM_DEBUG(dbgs() << "EHPad prevents shrink-wrapping\n");
return false;
}
continue;
@@ -489,7 +490,7 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
// If we are at a point where we cannot improve the placement of
// save/restore instructions, just give up.
if (!ArePointsInteresting()) {
- DEBUG(dbgs() << "No Shrink wrap candidate found\n");
+ LLVM_DEBUG(dbgs() << "No Shrink wrap candidate found\n");
return false;
}
// No need to look for other instructions, this basic block
@@ -502,20 +503,21 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
// because it means we did not encounter any frame/CSR related code.
// Otherwise, we would have returned from the previous loop.
assert(!Save && !Restore && "We miss a shrink-wrap opportunity?!");
- DEBUG(dbgs() << "Nothing to shrink-wrap\n");
+ LLVM_DEBUG(dbgs() << "Nothing to shrink-wrap\n");
return false;
}
- DEBUG(dbgs() << "\n ** Results **\nFrequency of the Entry: " << EntryFreq
- << '\n');
+ LLVM_DEBUG(dbgs() << "\n ** Results **\nFrequency of the Entry: " << EntryFreq
+ << '\n');
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
do {
- DEBUG(dbgs() << "Shrink wrap candidates (#, Name, Freq):\nSave: "
- << Save->getNumber() << ' ' << Save->getName() << ' '
- << MBFI->getBlockFreq(Save).getFrequency() << "\nRestore: "
- << Restore->getNumber() << ' ' << Restore->getName() << ' '
- << MBFI->getBlockFreq(Restore).getFrequency() << '\n');
+ LLVM_DEBUG(dbgs() << "Shrink wrap candidates (#, Name, Freq):\nSave: "
+ << Save->getNumber() << ' ' << Save->getName() << ' '
+ << MBFI->getBlockFreq(Save).getFrequency()
+ << "\nRestore: " << Restore->getNumber() << ' '
+ << Restore->getName() << ' '
+ << MBFI->getBlockFreq(Restore).getFrequency() << '\n');
bool IsSaveCheap, TargetCanUseSaveAsPrologue = false;
if (((IsSaveCheap = EntryFreq >= MBFI->getBlockFreq(Save).getFrequency()) &&
@@ -523,7 +525,8 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
((TargetCanUseSaveAsPrologue = TFI->canUseAsPrologue(*Save)) &&
TFI->canUseAsEpilogue(*Restore)))
break;
- DEBUG(dbgs() << "New points are too expensive or invalid for the target\n");
+ LLVM_DEBUG(
+ dbgs() << "New points are too expensive or invalid for the target\n");
MachineBasicBlock *NewBB;
if (!IsSaveCheap || !TargetCanUseSaveAsPrologue) {
Save = FindIDom<>(*Save, Save->predecessors(), *MDT);
@@ -545,9 +548,10 @@ bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
return false;
}
- DEBUG(dbgs() << "Final shrink wrap candidates:\nSave: " << Save->getNumber()
- << ' ' << Save->getName() << "\nRestore: "
- << Restore->getNumber() << ' ' << Restore->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Final shrink wrap candidates:\nSave: "
+ << Save->getNumber() << ' ' << Save->getName()
+ << "\nRestore: " << Restore->getNumber() << ' '
+ << Restore->getName() << '\n');
MachineFrameInfo &MFI = MF.getFrameInfo();
MFI.setSavePoint(Save);
diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp
index 611ec97689ad..f657c16a3206 100644
--- a/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/lib/CodeGen/SjLjEHPrepare.cpp
@@ -307,8 +307,8 @@ void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
for (InvokeInst *Invoke : Invokes) {
BasicBlock *UnwindBlock = Invoke->getUnwindDest();
if (UnwindBlock != &BB && LiveBBs.count(UnwindBlock)) {
- DEBUG(dbgs() << "SJLJ Spill: " << Inst << " around "
- << UnwindBlock->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "SJLJ Spill: " << Inst << " around "
+ << UnwindBlock->getName() << "\n");
NeedsSpill = true;
break;
}
diff --git a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp
index ff706a6a6552..ed74b3e4fa19 100644
--- a/lib/CodeGen/SlotIndexes.cpp
+++ b/lib/CodeGen/SlotIndexes.cpp
@@ -97,7 +97,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
// Sort the Idx2MBBMap
llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
- DEBUG(mf->print(dbgs(), this));
+ LLVM_DEBUG(mf->print(dbgs(), this));
// And we're done!
return false;
@@ -147,7 +147,7 @@ void SlotIndexes::removeSingleMachineInstrFromMaps(MachineInstr &MI) {
void SlotIndexes::renumberIndexes() {
// Renumber updates the index of every element of the index list.
- DEBUG(dbgs() << "\n*** Renumbering SlotIndexes ***\n");
+ LLVM_DEBUG(dbgs() << "\n*** Renumbering SlotIndexes ***\n");
++NumGlobalRenum;
unsigned index = 0;
@@ -174,8 +174,8 @@ void SlotIndexes::renumberIndexes(IndexList::iterator curItr) {
// If the next index is bigger, we have caught up.
} while (curItr != indexList.end() && curItr->getIndex() <= index);
- DEBUG(dbgs() << "\n*** Renumbered SlotIndexes " << startItr->getIndex() << '-'
- << index << " ***\n");
+ LLVM_DEBUG(dbgs() << "\n*** Renumbered SlotIndexes " << startItr->getIndex()
+ << '-' << index << " ***\n");
++NumLocalRenum;
}
diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp
index 9c0b6c39952b..d639f4475301 100644
--- a/lib/CodeGen/SplitKit.cpp
+++ b/lib/CodeGen/SplitKit.cpp
@@ -192,7 +192,7 @@ void SplitAnalysis::analyzeUses() {
// I am looking at you, RegisterCoalescer!
DidRepairRange = true;
++NumRepairs;
- DEBUG(dbgs() << "*** Fixing inconsistent live interval! ***\n");
+ LLVM_DEBUG(dbgs() << "*** Fixing inconsistent live interval! ***\n");
const_cast<LiveIntervals&>(LIS)
.shrinkToUses(const_cast<LiveInterval*>(CurLI));
UseBlocks.clear();
@@ -202,10 +202,9 @@ void SplitAnalysis::analyzeUses() {
assert(fixed && "Couldn't fix broken live interval");
}
- DEBUG(dbgs() << "Analyze counted "
- << UseSlots.size() << " instrs in "
- << UseBlocks.size() << " blocks, through "
- << NumThroughBlocks << " blocks.\n");
+ LLVM_DEBUG(dbgs() << "Analyze counted " << UseSlots.size() << " instrs in "
+ << UseBlocks.size() << " blocks, through "
+ << NumThroughBlocks << " blocks.\n");
}
/// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks
@@ -686,20 +685,20 @@ unsigned SplitEditor::openIntv() {
void SplitEditor::selectIntv(unsigned Idx) {
assert(Idx != 0 && "Cannot select the complement interval");
assert(Idx < Edit->size() && "Can only select previously opened interval");
- DEBUG(dbgs() << " selectIntv " << OpenIdx << " -> " << Idx << '\n');
+ LLVM_DEBUG(dbgs() << " selectIntv " << OpenIdx << " -> " << Idx << '\n');
OpenIdx = Idx;
}
SlotIndex SplitEditor::enterIntvBefore(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before enterIntvBefore");
- DEBUG(dbgs() << " enterIntvBefore " << Idx);
+ LLVM_DEBUG(dbgs() << " enterIntvBefore " << Idx);
Idx = Idx.getBaseIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
if (!ParentVNI) {
- DEBUG(dbgs() << ": not live\n");
+ LLVM_DEBUG(dbgs() << ": not live\n");
return Idx;
}
- DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
+ LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
assert(MI && "enterIntvBefore called with invalid index");
@@ -709,14 +708,14 @@ SlotIndex SplitEditor::enterIntvBefore(SlotIndex Idx) {
SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before enterIntvAfter");
- DEBUG(dbgs() << " enterIntvAfter " << Idx);
+ LLVM_DEBUG(dbgs() << " enterIntvAfter " << Idx);
Idx = Idx.getBoundaryIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
if (!ParentVNI) {
- DEBUG(dbgs() << ": not live\n");
+ LLVM_DEBUG(dbgs() << ": not live\n");
return Idx;
}
- DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
+ LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
assert(MI && "enterIntvAfter called with invalid index");
@@ -729,18 +728,18 @@ SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
assert(OpenIdx && "openIntv not called before enterIntvAtEnd");
SlotIndex End = LIS.getMBBEndIdx(&MBB);
SlotIndex Last = End.getPrevSlot();
- DEBUG(dbgs() << " enterIntvAtEnd " << printMBBReference(MBB) << ", "
- << Last);
+ LLVM_DEBUG(dbgs() << " enterIntvAtEnd " << printMBBReference(MBB) << ", "
+ << Last);
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Last);
if (!ParentVNI) {
- DEBUG(dbgs() << ": not live\n");
+ LLVM_DEBUG(dbgs() << ": not live\n");
return End;
}
- DEBUG(dbgs() << ": valno " << ParentVNI->id);
+ LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id);
VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Last, MBB,
SA.getLastSplitPointIter(&MBB));
RegAssign.insert(VNI->def, End, OpenIdx);
- DEBUG(dump());
+ LLVM_DEBUG(dump());
return VNI->def;
}
@@ -751,23 +750,23 @@ void SplitEditor::useIntv(const MachineBasicBlock &MBB) {
void SplitEditor::useIntv(SlotIndex Start, SlotIndex End) {
assert(OpenIdx && "openIntv not called before useIntv");
- DEBUG(dbgs() << " useIntv [" << Start << ';' << End << "):");
+ LLVM_DEBUG(dbgs() << " useIntv [" << Start << ';' << End << "):");
RegAssign.insert(Start, End, OpenIdx);
- DEBUG(dump());
+ LLVM_DEBUG(dump());
}
SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before leaveIntvAfter");
- DEBUG(dbgs() << " leaveIntvAfter " << Idx);
+ LLVM_DEBUG(dbgs() << " leaveIntvAfter " << Idx);
// The interval must be live beyond the instruction at Idx.
SlotIndex Boundary = Idx.getBoundaryIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Boundary);
if (!ParentVNI) {
- DEBUG(dbgs() << ": not live\n");
+ LLVM_DEBUG(dbgs() << ": not live\n");
return Boundary.getNextSlot();
}
- DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
+ LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Boundary);
assert(MI && "No instruction at index");
@@ -789,16 +788,16 @@ SlotIndex SplitEditor::leaveIntvAfter(SlotIndex Idx) {
SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) {
assert(OpenIdx && "openIntv not called before leaveIntvBefore");
- DEBUG(dbgs() << " leaveIntvBefore " << Idx);
+ LLVM_DEBUG(dbgs() << " leaveIntvBefore " << Idx);
// The interval must be live into the instruction at Idx.
Idx = Idx.getBaseIndex();
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
if (!ParentVNI) {
- DEBUG(dbgs() << ": not live\n");
+ LLVM_DEBUG(dbgs() << ": not live\n");
return Idx.getNextSlot();
}
- DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
+ LLVM_DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
assert(MI && "No instruction at index");
@@ -809,19 +808,19 @@ SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) {
SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
assert(OpenIdx && "openIntv not called before leaveIntvAtTop");
SlotIndex Start = LIS.getMBBStartIdx(&MBB);
- DEBUG(dbgs() << " leaveIntvAtTop " << printMBBReference(MBB) << ", "
- << Start);
+ LLVM_DEBUG(dbgs() << " leaveIntvAtTop " << printMBBReference(MBB) << ", "
+ << Start);
VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start);
if (!ParentVNI) {
- DEBUG(dbgs() << ": not live\n");
+ LLVM_DEBUG(dbgs() << ": not live\n");
return Start;
}
VNInfo *VNI = defFromParent(0, ParentVNI, Start, MBB,
MBB.SkipPHIsLabelsAndDebug(MBB.begin()));
RegAssign.insert(Start, VNI->def, OpenIdx);
- DEBUG(dump());
+ LLVM_DEBUG(dump());
return VNI->def;
}
@@ -836,9 +835,9 @@ void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
// The complement interval will be extended as needed by LRCalc.extend().
if (ParentVNI)
forceRecompute(0, *ParentVNI);
- DEBUG(dbgs() << " overlapIntv [" << Start << ';' << End << "):");
+ LLVM_DEBUG(dbgs() << " overlapIntv [" << Start << ';' << End << "):");
RegAssign.insert(Start, End, OpenIdx);
- DEBUG(dump());
+ LLVM_DEBUG(dump());
}
//===----------------------------------------------------------------------===//
@@ -847,7 +846,7 @@ void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
LiveInterval *LI = &LIS.getInterval(Edit->get(0));
- DEBUG(dbgs() << "Removing " << Copies.size() << " back-copies.\n");
+ LLVM_DEBUG(dbgs() << "Removing " << Copies.size() << " back-copies.\n");
RegAssignMap::iterator AssignI;
AssignI.setMap(RegAssign);
@@ -862,7 +861,7 @@ void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
do AtBegin = MBBI == MBB->begin();
while (!AtBegin && (--MBBI)->isDebugInstr());
- DEBUG(dbgs() << "Removing " << Def << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << "Removing " << Def << '\t' << *MI);
LIS.removeVRegDefAt(*LI, Def);
LIS.RemoveMachineInstrFromMaps(*MI);
MI->eraseFromParent();
@@ -877,11 +876,12 @@ void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
continue;
unsigned RegIdx = AssignI.value();
if (AtBegin || !MBBI->readsVirtualRegister(Edit->getReg())) {
- DEBUG(dbgs() << " cannot find simple kill of RegIdx " << RegIdx << '\n');
+ LLVM_DEBUG(dbgs() << " cannot find simple kill of RegIdx " << RegIdx
+ << '\n');
forceRecompute(RegIdx, *Edit->getParent().getVNInfoAt(Def));
} else {
SlotIndex Kill = LIS.getInstructionIndex(*MBBI).getRegSlot();
- DEBUG(dbgs() << " move kill to " << Kill << '\t' << *MBBI);
+ LLVM_DEBUG(dbgs() << " move kill to " << Kill << '\t' << *MBBI);
AssignI.setStop(Kill);
}
}
@@ -908,15 +908,17 @@ SplitEditor::findShallowDominator(MachineBasicBlock *MBB,
// MBB isn't in a loop, it doesn't get any better. All dominators have a
// higher frequency by definition.
if (!Loop) {
- DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates "
- << printMBBReference(*MBB) << " at depth 0\n");
+ LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
+ << " dominates " << printMBBReference(*MBB)
+ << " at depth 0\n");
return MBB;
}
// We'll never be able to exit the DefLoop.
if (Loop == DefLoop) {
- DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates "
- << printMBBReference(*MBB) << " in the same loop\n");
+ LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
+ << " dominates " << printMBBReference(*MBB)
+ << " in the same loop\n");
return MBB;
}
@@ -925,8 +927,9 @@ SplitEditor::findShallowDominator(MachineBasicBlock *MBB,
if (Depth < BestDepth) {
BestMBB = MBB;
BestDepth = Depth;
- DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates "
- << printMBBReference(*MBB) << " at depth " << Depth << '\n');
+ LLVM_DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB)
+ << " dominates " << printMBBReference(*MBB)
+ << " at depth " << Depth << '\n');
}
// Leave loop by going to the immediate dominator of the loop header.
@@ -1032,14 +1035,14 @@ void SplitEditor::hoistCopies() {
// instruction in the complement range. All other copies of ParentVNI
// should be eliminated.
if (VNI->def == ParentVNI->def) {
- DEBUG(dbgs() << "Direct complement def at " << VNI->def << '\n');
+ LLVM_DEBUG(dbgs() << "Direct complement def at " << VNI->def << '\n');
Dom = DomPair(ValMBB, VNI->def);
continue;
}
// Skip the singly mapped values. There is nothing to gain from hoisting a
// single back-copy.
if (Values.lookup(std::make_pair(0, ParentVNI->id)).getPointer()) {
- DEBUG(dbgs() << "Single complement def at " << VNI->def << '\n');
+ LLVM_DEBUG(dbgs() << "Single complement def at " << VNI->def << '\n');
continue;
}
@@ -1063,10 +1066,11 @@ void SplitEditor::hoistCopies() {
Costs[ParentVNI->id] += MBFI.getBlockFreq(ValMBB);
}
- DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@' << VNI->def
- << " for parent " << ParentVNI->id << '@' << ParentVNI->def
- << " hoist to " << printMBBReference(*Dom.first) << ' '
- << Dom.second << '\n');
+ LLVM_DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@'
+ << VNI->def << " for parent " << ParentVNI->id << '@'
+ << ParentVNI->def << " hoist to "
+ << printMBBReference(*Dom.first) << ' ' << Dom.second
+ << '\n');
}
// Insert the hoisted copies.
@@ -1119,7 +1123,7 @@ bool SplitEditor::transferValues() {
bool Skipped = false;
RegAssignMap::const_iterator AssignI = RegAssign.begin();
for (const LiveRange::Segment &S : Edit->getParent()) {
- DEBUG(dbgs() << " blit " << S << ':');
+ LLVM_DEBUG(dbgs() << " blit " << S << ':');
VNInfo *ParentVNI = S.valno;
// RegAssign has holes where RegIdx 0 should be used.
SlotIndex Start = S.start;
@@ -1141,14 +1145,14 @@ bool SplitEditor::transferValues() {
}
// The interval [Start;End) is continuously mapped to RegIdx, ParentVNI.
- DEBUG(dbgs() << " [" << Start << ';' << End << ")=" << RegIdx
- << '(' << printReg(Edit->get(RegIdx)) << ')');
+ LLVM_DEBUG(dbgs() << " [" << Start << ';' << End << ")=" << RegIdx << '('
+ << printReg(Edit->get(RegIdx)) << ')');
LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx));
// Check for a simply defined value that can be blitted directly.
ValueForcePair VFP = Values.lookup(std::make_pair(RegIdx, ParentVNI->id));
if (VNInfo *VNI = VFP.getPointer()) {
- DEBUG(dbgs() << ':' << VNI->id);
+ LLVM_DEBUG(dbgs() << ':' << VNI->id);
LI.addSegment(LiveInterval::Segment(Start, End, VNI));
Start = End;
continue;
@@ -1156,7 +1160,7 @@ bool SplitEditor::transferValues() {
// Skip values with forced recomputation.
if (VFP.getInt()) {
- DEBUG(dbgs() << "(recalc)");
+ LLVM_DEBUG(dbgs() << "(recalc)");
Skipped = true;
Start = End;
continue;
@@ -1175,7 +1179,7 @@ bool SplitEditor::transferValues() {
if (Start != BlockStart) {
VNInfo *VNI = LI.extendInBlock(BlockStart, std::min(BlockEnd, End));
assert(VNI && "Missing def for complex mapped value");
- DEBUG(dbgs() << ':' << VNI->id << "*" << printMBBReference(*MBB));
+ LLVM_DEBUG(dbgs() << ':' << VNI->id << "*" << printMBBReference(*MBB));
// MBB has its own def. Is it also live-out?
if (BlockEnd <= End)
LRC.setLiveOutValue(&*MBB, VNI);
@@ -1188,7 +1192,7 @@ bool SplitEditor::transferValues() {
// Handle the live-in blocks covered by [Start;End).
assert(Start <= BlockStart && "Expected live-in block");
while (BlockStart < End) {
- DEBUG(dbgs() << ">" << printMBBReference(*MBB));
+ LLVM_DEBUG(dbgs() << ">" << printMBBReference(*MBB));
BlockEnd = LIS.getMBBEndIdx(&*MBB);
if (BlockStart == ParentVNI->def) {
// This block has the def of a parent PHI, so it isn't live-in.
@@ -1213,7 +1217,7 @@ bool SplitEditor::transferValues() {
}
Start = End;
} while (Start != S.end);
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
LRCalc[0].calculateValues();
@@ -1315,7 +1319,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
++RI;
// LiveDebugVariables should have handled all DBG_VALUE instructions.
if (MI->isDebugValue()) {
- DEBUG(dbgs() << "Zapping " << *MI);
+ LLVM_DEBUG(dbgs() << "Zapping " << *MI);
MO.setReg(0);
continue;
}
@@ -1331,8 +1335,8 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
unsigned RegIdx = RegAssign.lookup(Idx);
LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx));
MO.setReg(LI.reg);
- DEBUG(dbgs() << " rewr " << printMBBReference(*MI->getParent()) << '\t'
- << Idx << ':' << RegIdx << '\t' << *MI);
+ LLVM_DEBUG(dbgs() << " rewr " << printMBBReference(*MI->getParent())
+ << '\t' << Idx << ':' << RegIdx << '\t' << *MI);
// Extend liveness to Idx if the instruction reads reg.
if (!ExtendRanges || MO.isUndef())
@@ -1417,7 +1421,7 @@ void SplitEditor::deleteRematVictims() {
if (!MI->allDefsAreDead())
continue;
- DEBUG(dbgs() << "All defs dead: " << *MI);
+ LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
Dead.push_back(MI);
}
}
@@ -1599,9 +1603,9 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum);
- DEBUG(dbgs() << "%bb." << MBBNum << " [" << Start << ';' << Stop << ") intf "
- << LeaveBefore << '-' << EnterAfter << ", live-through "
- << IntvIn << " -> " << IntvOut);
+ LLVM_DEBUG(dbgs() << "%bb." << MBBNum << " [" << Start << ';' << Stop
+ << ") intf " << LeaveBefore << '-' << EnterAfter
+ << ", live-through " << IntvIn << " -> " << IntvOut);
assert((IntvIn || IntvOut) && "Use splitSingleBlock for isolated blocks");
@@ -1612,7 +1616,7 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
MachineBasicBlock *MBB = VRM.getMachineFunction().getBlockNumbered(MBBNum);
if (!IntvOut) {
- DEBUG(dbgs() << ", spill on entry.\n");
+ LLVM_DEBUG(dbgs() << ", spill on entry.\n");
//
// <<<<<<<<< Possible LeaveBefore interference.
// |-----------| Live through.
@@ -1626,7 +1630,7 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
}
if (!IntvIn) {
- DEBUG(dbgs() << ", reload on exit.\n");
+ LLVM_DEBUG(dbgs() << ", reload on exit.\n");
//
// >>>>>>> Possible EnterAfter interference.
// |-----------| Live through.
@@ -1640,7 +1644,7 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
}
if (IntvIn == IntvOut && !LeaveBefore && !EnterAfter) {
- DEBUG(dbgs() << ", straight through.\n");
+ LLVM_DEBUG(dbgs() << ", straight through.\n");
//
// |-----------| Live through.
// ------------- Straight through, same intv, no interference.
@@ -1656,7 +1660,7 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
if (IntvIn != IntvOut && (!LeaveBefore || !EnterAfter ||
LeaveBefore.getBaseIndex() > EnterAfter.getBoundaryIndex())) {
- DEBUG(dbgs() << ", switch avoiding interference.\n");
+ LLVM_DEBUG(dbgs() << ", switch avoiding interference.\n");
//
// >>>> <<<< Non-overlapping EnterAfter/LeaveBefore interference.
// |-----------| Live through.
@@ -1677,7 +1681,7 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum,
return;
}
- DEBUG(dbgs() << ", create local intv for interference.\n");
+ LLVM_DEBUG(dbgs() << ", create local intv for interference.\n");
//
// >>><><><><<<< Overlapping EnterAfter/LeaveBefore interference.
// |-----------| Live through.
@@ -1701,17 +1705,18 @@ void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
- DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop
- << "), uses " << BI.FirstInstr << '-' << BI.LastInstr
- << ", reg-in " << IntvIn << ", leave before " << LeaveBefore
- << (BI.LiveOut ? ", stack-out" : ", killed in block"));
+ LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';'
+ << Stop << "), uses " << BI.FirstInstr << '-'
+ << BI.LastInstr << ", reg-in " << IntvIn
+ << ", leave before " << LeaveBefore
+ << (BI.LiveOut ? ", stack-out" : ", killed in block"));
assert(IntvIn && "Must have register in");
assert(BI.LiveIn && "Must be live-in");
assert((!LeaveBefore || LeaveBefore > Start) && "Bad interference");
if (!BI.LiveOut && (!LeaveBefore || LeaveBefore >= BI.LastInstr)) {
- DEBUG(dbgs() << " before interference.\n");
+ LLVM_DEBUG(dbgs() << " before interference.\n");
//
// <<< Interference after kill.
// |---o---x | Killed in block.
@@ -1736,13 +1741,13 @@ void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
// \_____ Stack interval is live-out.
//
if (BI.LastInstr < LSP) {
- DEBUG(dbgs() << ", spill after last use before interference.\n");
+ LLVM_DEBUG(dbgs() << ", spill after last use before interference.\n");
selectIntv(IntvIn);
SlotIndex Idx = leaveIntvAfter(BI.LastInstr);
useIntv(Start, Idx);
assert((!LeaveBefore || Idx <= LeaveBefore) && "Interference");
} else {
- DEBUG(dbgs() << ", spill before last split point.\n");
+ LLVM_DEBUG(dbgs() << ", spill before last split point.\n");
selectIntv(IntvIn);
SlotIndex Idx = leaveIntvBefore(LSP);
overlapIntv(Idx, BI.LastInstr);
@@ -1757,7 +1762,7 @@ void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI,
// different register.
unsigned LocalIntv = openIntv();
(void)LocalIntv;
- DEBUG(dbgs() << ", creating local interval " << LocalIntv << ".\n");
+ LLVM_DEBUG(dbgs() << ", creating local interval " << LocalIntv << ".\n");
if (!BI.LiveOut || BI.LastInstr < LSP) {
//
@@ -1793,10 +1798,11 @@ void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
SlotIndex Start, Stop;
std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
- DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop
- << "), uses " << BI.FirstInstr << '-' << BI.LastInstr
- << ", reg-out " << IntvOut << ", enter after " << EnterAfter
- << (BI.LiveIn ? ", stack-in" : ", defined in block"));
+ LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';'
+ << Stop << "), uses " << BI.FirstInstr << '-'
+ << BI.LastInstr << ", reg-out " << IntvOut
+ << ", enter after " << EnterAfter
+ << (BI.LiveIn ? ", stack-in" : ", defined in block"));
SlotIndex LSP = SA.getLastSplitPoint(BI.MBB->getNumber());
@@ -1805,7 +1811,7 @@ void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
assert((!EnterAfter || EnterAfter < LSP) && "Bad interference");
if (!BI.LiveIn && (!EnterAfter || EnterAfter <= BI.FirstInstr)) {
- DEBUG(dbgs() << " after interference.\n");
+ LLVM_DEBUG(dbgs() << " after interference.\n");
//
// >>>> Interference before def.
// | o---o---| Defined in block.
@@ -1817,7 +1823,7 @@ void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
}
if (!EnterAfter || EnterAfter < BI.FirstInstr.getBaseIndex()) {
- DEBUG(dbgs() << ", reload after interference.\n");
+ LLVM_DEBUG(dbgs() << ", reload after interference.\n");
//
// >>>> Interference before def.
// |---o---o---| Live-through, stack-in.
@@ -1833,7 +1839,7 @@ void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI,
// The interference is overlapping somewhere we wanted to use IntvOut. That
// means we need to create a local interval that can be allocated a
// different register.
- DEBUG(dbgs() << ", interference overlaps uses.\n");
+ LLVM_DEBUG(dbgs() << ", interference overlaps uses.\n");
//
// >>>>>>> Interference overlapping uses.
// |---o---o---| Live-through, stack-in.
diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp
index e9eb60058279..4f3d1f704aa3 100644
--- a/lib/CodeGen/StackColoring.cpp
+++ b/lib/CodeGen/StackColoring.cpp
@@ -673,13 +673,13 @@ unsigned StackColoring::collectMarkers(unsigned NumSlot) {
}
const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
if (Allocation) {
- DEBUG(dbgs() << "Found a lifetime ");
- DEBUG(dbgs() << (MI.getOpcode() == TargetOpcode::LIFETIME_START
- ? "start"
- : "end"));
- DEBUG(dbgs() << " marker for slot #" << Slot);
- DEBUG(dbgs() << " with allocation: " << Allocation->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found a lifetime ");
+ LLVM_DEBUG(dbgs() << (MI.getOpcode() == TargetOpcode::LIFETIME_START
+ ? "start"
+ : "end"));
+ LLVM_DEBUG(dbgs() << " marker for slot #" << Slot);
+ LLVM_DEBUG(dbgs()
+ << " with allocation: " << Allocation->getName() << "\n");
}
Markers.push_back(&MI);
MarkersFound += 1;
@@ -708,7 +708,7 @@ unsigned StackColoring::collectMarkers(unsigned NumSlot) {
for (unsigned slot = 0; slot < NumSlot; ++slot)
if (NumStartLifetimes[slot] > 1 || NumEndLifetimes[slot] > 1)
ConservativeSlots.set(slot);
- DEBUG(dumpBV("Conservative slots", ConservativeSlots));
+ LLVM_DEBUG(dumpBV("Conservative slots", ConservativeSlots));
// Step 2: compute begin/end sets for each block
@@ -739,14 +739,16 @@ unsigned StackColoring::collectMarkers(unsigned NumSlot) {
BlockInfo.End.set(Slot);
} else {
for (auto Slot : slots) {
- DEBUG(dbgs() << "Found a use of slot #" << Slot);
- DEBUG(dbgs() << " at " << printMBBReference(*MBB) << " index ");
- DEBUG(Indexes->getInstructionIndex(MI).print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Found a use of slot #" << Slot);
+ LLVM_DEBUG(dbgs()
+ << " at " << printMBBReference(*MBB) << " index ");
+ LLVM_DEBUG(Indexes->getInstructionIndex(MI).print(dbgs()));
const AllocaInst *Allocation = MFI->getObjectAllocation(Slot);
if (Allocation) {
- DEBUG(dbgs() << " with allocation: "<< Allocation->getName());
+ LLVM_DEBUG(dbgs()
+ << " with allocation: " << Allocation->getName());
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
if (BlockInfo.End.test(Slot)) {
BlockInfo.End.reset(Slot);
}
@@ -881,7 +883,7 @@ bool StackColoring::removeAllMarkers() {
}
Markers.clear();
- DEBUG(dbgs()<<"Removed "<<Count<<" markers.\n");
+ LLVM_DEBUG(dbgs() << "Removed " << Count << " markers.\n");
return Count;
}
@@ -895,8 +897,8 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
if (!VI.Var)
continue;
if (SlotRemap.count(VI.Slot)) {
- DEBUG(dbgs() << "Remapping debug info for ["
- << cast<DILocalVariable>(VI.Var)->getName() << "].\n");
+ LLVM_DEBUG(dbgs() << "Remapping debug info for ["
+ << cast<DILocalVariable>(VI.Var)->getName() << "].\n");
VI.Slot = SlotRemap[VI.Slot];
FixedDbg++;
}
@@ -1065,9 +1067,9 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
SlotRemap.count(H.CatchObj.FrameIndex))
H.CatchObj.FrameIndex = SlotRemap[H.CatchObj.FrameIndex];
- DEBUG(dbgs()<<"Fixed "<<FixedMemOp<<" machine memory operands.\n");
- DEBUG(dbgs()<<"Fixed "<<FixedDbg<<" debug locations.\n");
- DEBUG(dbgs()<<"Fixed "<<FixedInstr<<" machine instructions.\n");
+ LLVM_DEBUG(dbgs() << "Fixed " << FixedMemOp << " machine memory operands.\n");
+ LLVM_DEBUG(dbgs() << "Fixed " << FixedDbg << " debug locations.\n");
+ LLVM_DEBUG(dbgs() << "Fixed " << FixedInstr << " machine instructions.\n");
}
void StackColoring::removeInvalidSlotRanges() {
@@ -1105,7 +1107,7 @@ void StackColoring::removeInvalidSlotRanges() {
SlotIndex Index = Indexes->getInstructionIndex(I);
if (Interval->find(Index) == Interval->end()) {
Interval->clear();
- DEBUG(dbgs()<<"Invalidating range #"<<Slot<<"\n");
+ LLVM_DEBUG(dbgs() << "Invalidating range #" << Slot << "\n");
EscapedAllocas++;
}
}
@@ -1129,8 +1131,8 @@ void StackColoring::expungeSlotMap(DenseMap<int, int> &SlotRemap,
}
bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
- DEBUG(dbgs() << "********** Stack Coloring **********\n"
- << "********** Function: " << Func.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Stack Coloring **********\n"
+ << "********** Function: " << Func.getName() << '\n');
MF = &Func;
MFI = &MF->getFrameInfo();
Indexes = &getAnalysis<SlotIndexes>();
@@ -1157,21 +1159,23 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
unsigned NumMarkers = collectMarkers(NumSlots);
unsigned TotalSize = 0;
- DEBUG(dbgs()<<"Found "<<NumMarkers<<" markers and "<<NumSlots<<" slots\n");
- DEBUG(dbgs()<<"Slot structure:\n");
+ LLVM_DEBUG(dbgs() << "Found " << NumMarkers << " markers and " << NumSlots
+ << " slots\n");
+ LLVM_DEBUG(dbgs() << "Slot structure:\n");
for (int i=0; i < MFI->getObjectIndexEnd(); ++i) {
- DEBUG(dbgs()<<"Slot #"<<i<<" - "<<MFI->getObjectSize(i)<<" bytes.\n");
+ LLVM_DEBUG(dbgs() << "Slot #" << i << " - " << MFI->getObjectSize(i)
+ << " bytes.\n");
TotalSize += MFI->getObjectSize(i);
}
- DEBUG(dbgs()<<"Total Stack size: "<<TotalSize<<" bytes\n\n");
+ LLVM_DEBUG(dbgs() << "Total Stack size: " << TotalSize << " bytes\n\n");
// Don't continue because there are not enough lifetime markers, or the
// stack is too small, or we are told not to optimize the slots.
if (NumMarkers < 2 || TotalSize < 16 || DisableColoring ||
skipFunction(Func.getFunction())) {
- DEBUG(dbgs()<<"Will not try to merge slots.\n");
+ LLVM_DEBUG(dbgs() << "Will not try to merge slots.\n");
return removeAllMarkers();
}
@@ -1184,12 +1188,12 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
// Calculate the liveness of each block.
calculateLocalLiveness();
- DEBUG(dbgs() << "Dataflow iterations: " << NumIterations << "\n");
- DEBUG(dump());
+ LLVM_DEBUG(dbgs() << "Dataflow iterations: " << NumIterations << "\n");
+ LLVM_DEBUG(dump());
// Propagate the liveness information.
calculateLiveIntervals(NumSlots);
- DEBUG(dumpIntervals());
+ LLVM_DEBUG(dumpIntervals());
// Search for allocas which are used outside of the declared lifetime
// markers.
@@ -1260,8 +1264,8 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
SlotRemap[SecondSlot] = FirstSlot;
SortedSlots[J] = -1;
- DEBUG(dbgs()<<"Merging #"<<FirstSlot<<" and slots #"<<
- SecondSlot<<" together.\n");
+ LLVM_DEBUG(dbgs() << "Merging #" << FirstSlot << " and slots #"
+ << SecondSlot << " together.\n");
unsigned MaxAlignment = std::max(MFI->getObjectAlignment(FirstSlot),
MFI->getObjectAlignment(SecondSlot));
@@ -1281,8 +1285,8 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
// Record statistics.
StackSpaceSaved += ReducedSize;
StackSlotMerged += RemovedSlots;
- DEBUG(dbgs()<<"Merge "<<RemovedSlots<<" slots. Saved "<<
- ReducedSize<<" bytes\n");
+ LLVM_DEBUG(dbgs() << "Merge " << RemovedSlots << " slots. Saved "
+ << ReducedSize << " bytes\n");
// Scan the entire function and update all machine operands that use frame
// indices to use the remapped frame index.
diff --git a/lib/CodeGen/StackMapLivenessAnalysis.cpp b/lib/CodeGen/StackMapLivenessAnalysis.cpp
index 32d6f54f6795..fc2895a03f60 100644
--- a/lib/CodeGen/StackMapLivenessAnalysis.cpp
+++ b/lib/CodeGen/StackMapLivenessAnalysis.cpp
@@ -106,8 +106,8 @@ bool StackMapLiveness::runOnMachineFunction(MachineFunction &MF) {
if (!EnablePatchPointLiveness)
return false;
- DEBUG(dbgs() << "********** COMPUTING STACKMAP LIVENESS: " << MF.getName()
- << " **********\n");
+ LLVM_DEBUG(dbgs() << "********** COMPUTING STACKMAP LIVENESS: "
+ << MF.getName() << " **********\n");
TRI = MF.getSubtarget().getRegisterInfo();
  ++NumStackMapFuncVisited;
@@ -124,7 +124,7 @@ bool StackMapLiveness::calculateLiveness(MachineFunction &MF) {
bool HasChanged = false;
// For all basic blocks in the function.
for (auto &MBB : MF) {
- DEBUG(dbgs() << "****** BB " << MBB.getName() << " ******\n");
+ LLVM_DEBUG(dbgs() << "****** BB " << MBB.getName() << " ******\n");
LiveRegs.init(*TRI);
// FIXME: This should probably be addLiveOuts().
LiveRegs.addLiveOutsNoPristines(MBB);
@@ -138,7 +138,7 @@ bool StackMapLiveness::calculateLiveness(MachineFunction &MF) {
HasStackMap = true;
        ++NumStackMaps;
}
- DEBUG(dbgs() << " " << LiveRegs << " " << *I);
+ LLVM_DEBUG(dbgs() << " " << LiveRegs << " " << *I);
LiveRegs.stepBackward(*I);
}
    ++NumBBsVisited;
diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp
index 873190cfe7a2..19a191c01db9 100644
--- a/lib/CodeGen/StackMaps.cpp
+++ b/lib/CodeGen/StackMaps.cpp
@@ -420,13 +420,13 @@ void StackMaps::emitStackmapHeader(MCStreamer &OS) {
OS.EmitIntValue(0, 2); // Reserved.
// Num functions.
- DEBUG(dbgs() << WSMP << "#functions = " << FnInfos.size() << '\n');
+ LLVM_DEBUG(dbgs() << WSMP << "#functions = " << FnInfos.size() << '\n');
OS.EmitIntValue(FnInfos.size(), 4);
// Num constants.
- DEBUG(dbgs() << WSMP << "#constants = " << ConstPool.size() << '\n');
+ LLVM_DEBUG(dbgs() << WSMP << "#constants = " << ConstPool.size() << '\n');
OS.EmitIntValue(ConstPool.size(), 4);
// Num callsites.
- DEBUG(dbgs() << WSMP << "#callsites = " << CSInfos.size() << '\n');
+ LLVM_DEBUG(dbgs() << WSMP << "#callsites = " << CSInfos.size() << '\n');
OS.EmitIntValue(CSInfos.size(), 4);
}
@@ -439,11 +439,11 @@ void StackMaps::emitStackmapHeader(MCStreamer &OS) {
/// }
void StackMaps::emitFunctionFrameRecords(MCStreamer &OS) {
// Function Frame records.
- DEBUG(dbgs() << WSMP << "functions:\n");
+ LLVM_DEBUG(dbgs() << WSMP << "functions:\n");
for (auto const &FR : FnInfos) {
- DEBUG(dbgs() << WSMP << "function addr: " << FR.first
- << " frame size: " << FR.second.StackSize
- << " callsite count: " << FR.second.RecordCount << '\n');
+ LLVM_DEBUG(dbgs() << WSMP << "function addr: " << FR.first
+ << " frame size: " << FR.second.StackSize
+ << " callsite count: " << FR.second.RecordCount << '\n');
OS.EmitSymbolValue(FR.first, 8);
OS.EmitIntValue(FR.second.StackSize, 8);
OS.EmitIntValue(FR.second.RecordCount, 8);
@@ -455,9 +455,9 @@ void StackMaps::emitFunctionFrameRecords(MCStreamer &OS) {
/// int64 : Constants[NumConstants]
void StackMaps::emitConstantPoolEntries(MCStreamer &OS) {
// Constant pool entries.
- DEBUG(dbgs() << WSMP << "constants:\n");
+ LLVM_DEBUG(dbgs() << WSMP << "constants:\n");
for (const auto &ConstEntry : ConstPool) {
- DEBUG(dbgs() << WSMP << ConstEntry.second << '\n');
+ LLVM_DEBUG(dbgs() << WSMP << ConstEntry.second << '\n');
OS.EmitIntValue(ConstEntry.second, 8);
}
}
@@ -492,7 +492,7 @@ void StackMaps::emitConstantPoolEntries(MCStreamer &OS) {
/// 0x4, Constant, Offset (small constant)
/// 0x5, ConstIndex, Constants[Offset] (large constant)
void StackMaps::emitCallsiteEntries(MCStreamer &OS) {
- DEBUG(print(dbgs()));
+ LLVM_DEBUG(print(dbgs()));
// Callsite entries.
for (const auto &CSI : CSInfos) {
const LocationVec &CSLocs = CSI.Locations;
@@ -569,7 +569,7 @@ void StackMaps::serializeToStackMapSection() {
OS.EmitLabel(OutContext.getOrCreateSymbol(Twine("__LLVM_StackMaps")));
// Serialize data.
- DEBUG(dbgs() << "********** Stack Map Output **********\n");
+ LLVM_DEBUG(dbgs() << "********** Stack Map Output **********\n");
emitStackmapHeader(OS);
emitFunctionFrameRecords(OS);
emitConstantPoolEntries(OS);
diff --git a/lib/CodeGen/StackSlotColoring.cpp b/lib/CodeGen/StackSlotColoring.cpp
index cd13b3f79696..b3ffebca6133 100644
--- a/lib/CodeGen/StackSlotColoring.cpp
+++ b/lib/CodeGen/StackSlotColoring.cpp
@@ -213,10 +213,10 @@ void StackSlotColoring::InitializeSlots() {
[](Pair *LHS, Pair *RHS) { return LHS->first < RHS->first; });
// Gather all spill slots into a list.
- DEBUG(dbgs() << "Spill slot intervals:\n");
+ LLVM_DEBUG(dbgs() << "Spill slot intervals:\n");
for (auto *I : Intervals) {
LiveInterval &li = I->second;
- DEBUG(li.dump());
+ LLVM_DEBUG(li.dump());
int FI = TargetRegisterInfo::stackSlot2Index(li.reg);
if (MFI->isDeadObjectIndex(FI))
continue;
@@ -225,7 +225,7 @@ void StackSlotColoring::InitializeSlots() {
OrigSizes[FI] = MFI->getObjectSize(FI);
AllColors.set(FI);
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
// Sort them by weight.
std::stable_sort(SSIntervals.begin(), SSIntervals.end(), IntervalSorter());
@@ -267,7 +267,7 @@ int StackSlotColoring::ColorSlot(LiveInterval *li) {
}
if (Color != -1 && MFI->getStackID(Color) != MFI->getStackID(FI)) {
- DEBUG(dbgs() << "cannot share FIs with different stack IDs\n");
+ LLVM_DEBUG(dbgs() << "cannot share FIs with different stack IDs\n");
Share = false;
}
@@ -282,7 +282,7 @@ int StackSlotColoring::ColorSlot(LiveInterval *li) {
// Record the assignment.
Assignments[Color].push_back(li);
- DEBUG(dbgs() << "Assigning fi#" << FI << " to fi#" << Color << "\n");
+ LLVM_DEBUG(dbgs() << "Assigning fi#" << FI << " to fi#" << Color << "\n");
// Change size and alignment of the allocated slot. If there are multiple
// objects sharing the same slot, then make sure the size and alignment
@@ -305,7 +305,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
SmallVector<SmallVector<int, 4>, 16> RevMap(NumObjs);
BitVector UsedColors(NumObjs);
- DEBUG(dbgs() << "Color spill slot intervals:\n");
+ LLVM_DEBUG(dbgs() << "Color spill slot intervals:\n");
bool Changed = false;
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
@@ -319,7 +319,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
Changed |= (SS != NewSS);
}
- DEBUG(dbgs() << "\nSpill slots after coloring:\n");
+ LLVM_DEBUG(dbgs() << "\nSpill slots after coloring:\n");
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
LiveInterval *li = SSIntervals[i];
int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
@@ -330,8 +330,8 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
#ifndef NDEBUG
for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i)
- DEBUG(SSIntervals[i]->dump());
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(SSIntervals[i]->dump());
+ LLVM_DEBUG(dbgs() << '\n');
#endif
if (!Changed)
@@ -358,7 +358,8 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
// Delete unused stack slots.
while (NextColor != -1) {
- DEBUG(dbgs() << "Removing unused stack object fi#" << NextColor << "\n");
+ LLVM_DEBUG(dbgs() << "Removing unused stack object fi#" << NextColor
+ << "\n");
MFI->RemoveStackObject(NextColor);
NextColor = AllColors.find_next(NextColor);
}
@@ -454,10 +455,10 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
}
bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
- dbgs() << "********** Stack Slot Coloring **********\n"
- << "********** Function: " << MF.getName() << '\n';
- });
+ LLVM_DEBUG({
+ dbgs() << "********** Stack Slot Coloring **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
if (skipFunction(MF.getFunction()))
return false;
diff --git a/lib/CodeGen/TailDuplicator.cpp b/lib/CodeGen/TailDuplicator.cpp
index b4ee8777d1fd..b118c176a897 100644
--- a/lib/CodeGen/TailDuplicator.cpp
+++ b/lib/CodeGen/TailDuplicator.cpp
@@ -262,7 +262,7 @@ bool TailDuplicator::tailDuplicateBlocks() {
bool MadeChange = false;
if (PreRegAlloc && TailDupVerify) {
- DEBUG(dbgs() << "\n*** Before tail-duplicating\n");
+ LLVM_DEBUG(dbgs() << "\n*** Before tail-duplicating\n");
VerifyPHIs(*MF, true);
}
@@ -718,8 +718,8 @@ bool TailDuplicator::duplicateSimpleBB(
continue;
Changed = true;
- DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
- << "From simple Succ: " << *TailBB);
+ LLVM_DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
+ << "From simple Succ: " << *TailBB);
MachineBasicBlock *NewTarget = *TailBB->succ_begin();
MachineBasicBlock *NextBB = PredBB->getNextNode();
@@ -799,8 +799,8 @@ bool TailDuplicator::tailDuplicate(bool IsSimple, MachineBasicBlock *TailBB,
MachineBasicBlock *ForcedLayoutPred,
SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallVectorImpl<MachineInstr *> &Copies) {
- DEBUG(dbgs() << "\n*** Tail-duplicating " << printMBBReference(*TailBB)
- << '\n');
+ LLVM_DEBUG(dbgs() << "\n*** Tail-duplicating " << printMBBReference(*TailBB)
+ << '\n');
DenseSet<unsigned> UsedByPhi;
getRegsUsedByPHIs(*TailBB, &UsedByPhi);
@@ -830,8 +830,8 @@ bool TailDuplicator::tailDuplicate(bool IsSimple, MachineBasicBlock *TailBB,
if (IsLayoutSuccessor)
continue;
- DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
- << "From Succ: " << *TailBB);
+ LLVM_DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
+ << "From Succ: " << *TailBB);
TDBBs.push_back(PredBB);
@@ -893,8 +893,8 @@ bool TailDuplicator::tailDuplicate(bool IsSimple, MachineBasicBlock *TailBB,
(!PriorTBB || PriorTBB == TailBB) &&
TailBB->pred_size() == 1 &&
!TailBB->hasAddressTaken()) {
- DEBUG(dbgs() << "\nMerging into block: " << *PrevBB
- << "From MBB: " << *TailBB);
+ LLVM_DEBUG(dbgs() << "\nMerging into block: " << *PrevBB
+ << "From MBB: " << *TailBB);
// There may be a branch to the layout successor. This is unlikely but it
// happens. The correct thing to do is to remove the branch before
// duplicating the instructions in all cases.
@@ -999,7 +999,7 @@ void TailDuplicator::removeDeadBlock(
MachineBasicBlock *MBB,
function_ref<void(MachineBasicBlock *)> *RemovalCallback) {
assert(MBB->pred_empty() && "MBB must be dead!");
- DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
+ LLVM_DEBUG(dbgs() << "\nRemoving MBB: " << *MBB);
if (RemovalCallback)
(*RemovalCallback)(MBB);
diff --git a/lib/CodeGen/TargetRegisterInfo.cpp b/lib/CodeGen/TargetRegisterInfo.cpp
index 80622ea7bae6..661dc18f7a85 100644
--- a/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/lib/CodeGen/TargetRegisterInfo.cpp
@@ -443,7 +443,8 @@ bool TargetRegisterInfo::needsStackRealignment(
if (F.hasFnAttribute("stackrealign") || requiresRealignment) {
if (canRealignStack(MF))
return true;
- DEBUG(dbgs() << "Can't realign function's stack: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Can't realign function's stack: " << F.getName()
+ << "\n");
}
return false;
}
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index da55c18854a7..0ca435016ead 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -685,15 +685,15 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI,
unsigned RegCIdx,
unsigned Dist) {
unsigned RegC = MI->getOperand(RegCIdx).getReg();
- DEBUG(dbgs() << "2addr: COMMUTING : " << *MI);
+ LLVM_DEBUG(dbgs() << "2addr: COMMUTING : " << *MI);
MachineInstr *NewMI = TII->commuteInstruction(*MI, false, RegBIdx, RegCIdx);
if (NewMI == nullptr) {
- DEBUG(dbgs() << "2addr: COMMUTING FAILED!\n");
+ LLVM_DEBUG(dbgs() << "2addr: COMMUTING FAILED!\n");
return false;
}
- DEBUG(dbgs() << "2addr: COMMUTED TO: " << *NewMI);
+ LLVM_DEBUG(dbgs() << "2addr: COMMUTED TO: " << *NewMI);
assert(NewMI == MI &&
"TargetInstrInfo::commuteInstruction() should not return a new "
"instruction unless it was requested.");
@@ -740,8 +740,8 @@ TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi,
if (!NewMI)
return false;
- DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi);
- DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI);
+ LLVM_DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi);
+ LLVM_DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI);
bool Sunk = false;
if (LIS)
@@ -1014,7 +1014,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
LV->addVirtualRegisterKilled(Reg, *MI);
}
- DEBUG(dbgs() << "\trescheduled below kill: " << *KillMI);
+ LLVM_DEBUG(dbgs() << "\trescheduled below kill: " << *KillMI);
return true;
}
@@ -1181,7 +1181,7 @@ rescheduleKillAboveMI(MachineBasicBlock::iterator &mi,
LV->addVirtualRegisterKilled(Reg, *MI);
}
- DEBUG(dbgs() << "\trescheduled kill: " << *KillMI);
+ LLVM_DEBUG(dbgs() << "\trescheduled kill: " << *KillMI);
return true;
}
@@ -1352,7 +1352,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
const MCInstrDesc &UnfoldMCID = TII->get(NewOpc);
if (UnfoldMCID.getNumDefs() == 1) {
// Unfold the load.
- DEBUG(dbgs() << "2addr: UNFOLDING: " << MI);
+ LLVM_DEBUG(dbgs() << "2addr: UNFOLDING: " << MI);
const TargetRegisterClass *RC =
TRI->getAllocatableClass(
TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI, *MF));
@@ -1361,7 +1361,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
if (!TII->unfoldMemoryOperand(*MF, MI, Reg,
/*UnfoldLoad=*/true,
/*UnfoldStore=*/false, NewMIs)) {
- DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
+ LLVM_DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
return false;
}
assert(NewMIs.size() == 2 &&
@@ -1374,8 +1374,8 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
MBB->insert(mi, NewMIs[0]);
MBB->insert(mi, NewMIs[1]);
- DEBUG(dbgs() << "2addr: NEW LOAD: " << *NewMIs[0]
- << "2addr: NEW INST: " << *NewMIs[1]);
+ LLVM_DEBUG(dbgs() << "2addr: NEW LOAD: " << *NewMIs[0]
+ << "2addr: NEW INST: " << *NewMIs[1]);
// Transform the instruction, now that it no longer has a load.
unsigned NewDstIdx = NewMIs[1]->findRegisterDefOperandIdx(regA);
@@ -1440,7 +1440,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
// Transforming didn't eliminate the tie and didn't lead to an
// improvement. Clean up the unfolded instructions and keep the
// original.
- DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
+ LLVM_DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
NewMIs[0]->eraseFromParent();
NewMIs[1]->eraseFromParent();
}
@@ -1484,7 +1484,7 @@ collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
MRI->constrainRegClass(DstReg, RC);
SrcMO.setReg(DstReg);
SrcMO.setSubReg(0);
- DEBUG(dbgs() << "\t\trewrite undef:\t" << *MI);
+ LLVM_DEBUG(dbgs() << "\t\trewrite undef:\t" << *MI);
continue;
}
TiedOperands[SrcReg].push_back(std::make_pair(SrcIdx, DstIdx));
@@ -1583,7 +1583,7 @@ TwoAddressInstructionPass::processTiedPairs(MachineInstr *MI,
}
}
- DEBUG(dbgs() << "\t\tprepend:\t" << *MIB);
+ LLVM_DEBUG(dbgs() << "\t\tprepend:\t" << *MIB);
MachineOperand &MO = MI->getOperand(SrcIdx);
assert(MO.isReg() && MO.getReg() == RegB && MO.isUse() &&
@@ -1677,9 +1677,8 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
bool MadeChange = false;
- DEBUG(dbgs() << "********** REWRITING TWO-ADDR INSTRS **********\n");
- DEBUG(dbgs() << "********** Function: "
- << MF->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** REWRITING TWO-ADDR INSTRS **********\n");
+ LLVM_DEBUG(dbgs() << "********** Function: " << MF->getName() << '\n');
// This pass takes the function out of SSA form.
MRI->leaveSSA();
@@ -1722,7 +1721,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
      ++NumTwoAddressInstrs;
MadeChange = true;
- DEBUG(dbgs() << '\t' << *mi);
+ LLVM_DEBUG(dbgs() << '\t' << *mi);
// If the instruction has a single pair of tied operands, try some
// transformations that may either eliminate the tied operands or
@@ -1749,7 +1748,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
// Now iterate over the information collected above.
for (auto &TO : TiedOperands) {
processTiedPairs(&*mi, TO.second, Dist);
- DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
+ LLVM_DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
}
// Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
@@ -1763,7 +1762,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
mi->getOperand(0).setIsUndef(mi->getOperand(1).isUndef());
mi->RemoveOperand(1);
mi->setDesc(TII->get(TargetOpcode::COPY));
- DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
+ LLVM_DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
}
// Clear TiedOperands here instead of at the top of the loop
@@ -1796,7 +1795,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
if (MI.getOperand(0).getSubReg() ||
TargetRegisterInfo::isPhysicalRegister(DstReg) ||
!(MI.getNumOperands() & 1)) {
- DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << MI);
+ LLVM_DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << MI);
llvm_unreachable(nullptr);
}
@@ -1847,19 +1846,19 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
if (LV && isKill && !TargetRegisterInfo::isPhysicalRegister(SrcReg))
LV->replaceKillInstruction(SrcReg, MI, *CopyMI);
- DEBUG(dbgs() << "Inserted: " << *CopyMI);
+ LLVM_DEBUG(dbgs() << "Inserted: " << *CopyMI);
}
MachineBasicBlock::iterator EndMBBI =
std::next(MachineBasicBlock::iterator(MI));
if (!DefEmitted) {
- DEBUG(dbgs() << "Turned: " << MI << " into an IMPLICIT_DEF");
+ LLVM_DEBUG(dbgs() << "Turned: " << MI << " into an IMPLICIT_DEF");
MI.setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
for (int j = MI.getNumOperands() - 1, ee = 0; j > ee; --j)
MI.RemoveOperand(j);
} else {
- DEBUG(dbgs() << "Eliminated: " << MI);
+ LLVM_DEBUG(dbgs() << "Eliminated: " << MI);
MI.eraseFromParent();
}
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index ccec691fc072..3f9dcd4edcec 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -242,10 +242,9 @@ bool VirtRegRewriter::runOnMachineFunction(MachineFunction &fn) {
Indexes = &getAnalysis<SlotIndexes>();
LIS = &getAnalysis<LiveIntervals>();
VRM = &getAnalysis<VirtRegMap>();
- DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
- << "********** Function: "
- << MF->getName() << '\n');
- DEBUG(VRM->dump());
+ LLVM_DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
+ << "********** Function: " << MF->getName() << '\n');
+ LLVM_DEBUG(VRM->dump());
// Add kill flags while we still have virtual registers.
LIS->addKillFlags(VRM);
@@ -377,7 +376,7 @@ bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
void VirtRegRewriter::handleIdentityCopy(MachineInstr &MI) const {
if (!MI.isIdentityCopy())
return;
- DEBUG(dbgs() << "Identity copy: " << MI);
+ LLVM_DEBUG(dbgs() << "Identity copy: " << MI);
  ++NumIdCopies;
// Copies like:
@@ -388,14 +387,14 @@ void VirtRegRewriter::handleIdentityCopy(MachineInstr &MI) const {
// instruction to maintain this information.
if (MI.getOperand(0).isUndef() || MI.getNumOperands() > 2) {
MI.setDesc(TII->get(TargetOpcode::KILL));
- DEBUG(dbgs() << " replace by: " << MI);
+ LLVM_DEBUG(dbgs() << " replace by: " << MI);
return;
}
if (Indexes)
Indexes->removeSingleMachineInstrFromMaps(MI);
MI.eraseFromBundle();
- DEBUG(dbgs() << " deleted.\n");
+ LLVM_DEBUG(dbgs() << " deleted.\n");
}
/// The liverange splitting logic sometimes produces bundles of copies when
@@ -462,7 +461,7 @@ void VirtRegRewriter::rewrite() {
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
- DEBUG(MBBI->print(dbgs(), Indexes));
+ LLVM_DEBUG(MBBI->print(dbgs(), Indexes));
for (MachineBasicBlock::instr_iterator
MII = MBBI->instr_begin(), MIE = MBBI->instr_end(); MII != MIE;) {
MachineInstr *MI = &*MII;
@@ -545,7 +544,7 @@ void VirtRegRewriter::rewrite() {
while (!SuperDefs.empty())
MI->addRegisterDefined(SuperDefs.pop_back_val(), TRI);
- DEBUG(dbgs() << "> " << *MI);
+ LLVM_DEBUG(dbgs() << "> " << *MI);
expandCopyBundle(*MI);
diff --git a/lib/CodeGen/WinEHPrepare.cpp b/lib/CodeGen/WinEHPrepare.cpp
index 2592c2d98eda..199e7228a8a4 100644
--- a/lib/CodeGen/WinEHPrepare.cpp
+++ b/lib/CodeGen/WinEHPrepare.cpp
@@ -271,10 +271,11 @@ static void calculateCXXStateNumbers(WinEHFuncInfo &FuncInfo,
}
int CatchHigh = FuncInfo.getLastStateNumber();
addTryBlockMapEntry(FuncInfo, TryLow, TryHigh, CatchHigh, Handlers);
- DEBUG(dbgs() << "TryLow[" << BB->getName() << "]: " << TryLow << '\n');
- DEBUG(dbgs() << "TryHigh[" << BB->getName() << "]: " << TryHigh << '\n');
- DEBUG(dbgs() << "CatchHigh[" << BB->getName() << "]: " << CatchHigh
- << '\n');
+ LLVM_DEBUG(dbgs() << "TryLow[" << BB->getName() << "]: " << TryLow << '\n');
+ LLVM_DEBUG(dbgs() << "TryHigh[" << BB->getName() << "]: " << TryHigh
+ << '\n');
+ LLVM_DEBUG(dbgs() << "CatchHigh[" << BB->getName() << "]: " << CatchHigh
+ << '\n');
} else {
auto *CleanupPad = cast<CleanupPadInst>(FirstNonPHI);
@@ -285,8 +286,8 @@ static void calculateCXXStateNumbers(WinEHFuncInfo &FuncInfo,
int CleanupState = addUnwindMapEntry(FuncInfo, ParentState, BB);
FuncInfo.EHPadStateMap[CleanupPad] = CleanupState;
- DEBUG(dbgs() << "Assigning state #" << CleanupState << " to BB "
- << BB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Assigning state #" << CleanupState << " to BB "
+ << BB->getName() << '\n');
for (const BasicBlock *PredBlock : predecessors(BB)) {
if ((PredBlock = getEHPadFromPredecessor(PredBlock,
CleanupPad->getParentPad()))) {
@@ -351,8 +352,8 @@ static void calculateSEHStateNumbers(WinEHFuncInfo &FuncInfo,
// Everything in the __try block uses TryState as its parent state.
FuncInfo.EHPadStateMap[CatchSwitch] = TryState;
- DEBUG(dbgs() << "Assigning state #" << TryState << " to BB "
- << CatchPadBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Assigning state #" << TryState << " to BB "
+ << CatchPadBB->getName() << '\n');
for (const BasicBlock *PredBlock : predecessors(BB))
if ((PredBlock = getEHPadFromPredecessor(PredBlock,
CatchSwitch->getParentPad())))
@@ -387,8 +388,8 @@ static void calculateSEHStateNumbers(WinEHFuncInfo &FuncInfo,
int CleanupState = addSEHFinally(FuncInfo, ParentState, BB);
FuncInfo.EHPadStateMap[CleanupPad] = CleanupState;
- DEBUG(dbgs() << "Assigning state #" << CleanupState << " to BB "
- << BB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Assigning state #" << CleanupState << " to BB "
+ << BB->getName() << '\n');
for (const BasicBlock *PredBlock : predecessors(BB))
if ((PredBlock =
getEHPadFromPredecessor(PredBlock, CleanupPad->getParentPad())))
@@ -1034,17 +1035,17 @@ bool WinEHPrepare::prepareExplicitEH(Function &F) {
demotePHIsOnFunclets(F);
if (!DisableCleanups) {
- DEBUG(verifyFunction(F));
+ LLVM_DEBUG(verifyFunction(F));
removeImplausibleInstructions(F);
- DEBUG(verifyFunction(F));
+ LLVM_DEBUG(verifyFunction(F));
cleanupPreparedFunclets(F);
}
- DEBUG(verifyPreparedFunclets(F));
+ LLVM_DEBUG(verifyPreparedFunclets(F));
// Recolor the CFG to verify that all is well.
- DEBUG(colorFunclets(F));
- DEBUG(verifyPreparedFunclets(F));
+ LLVM_DEBUG(colorFunclets(F));
+ LLVM_DEBUG(verifyPreparedFunclets(F));
BlockColors.clear();
FuncletBlocks.clear();
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index 2cc6c460288a..dae0ea894204 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -214,7 +214,7 @@ void ExecutionEngine::addGlobalMapping(StringRef Name, uint64_t Addr) {
assert(!Name.empty() && "Empty GlobalMapping symbol name!");
- DEBUG(dbgs() << "JIT: Map \'" << Name << "\' to [" << Addr << "]\n";);
+ LLVM_DEBUG(dbgs() << "JIT: Map \'" << Name << "\' to [" << Addr << "]\n";);
uint64_t &CurVal = EEState.getGlobalAddressMap()[Name];
assert((!CurVal || !Addr) && "GlobalMapping already established!");
CurVal = Addr;
@@ -343,13 +343,14 @@ void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
unsigned PtrSize = EE->getDataLayout().getPointerSize();
Array = make_unique<char[]>((InputArgv.size()+1)*PtrSize);
- DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array.get() << "\n");
+ LLVM_DEBUG(dbgs() << "JIT: ARGV = " << (void *)Array.get() << "\n");
Type *SBytePtr = Type::getInt8PtrTy(C);
for (unsigned i = 0; i != InputArgv.size(); ++i) {
unsigned Size = InputArgv[i].size()+1;
auto Dest = make_unique<char[]>(Size);
- DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void*)Dest.get() << "\n");
+ LLVM_DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void *)Dest.get()
+ << "\n");
std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest.get());
Dest[Size-1] = 0;
@@ -1180,8 +1181,8 @@ void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
}
void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
- DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
- DEBUG(Init->dump());
+ LLVM_DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
+ LLVM_DEBUG(Init->dump());
if (isa<UndefValue>(Init))
return;
@@ -1228,7 +1229,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
return;
}
- DEBUG(dbgs() << "Bad Type: " << *Init->getType() << "\n");
+ LLVM_DEBUG(dbgs() << "Bad Type: " << *Init->getType() << "\n");
llvm_unreachable("Unknown constant type to initialize memory with!");
}
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 358366c765fc..9e77d160c30b 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -976,9 +976,9 @@ void Interpreter::visitAllocaInst(AllocaInst &I) {
// Allocate enough memory to hold the type...
void *Memory = safe_malloc(MemToAlloc);
- DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
- << NumElements << " (Total: " << MemToAlloc << ") at "
- << uintptr_t(Memory) << '\n');
+ LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
+ << " bytes) x " << NumElements << " (Total: " << MemToAlloc
+ << ") at " << uintptr_t(Memory) << '\n');
GenericValue Result = PTOGV(Memory);
assert(Result.PointerVal && "Null pointer returned by malloc!");
@@ -1025,7 +1025,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
GenericValue Result;
Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
- DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
+ LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
return Result;
}
@@ -2118,7 +2118,7 @@ void Interpreter::run() {
// Track the number of dynamic instructions executed.
    ++NumDynamicInsts;
- DEBUG(dbgs() << "About to interpret: " << I);
+ LLVM_DEBUG(dbgs() << "About to interpret: " << I);
visit(I); // Dispatch to one of the visit* methods...
}
}
diff --git a/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp b/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
index 3581d6458395..29d9d080fc55 100644
--- a/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
+++ b/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
@@ -57,9 +57,10 @@ public:
void OProfileJITEventListener::initialize() {
if (!Wrapper->op_open_agent()) {
const std::string err_str = sys::StrError();
- DEBUG(dbgs() << "Failed to connect to OProfile agent: " << err_str << "\n");
+ LLVM_DEBUG(dbgs() << "Failed to connect to OProfile agent: " << err_str
+ << "\n");
} else {
- DEBUG(dbgs() << "Connected to OProfile agent.\n");
+ LLVM_DEBUG(dbgs() << "Connected to OProfile agent.\n");
}
}
@@ -67,10 +68,10 @@ OProfileJITEventListener::~OProfileJITEventListener() {
if (Wrapper->isAgentAvailable()) {
if (Wrapper->op_close_agent() == -1) {
const std::string err_str = sys::StrError();
- DEBUG(dbgs() << "Failed to disconnect from OProfile agent: "
- << err_str << "\n");
+ LLVM_DEBUG(dbgs() << "Failed to disconnect from OProfile agent: "
+ << err_str << "\n");
} else {
- DEBUG(dbgs() << "Disconnected from OProfile agent.\n");
+ LLVM_DEBUG(dbgs() << "Disconnected from OProfile agent.\n");
}
}
}
@@ -103,9 +104,9 @@ void OProfileJITEventListener::NotifyObjectEmitted(
if (Wrapper->op_write_native_code(Name.data(), Addr, (void *)Addr, Size) ==
        -1) {
- DEBUG(dbgs() << "Failed to tell OProfile about native function " << Name
- << " at [" << (void *)Addr << "-" << ((char *)Addr + Size)
- << "]\n");
+ LLVM_DEBUG(dbgs() << "Failed to tell OProfile about native function "
+ << Name << " at [" << (void *)Addr << "-"
+ << ((char *)Addr + Size) << "]\n");
continue;
}
// TODO: support line number info (similar to IntelJITEventListener.cpp)
@@ -135,9 +136,10 @@ void OProfileJITEventListener::NotifyFreeingObject(const ObjectFile &Obj) {
uint64_t Addr = *AddrOrErr;
if (Wrapper->op_unload_native_code(Addr) == -1) {
- DEBUG(dbgs()
- << "Failed to tell OProfile about unload of native function at "
- << (void*)Addr << "\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Failed to tell OProfile about unload of native function at "
+ << (void *)Addr << "\n");
continue;
}
}
diff --git a/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp b/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
index d96278a8137b..b473ac3faf4c 100644
--- a/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
+++ b/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
@@ -64,15 +64,16 @@ bool OProfileWrapper::initialize() {
// If the oprofile daemon is not running, don't load the opagent library
if (!isOProfileRunning()) {
- DEBUG(dbgs() << "OProfile daemon is not detected.\n");
+ LLVM_DEBUG(dbgs() << "OProfile daemon is not detected.\n");
return false;
}
std::string error;
if(!DynamicLibrary::LoadLibraryPermanently("libopagent.so", &error)) {
- DEBUG(dbgs()
- << "OProfile connector library libopagent.so could not be loaded: "
- << error << "\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "OProfile connector library libopagent.so could not be loaded: "
+ << error << "\n");
}
// Get the addresses of the opagent functions
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 3d274b63a4fa..d3ac4f145c83 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -122,10 +122,8 @@ void RuntimeDyldImpl::resolveRelocations() {
MutexGuard locked(lock);
// Print out the sections prior to relocation.
- DEBUG(
- for (int i = 0, e = Sections.size(); i != e; ++i)
- dumpSectionMemory(Sections[i], "before relocations");
- );
+ LLVM_DEBUG(for (int i = 0, e = Sections.size(); i != e; ++i)
+ dumpSectionMemory(Sections[i], "before relocations"););
// First, resolve relocations associated with external symbols.
if (auto Err = resolveExternalSymbols()) {
@@ -140,18 +138,15 @@ void RuntimeDyldImpl::resolveRelocations() {
// entry provides the section to which the relocation will be applied.
int Idx = it->first;
uint64_t Addr = Sections[Idx].getLoadAddress();
- DEBUG(dbgs() << "Resolving relocations Section #" << Idx << "\t"
- << format("%p", (uintptr_t)Addr) << "\n");
+ LLVM_DEBUG(dbgs() << "Resolving relocations Section #" << Idx << "\t"
+ << format("%p", (uintptr_t)Addr) << "\n");
resolveRelocationList(it->second, Addr);
}
Relocations.clear();
// Print out sections after relocation.
- DEBUG(
- for (int i = 0, e = Sections.size(); i != e; ++i)
- dumpSectionMemory(Sections[i], "after relocations");
- );
-
+ LLVM_DEBUG(for (int i = 0, e = Sections.size(); i != e; ++i)
+ dumpSectionMemory(Sections[i], "after relocations"););
}
void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
@@ -230,7 +225,7 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
}
// Parse symbols
- DEBUG(dbgs() << "Parse symbols:\n");
+ LLVM_DEBUG(dbgs() << "Parse symbols:\n");
for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
       ++I) {
uint32_t Flags = I->getFlags();
@@ -297,10 +292,10 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
unsigned SectionID = AbsoluteSymbolSection;
- DEBUG(dbgs() << "\tType: " << SymType << " (absolute) Name: " << Name
- << " SID: " << SectionID
- << " Offset: " << format("%p", (uintptr_t)Addr)
- << " flags: " << Flags << "\n");
+ LLVM_DEBUG(dbgs() << "\tType: " << SymType << " (absolute) Name: " << Name
+ << " SID: " << SectionID
+ << " Offset: " << format("%p", (uintptr_t)Addr)
+ << " flags: " << Flags << "\n");
GlobalSymbolTable[Name] = SymbolTableEntry(SectionID, Addr, JITSymFlags);
} else if (SymType == object::SymbolRef::ST_Function ||
SymType == object::SymbolRef::ST_Data ||
@@ -329,10 +324,10 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
else
return SectionIDOrErr.takeError();
- DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
- << " SID: " << SectionID
- << " Offset: " << format("%p", (uintptr_t)SectOffset)
- << " flags: " << Flags << "\n");
+ LLVM_DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
+ << " SID: " << SectionID
+ << " Offset: " << format("%p", (uintptr_t)SectOffset)
+ << " flags: " << Flags << "\n");
GlobalSymbolTable[Name] =
SymbolTableEntry(SectionID, SectOffset, JITSymFlags);
}
@@ -344,7 +339,7 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
return std::move(Err);
// Parse and process relocations
- DEBUG(dbgs() << "Parse relocations:\n");
+ LLVM_DEBUG(dbgs() << "Parse relocations:\n");
for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
SI != SE; ++SI) {
StubMap Stubs;
@@ -367,7 +362,7 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
else
return SectionIDOrErr.takeError();
- DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+ LLVM_DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
for (; I != E;)
if (auto IOrErr = processRelocationRef(SectionID, I, Obj, LocalSections, Stubs))
@@ -669,8 +664,9 @@ Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
SectionEntry("<common symbols>", Addr, CommonSize, CommonSize, 0));
memset(Addr, 0, CommonSize);
- DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID << " new addr: "
- << format("%p", Addr) << " DataSize: " << CommonSize << "\n");
+ LLVM_DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
+ << " new addr: " << format("%p", Addr)
+ << " DataSize: " << CommonSize << "\n");
// Assign the address of each symbol
for (auto &Sym : SymbolsToAllocate) {
@@ -688,8 +684,8 @@ Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
Offset += AlignOffset;
}
JITSymbolFlags JITSymFlags = getJITSymbolFlags(Sym);
- DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
- << format("%p", Addr) << "\n");
+ LLVM_DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
+ << format("%p", Addr) << "\n");
GlobalSymbolTable[Name] =
SymbolTableEntry(SectionID, Offset, JITSymFlags);
Offset += Size;
@@ -785,21 +781,22 @@ RuntimeDyldImpl::emitSection(const ObjectFile &Obj,
DataSize &= ~(getStubAlignment() - 1);
}
- DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
- << " obj addr: " << format("%p", pData)
- << " new addr: " << format("%p", Addr)
- << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
- << " Allocate: " << Allocate << "\n");
+ LLVM_DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: "
+ << Name << " obj addr: " << format("%p", pData)
+ << " new addr: " << format("%p", Addr) << " DataSize: "
+ << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
} else {
// Even if we didn't load the section, we need to record an entry for it
// to handle later processing (and by 'handle' I mean don't do anything
// with these sections).
Allocate = 0;
Addr = nullptr;
- DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
- << " obj addr: " << format("%p", data.data()) << " new addr: 0"
- << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
- << " Allocate: " << Allocate << "\n");
+ LLVM_DEBUG(
+ dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
+ << " obj addr: " << format("%p", data.data()) << " new addr: 0"
+ << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
}
Sections.push_back(
@@ -976,10 +973,11 @@ void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
// Addr is a uint64_t because we can't assume the pointer width
// of the target is the same as that of the host. Just use a generic
// "big enough" type.
- DEBUG(dbgs() << "Reassigning address for section " << SectionID << " ("
- << Sections[SectionID].getName() << "): "
- << format("0x%016" PRIx64, Sections[SectionID].getLoadAddress())
- << " -> " << format("0x%016" PRIx64, Addr) << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Reassigning address for section " << SectionID << " ("
+ << Sections[SectionID].getName() << "): "
+ << format("0x%016" PRIx64, Sections[SectionID].getLoadAddress())
+ << " -> " << format("0x%016" PRIx64, Addr) << "\n");
Sections[SectionID].setLoadAddress(Addr);
}
@@ -1034,8 +1032,8 @@ Error RuntimeDyldImpl::resolveExternalSymbols() {
StringRef Name = i->first();
if (Name.size() == 0) {
// This is an absolute symbol, use an address of zero.
- DEBUG(dbgs() << "Resolving absolute relocations."
- << "\n");
+ LLVM_DEBUG(dbgs() << "Resolving absolute relocations."
+ << "\n");
RelocationList &Relocs = i->second;
resolveRelocationList(Relocs, 0);
} else {
@@ -1077,8 +1075,8 @@ Error RuntimeDyldImpl::resolveExternalSymbols() {
// if the target symbol is Thumb.
Addr = modifyAddressBasedOnFlags(Addr, Flags);
- DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
- << format("0x%lx", Addr) << "\n");
+ LLVM_DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
+ << format("0x%lx", Addr) << "\n");
// This list may have been updated when we called getSymbolAddress, so
// don't change this code to get the list earlier.
RelocationList &Relocs = i->second;
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
index 3d944bf7b605..fa8906869b3a 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -688,12 +688,13 @@ RuntimeDyldCheckerImpl::RuntimeDyldCheckerImpl(RuntimeDyld &RTDyld,
bool RuntimeDyldCheckerImpl::check(StringRef CheckExpr) const {
CheckExpr = CheckExpr.trim();
- DEBUG(dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr << "'...\n");
+ LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr
+ << "'...\n");
RuntimeDyldCheckerExprEval P(*this, ErrStream);
bool Result = P.evaluate(CheckExpr);
(void)Result;
- DEBUG(dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
- << (Result ? "passed" : "FAILED") << ".\n");
+ LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
+ << (Result ? "passed" : "FAILED") << ".\n");
return Result;
}
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index b6992f659460..322883f4feae 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -273,8 +273,8 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
case ELF::R_X86_64_64: {
support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
Value + Addend;
- DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
- << format("%p\n", Section.getAddressWithOffset(Offset)));
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
break;
}
case ELF::R_X86_64_32:
@@ -286,8 +286,8 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
TruncatedAddr;
- DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
- << format("%p\n", Section.getAddressWithOffset(Offset)));
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
break;
}
case ELF::R_X86_64_PC8: {
@@ -354,12 +354,12 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
// Data should use target endian. Code should always use little endian.
bool isBE = Arch == Triple::aarch64_be;
- DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
- << format("%llx", Section.getAddressWithOffset(Offset))
- << " FinalAddress: 0x" << format("%llx", FinalAddress)
- << " Value: 0x" << format("%llx", Value) << " Type: 0x"
- << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
- << "\n");
+ LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend) << "\n");
switch (Type) {
default:
@@ -474,11 +474,12 @@ void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
Value += Addend;
- DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
- << Section.getAddressWithOffset(Offset)
- << " FinalAddress: " << format("%p", FinalAddress) << " Value: "
- << format("%x", Value) << " Type: " << format("%x", Type)
- << " Addend: " << format("%x", Addend) << "\n");
+ LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset)
+ << " FinalAddress: " << format("%p", FinalAddress)
+ << " Value: " << format("%x", Value)
+ << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
switch (Type) {
default:
@@ -859,16 +860,16 @@ void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
break;
case ELF::R_BPF_64_64: {
write(isBE, Section.getAddressWithOffset(Offset), Value + Addend);
- DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
- << format("%p\n", Section.getAddressWithOffset(Offset)));
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
break;
}
case ELF::R_BPF_64_32: {
Value += Addend;
assert(Value <= UINT32_MAX);
write(isBE, Section.getAddressWithOffset(Offset), static_cast<uint32_t>(Value));
- DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
- << format("%p\n", Section.getAddressWithOffset(Offset)));
+ LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
+ << format("%p\n", Section.getAddressWithOffset(Offset)));
break;
}
}
@@ -1025,7 +1026,7 @@ void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
relocation_iterator RelI,
StubMap &Stubs) {
- DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
+ LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
SectionEntry &Section = Sections[SectionID];
uint64_t Offset = RelI->getOffset();
@@ -1036,10 +1037,10 @@ void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
resolveRelocation(Section, Offset,
(uint64_t)Section.getAddressWithOffset(i->second),
RelType, 0);
- DEBUG(dbgs() << " Stub function found\n");
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
} else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
// Create a new stub function.
- DEBUG(dbgs() << " Create a new stub function\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.getStubOffset();
uint8_t *StubTargetAddr = createStubFunction(
Section.getAddressWithOffset(Section.getStubOffset()));
@@ -1096,8 +1097,8 @@ RuntimeDyldELF::processRelocationRef(
else
return TargetNameOrErr.takeError();
}
- DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
- << " TargetName: " << TargetName << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
+ << " TargetName: " << TargetName << "\n");
RelocationValueRef Value;
// First search for the symbol in the local symbol table
SymbolRef::Type SymType = SymbolRef::ST_Unknown;
@@ -1138,7 +1139,7 @@ RuntimeDyldELF::processRelocationRef(
section_iterator si = *SectionOrErr;
if (si == Obj.section_end())
llvm_unreachable("Symbol section not found, bad object file format!");
- DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
bool isCode = si->isText();
if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
ObjSectionToID))
@@ -1170,8 +1171,8 @@ RuntimeDyldELF::processRelocationRef(
uint64_t Offset = RelI->getOffset();
- DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
- << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
+ << "\n");
if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be)) {
if (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26) {
resolveAArch64Branch(SectionID, Value, RelI, Stubs);
@@ -1193,7 +1194,7 @@ RuntimeDyldELF::processRelocationRef(
if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
RelType == ELF::R_ARM_JUMP24) {
// This is an ARM branch relocation, need to use a stub function.
- DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
+ LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
SectionEntry &Section = Sections[SectionID];
// Look for an existing stub.
@@ -1203,10 +1204,10 @@ RuntimeDyldELF::processRelocationRef(
Section, Offset,
reinterpret_cast<uint64_t>(Section.getAddressWithOffset(i->second)),
RelType, 0);
- DEBUG(dbgs() << " Stub function found\n");
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
- DEBUG(dbgs() << " Create a new stub function\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.getStubOffset();
uint8_t *StubTargetAddr = createStubFunction(
Section.getAddressWithOffset(Section.getStubOffset()));
@@ -1241,7 +1242,7 @@ RuntimeDyldELF::processRelocationRef(
uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
if (RelType == ELF::R_MIPS_26) {
// This is an Mips branch relocation, need to use a stub function.
- DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
SectionEntry &Section = Sections[SectionID];
// Extract the addend from the instruction.
@@ -1256,10 +1257,10 @@ RuntimeDyldELF::processRelocationRef(
if (i != Stubs.end()) {
RelocationEntry RE(SectionID, Offset, RelType, i->second);
addRelocationForSection(RE, SectionID);
- DEBUG(dbgs() << " Stub function found\n");
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
- DEBUG(dbgs() << " Create a new stub function\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.getStubOffset();
unsigned AbiVariant = Obj.getPlatformFlags();
@@ -1343,7 +1344,7 @@ RuntimeDyldELF::processRelocationRef(
addRelocationForSection(RE, Value.SectionID);
} else if (RelType == ELF::R_MIPS_26) {
// This is an Mips branch relocation, need to use a stub function.
- DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
SectionEntry &Section = Sections[SectionID];
// Look up for existing stub.
@@ -1351,10 +1352,10 @@ RuntimeDyldELF::processRelocationRef(
if (i != Stubs.end()) {
RelocationEntry RE(SectionID, Offset, RelType, i->second);
addRelocationForSection(RE, SectionID);
- DEBUG(dbgs() << " Stub function found\n");
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
- DEBUG(dbgs() << " Create a new stub function\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.getStubOffset();
unsigned AbiVariant = Obj.getPlatformFlags();
@@ -1462,10 +1463,10 @@ RuntimeDyldELF::processRelocationRef(
reinterpret_cast<uint64_t>(
Section.getAddressWithOffset(i->second)),
RelType, 0);
- DEBUG(dbgs() << " Stub function found\n");
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
- DEBUG(dbgs() << " Create a new stub function\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.getStubOffset();
uint8_t *StubTargetAddr = createStubFunction(
Section.getAddressWithOffset(Section.getStubOffset()),
@@ -1582,7 +1583,7 @@ RuntimeDyldELF::processRelocationRef(
// parts of the stub separately. However, as things stand, we allocate
// a stub for every relocation, so using a GOT in JIT code should be
// no less space efficient than using an explicit constant pool.
- DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
+ LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
SectionEntry &Section = Sections[SectionID];
// Look for an existing stub.
@@ -1590,10 +1591,10 @@ RuntimeDyldELF::processRelocationRef(
uintptr_t StubAddress;
if (i != Stubs.end()) {
StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
- DEBUG(dbgs() << " Stub function found\n");
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
- DEBUG(dbgs() << " Create a new stub function\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
uintptr_t BaseAddress = uintptr_t(Section.getAddress());
uintptr_t StubAlignment = getStubAlignment();
@@ -1644,10 +1645,10 @@ RuntimeDyldELF::processRelocationRef(
uintptr_t StubAddress;
if (i != Stubs.end()) {
StubAddress = uintptr_t(Section.getAddress()) + i->second;
- DEBUG(dbgs() << " Stub function found\n");
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function (equivalent to a PLT entry).
- DEBUG(dbgs() << " Create a new stub function\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
uintptr_t BaseAddress = uintptr_t(Section.getAddress());
uintptr_t StubAlignment = getStubAlignment();
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index b0561f68edb3..c5a215c83331 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -196,10 +196,10 @@ Error RuntimeDyldMachO::populateIndirectSymbolPointersSection(
assert((PTSectionSize % PTEntrySize) == 0 &&
"Pointers section does not contain a whole number of stubs?");
- DEBUG(dbgs() << "Populating pointer table section "
- << Sections[PTSectionID].getName() << ", Section ID "
- << PTSectionID << ", " << NumPTEntries << " entries, "
- << PTEntrySize << " bytes each:\n");
+ LLVM_DEBUG(dbgs() << "Populating pointer table section "
+ << Sections[PTSectionID].getName() << ", Section ID "
+ << PTSectionID << ", " << NumPTEntries << " entries, "
+ << PTEntrySize << " bytes each:\n");
for (unsigned i = 0; i < NumPTEntries; ++i) {
unsigned SymbolIndex =
@@ -210,8 +210,8 @@ Error RuntimeDyldMachO::populateIndirectSymbolPointersSection(
IndirectSymbolName = *IndirectSymbolNameOrErr;
else
return IndirectSymbolNameOrErr.takeError();
- DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
- << ", PT offset: " << PTEntryOffset << "\n");
+ LLVM_DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
+ << ", PT offset: " << PTEntryOffset << "\n");
RelocationEntry RE(PTSectionID, PTEntryOffset,
MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
addRelocationForSymbol(RE, IndirectSymbolName);
@@ -275,8 +275,8 @@ unsigned char *RuntimeDyldMachOCRTPBase<Impl>::processFDE(uint8_t *P,
int64_t DeltaForEH) {
typedef typename Impl::TargetPtrT TargetPtrT;
- DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
- << ", Delta for EH: " << DeltaForEH << "\n");
+ LLVM_DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
+ << ", Delta for EH: " << DeltaForEH << "\n");
uint32_t Length = readBytesUnaligned(P, 4);
P += 4;
uint8_t *Ret = P + Length;
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
index 04678f224466..dd65051edad7 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
@@ -80,9 +80,9 @@ public:
SmallString<32> RelTypeName;
RelI->getTypeName(RelTypeName);
#endif
- DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
- << " RelType: " << RelTypeName << " TargetName: " << TargetName
- << " Addend " << Addend << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
unsigned TargetSectionID = -1;
if (Section == Obj.section_end()) {
@@ -145,10 +145,11 @@ public:
: Sections[RE.Sections.SectionA].getLoadAddressWithOffset(
RE.Addend);
assert(Result <= UINT32_MAX && "relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_I386_DIR32"
- << " TargetSection: " << RE.Sections.SectionA
- << " Value: " << format("0x%08" PRIx32, Result) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
writeBytesUnaligned(Result, Target, 4);
break;
}
@@ -159,10 +160,11 @@ public:
Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend) -
Sections[0].getLoadAddress();
assert(Result <= UINT32_MAX && "relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_I386_DIR32NB"
- << " TargetSection: " << RE.Sections.SectionA
- << " Value: " << format("0x%08" PRIx32, Result) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
writeBytesUnaligned(Result, Target, 4);
break;
}
@@ -176,10 +178,11 @@ public:
"relocation overflow");
assert(static_cast<int64_t>(Result) >= INT32_MIN &&
"relocation underflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_I386_REL32"
- << " TargetSection: " << RE.Sections.SectionA
- << " Value: " << format("0x%08" PRIx32, Result) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_REL32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
writeBytesUnaligned(Result, Target, 4);
break;
}
@@ -187,18 +190,18 @@ public:
// 16-bit section index of the section that contains the target.
assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
"relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_I386_SECTION Value: " << RE.SectionID
- << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECTION Value: "
+ << RE.SectionID << '\n');
writeBytesUnaligned(RE.SectionID, Target, 2);
break;
case COFF::IMAGE_REL_I386_SECREL:
// 32-bit offset of the target from the beginning of its section.
assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
"relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_I386_SECREL Value: " << RE.Addend
- << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECREL Value: "
+ << RE.Addend << '\n');
writeBytesUnaligned(RE.Addend, Target, 4);
break;
default:
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
index 9000435764df..729ea1ec48a4 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
@@ -97,9 +97,9 @@ public:
SmallString<32> RelTypeName;
RelI->getTypeName(RelTypeName);
#endif
- DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
- << " RelType: " << RelTypeName << " TargetName: " << TargetName
- << " Addend " << Addend << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
unsigned TargetSectionID = -1;
if (Section == Obj.section_end()) {
@@ -187,10 +187,11 @@ public:
: Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
Result |= ISASelectionBit;
assert(Result <= UINT32_MAX && "relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_ADDR32"
- << " TargetSection: " << RE.Sections.SectionA
- << " Value: " << format("0x%08" PRIx32, Result) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
writeBytesUnaligned(Result, Target, 4);
break;
}
@@ -200,10 +201,11 @@ public:
uint64_t Result = Sections[RE.Sections.SectionA].getLoadAddress() -
Sections[0].getLoadAddress() + RE.Addend;
assert(Result <= UINT32_MAX && "relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_ADDR32NB"
- << " TargetSection: " << RE.Sections.SectionA
- << " Value: " << format("0x%08" PRIx32, Result) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
Result |= ISASelectionBit;
writeBytesUnaligned(Result, Target, 4);
break;
@@ -212,18 +214,18 @@ public:
// 16-bit section index of the section that contains the target.
assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
"relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_SECTION Value: " << RE.SectionID
- << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECTION Value: "
+ << RE.SectionID << '\n');
writeBytesUnaligned(RE.SectionID, Target, 2);
break;
case COFF::IMAGE_REL_ARM_SECREL:
// 32-bit offset of the target from the beginning of its section.
assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
"relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_SECREL Value: " << RE.Addend
- << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECREL Value: " << RE.Addend
+ << '\n');
writeBytesUnaligned(RE.Addend, Target, 2);
break;
case COFF::IMAGE_REL_ARM_MOV32T: {
@@ -231,10 +233,11 @@ public:
uint64_t Result =
Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
assert(Result <= UINT32_MAX && "relocation overflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_MOV32T"
- << " TargetSection: " << RE.Sections.SectionA
- << " Value: " << format("0x%08" PRIx32, Result) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_MOV32T"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
// MOVW(T3): |11110|i|10|0|1|0|0|imm4|0|imm3|Rd|imm8|
// imm32 = zext imm4:i:imm3:imm8
@@ -262,9 +265,9 @@ public:
"relocation overflow");
assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
"relocation underflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_BRANCH20T"
- << " Value: " << static_cast<int32_t>(Value) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH20T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
static_cast<void>(Value);
llvm_unreachable("unimplemented relocation");
break;
@@ -277,9 +280,9 @@ public:
"relocation overflow");
assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
"relocation underflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_BRANCH24T"
- << " Value: " << static_cast<int32_t>(Value) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH24T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
static_cast<void>(Value);
llvm_unreachable("unimplemented relocation");
break;
@@ -292,9 +295,9 @@ public:
"relocation overflow");
assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
"relocation underflow");
- DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
- << " RelType: IMAGE_REL_ARM_BLX23T"
- << " Value: " << static_cast<int32_t>(Value) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BLX23T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
static_cast<void>(Value);
llvm_unreachable("unimplemented relocation");
break;
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
index dc2adc6cb2a0..2d6e5c4aea67 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
@@ -143,15 +143,16 @@ public:
auto Stub = Stubs.find(OriginalRelValueRef);
if (Stub == Stubs.end()) {
- DEBUG(dbgs() << " Create a new stub function for " << TargetName.data()
- << "\n");
+ LLVM_DEBUG(dbgs() << " Create a new stub function for "
+ << TargetName.data() << "\n");
StubOffset = Section.getStubOffset();
Stubs[OriginalRelValueRef] = StubOffset;
createStubFunction(Section.getAddressWithOffset(StubOffset));
Section.advanceStubOffset(getMaxStubSize());
} else {
- DEBUG(dbgs() << " Stub function found for " << TargetName.data() << "\n");
+ LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
+ << "\n");
StubOffset = Stub->second;
}
@@ -232,9 +233,9 @@ public:
break;
}
- DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
- << " RelType: " << RelType << " TargetName: " << TargetName
- << " Addend " << Addend << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelType << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
if (IsExtern) {
RelocationEntry RE(SectionID, Offset, RelType, Addend);
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
index fe0f48e66a81..3a166b40af2d 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
@@ -55,12 +55,12 @@ RuntimeDyldELFMips::evaluateMIPS32Relocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type) {
- DEBUG(dbgs() << "evaluateMIPS32Relocation, LocalAddress: 0x"
- << format("%llx", Section.getAddressWithOffset(Offset))
- << " FinalAddress: 0x"
- << format("%llx", Section.getLoadAddressWithOffset(Offset))
- << " Value: 0x" << format("%llx", Value) << " Type: 0x"
- << format("%x", Type) << "\n");
+ LLVM_DEBUG(dbgs() << "evaluateMIPS32Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << "\n");
switch (Type) {
default:
@@ -110,15 +110,16 @@ int64_t RuntimeDyldELFMips::evaluateMIPS64Relocation(
const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
int64_t Addend, uint64_t SymOffset, SID SectionID) {
- DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
- << format("%llx", Section.getAddressWithOffset(Offset))
- << " FinalAddress: 0x"
- << format("%llx", Section.getLoadAddressWithOffset(Offset))
- << " Value: 0x" << format("%llx", Value) << " Type: 0x"
- << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
- << " Offset: " << format("%llx" PRIx64, Offset)
- << " SID: " << format("%d", SectionID)
- << " SymOffset: " << format("%x", SymOffset) << "\n");
+ LLVM_DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend)
+ << " Offset: " << format("%llx" PRIx64, Offset)
+ << " SID: " << format("%d", SectionID)
+ << " SymOffset: " << format("%x", SymOffset) << "\n");
switch (Type) {
default:
@@ -307,13 +308,12 @@ void RuntimeDyldELFMips::resolveMIPSO32Relocation(const SectionEntry &Section,
uint8_t *TargetPtr = Section.getAddressWithOffset(Offset);
Value += Addend;
- DEBUG(dbgs() << "resolveMIPSO32Relocation, LocalAddress: "
- << Section.getAddressWithOffset(Offset) << " FinalAddress: "
- << format("%p", Section.getLoadAddressWithOffset(Offset))
- << " Value: " << format("%x", Value)
- << " Type: " << format("%x", Type)
- << " Addend: " << format("%x", Addend)
- << " SymOffset: " << format("%x", Offset) << "\n");
+ LLVM_DEBUG(dbgs() << "resolveMIPSO32Relocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset) << " FinalAddress: "
+ << format("%p", Section.getLoadAddressWithOffset(Offset))
+ << " Value: " << format("%x", Value) << " Type: "
+ << format("%x", Type) << " Addend: " << format("%x", Addend)
+ << " SymOffset: " << format("%x", Offset) << "\n");
Value = evaluateMIPS32Relocation(Section, Offset, Value, Type);
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
index 5f02493e1e21..7c3385e828af 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -329,7 +329,7 @@ public:
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
- DEBUG(dumpRelocationToResolve(RE, Value));
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
index 16a0f6ef2a76..64a6b2901819 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -200,7 +200,7 @@ public:
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
- DEBUG(dumpRelocationToResolve(RE, Value));
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
@@ -406,11 +406,11 @@ private:
// addend = Encoded - Expected
// = Encoded - (AddrA - AddrB)
- DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA << ", AddrB: " << AddrB
- << ", Addend: " << Addend << ", SectionA ID: " << SectionAID
- << ", SectionAOffset: " << SectionAOffset
- << ", SectionB ID: " << SectionBID
- << ", SectionBOffset: " << SectionBOffset << "\n");
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
HalfDiffKindBits);
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
index c42f1751a181..d384d70b8b0f 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -97,7 +97,7 @@ public:
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
- DEBUG(dumpRelocationToResolve(RE, Value));
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
@@ -192,11 +192,11 @@ private:
// Compute the addend 'C' from the original expression 'A - B + C'.
Addend -= AddrA - AddrB;
- DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA << ", AddrB: " << AddrB
- << ", Addend: " << Addend << ", SectionA ID: " << SectionAID
- << ", SectionAOffset: " << SectionAOffset
- << ", SectionB ID: " << SectionBID
- << ", SectionBOffset: " << SectionBOffset << "\n");
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
SectionAOffset, SectionBID, SectionBOffset,
IsPCRel, Size);
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
index 32fd3efddd0d..9732ea6a0cd2 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -85,7 +85,7 @@ public:
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
- DEBUG(dumpRelocationToResolve(RE, Value));
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
diff --git a/lib/IR/ConstantsContext.h b/lib/IR/ConstantsContext.h
index 6585304e7674..e9f31e4ded68 100644
--- a/lib/IR/ConstantsContext.h
+++ b/lib/IR/ConstantsContext.h
@@ -695,7 +695,9 @@ public:
return nullptr;
}
- void dump() const { DEBUG(dbgs() << "Constant.cpp: ConstantUniqueMap\n"); }
+ void dump() const {
+ LLVM_DEBUG(dbgs() << "Constant.cpp: ConstantUniqueMap\n");
+ }
};
} // end namespace llvm
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index 02311db66e35..5f13f237d0d8 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -1669,8 +1669,9 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
GV->setLinkage(GlobalValue::LinkOnceODRLinkage);
break;
case LLVMLinkOnceODRAutoHideLinkage:
- DEBUG(errs() << "LLVMSetLinkage(): LLVMLinkOnceODRAutoHideLinkage is no "
- "longer supported.");
+ LLVM_DEBUG(
+ errs() << "LLVMSetLinkage(): LLVMLinkOnceODRAutoHideLinkage is no "
+ "longer supported.");
break;
case LLVMWeakAnyLinkage:
GV->setLinkage(GlobalValue::WeakAnyLinkage);
@@ -1694,19 +1695,21 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
GV->setLinkage(GlobalValue::PrivateLinkage);
break;
case LLVMDLLImportLinkage:
- DEBUG(errs()
- << "LLVMSetLinkage(): LLVMDLLImportLinkage is no longer supported.");
+ LLVM_DEBUG(
+ errs()
+ << "LLVMSetLinkage(): LLVMDLLImportLinkage is no longer supported.");
break;
case LLVMDLLExportLinkage:
- DEBUG(errs()
- << "LLVMSetLinkage(): LLVMDLLExportLinkage is no longer supported.");
+ LLVM_DEBUG(
+ errs()
+ << "LLVMSetLinkage(): LLVMDLLExportLinkage is no longer supported.");
break;
case LLVMExternalWeakLinkage:
GV->setLinkage(GlobalValue::ExternalWeakLinkage);
break;
case LLVMGhostLinkage:
- DEBUG(errs()
- << "LLVMSetLinkage(): LLVMGhostLinkage is no longer supported.");
+ LLVM_DEBUG(
+ errs() << "LLVMSetLinkage(): LLVMGhostLinkage is no longer supported.");
break;
case LLVMCommonLinkage:
GV->setLinkage(GlobalValue::CommonLinkage);
diff --git a/lib/IR/Pass.cpp b/lib/IR/Pass.cpp
index 0c7dfb6e9e78..a1dc17882493 100644
--- a/lib/IR/Pass.cpp
+++ b/lib/IR/Pass.cpp
@@ -160,8 +160,8 @@ bool FunctionPass::skipFunction(const Function &F) const {
return true;
if (F.hasFnAttribute(Attribute::OptimizeNone)) {
- DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' on function "
- << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' on function "
+ << F.getName() << "\n");
return true;
}
return false;
@@ -195,8 +195,8 @@ bool BasicBlockPass::skipBasicBlock(const BasicBlock &BB) const {
if (F->hasFnAttribute(Attribute::OptimizeNone)) {
// Report this only once per function.
if (&BB == &F->getEntryBlock())
- DEBUG(dbgs() << "Skipping pass '" << getPassName()
- << "' on function " << F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName()
+ << "' on function " << F->getName() << "\n");
return true;
}
return false;
diff --git a/lib/IR/SafepointIRVerifier.cpp b/lib/IR/SafepointIRVerifier.cpp
index 04deb434cec2..25a7f8d6d51c 100644
--- a/lib/IR/SafepointIRVerifier.cpp
+++ b/lib/IR/SafepointIRVerifier.cpp
@@ -535,16 +535,16 @@ bool GCPtrTracker::removeValidUnrelocatedDefs(const BasicBlock *BB,
Contribution.erase(&I);
PoisonedDefs.erase(&I);
ValidUnrelocatedDefs.insert(&I);
- DEBUG(dbgs() << "Removing urelocated " << I << " from Contribution of "
- << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Removing urelocated " << I
+ << " from Contribution of " << BB->getName() << "\n");
ContributionChanged = true;
} else if (PoisonedPointerDef) {
// Mark pointer as poisoned, remove its def from Contribution and trigger
// update of all successors.
Contribution.erase(&I);
PoisonedDefs.insert(&I);
- DEBUG(dbgs() << "Removing poisoned " << I << " from Contribution of "
- << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Removing poisoned " << I << " from Contribution of "
+ << BB->getName() << "\n");
ContributionChanged = true;
} else {
bool Cleared = false;
@@ -594,11 +594,11 @@ void GCPtrTracker::transferBlock(const BasicBlock *BB, BasicBlockState &BBS,
AvailableOut = std::move(Temp);
}
- DEBUG(dbgs() << "Transfered block " << BB->getName() << " from ";
- PrintValueSet(dbgs(), AvailableIn.begin(), AvailableIn.end());
- dbgs() << " to ";
- PrintValueSet(dbgs(), AvailableOut.begin(), AvailableOut.end());
- dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "Transfered block " << BB->getName() << " from ";
+ PrintValueSet(dbgs(), AvailableIn.begin(), AvailableIn.end());
+ dbgs() << " to ";
+ PrintValueSet(dbgs(), AvailableOut.begin(), AvailableOut.end());
+ dbgs() << "\n";);
}
void GCPtrTracker::transferInstruction(const Instruction &I, bool &Cleared,
@@ -698,7 +698,8 @@ void InstructionVerifier::reportInvalidUse(const Value &V,
}
static void Verify(const Function &F, const DominatorTree &DT) {
- DEBUG(dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Verifying gc pointers in function: " << F.getName()
+ << "\n");
if (PrintOnly)
dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n";
diff --git a/lib/IR/ValueSymbolTable.cpp b/lib/IR/ValueSymbolTable.cpp
index 15988caa65eb..0a7f2803cd4c 100644
--- a/lib/IR/ValueSymbolTable.cpp
+++ b/lib/IR/ValueSymbolTable.cpp
@@ -75,7 +75,8 @@ void ValueSymbolTable::reinsertValue(Value* V) {
// Try inserting the name, assuming it won't conflict.
if (vmap.insert(V->getValueName())) {
- //DEBUG(dbgs() << " Inserted value: " << V->getValueName() << ": " << *V << "\n");
+ // LLVM_DEBUG(dbgs() << " Inserted value: " << V->getValueName() << ": " <<
+ // *V << "\n");
return;
}
@@ -90,7 +91,7 @@ void ValueSymbolTable::reinsertValue(Value* V) {
}
void ValueSymbolTable::removeValueName(ValueName *V) {
- //DEBUG(dbgs() << " Removing Value: " << V->getKeyData() << "\n");
+ // LLVM_DEBUG(dbgs() << " Removing Value: " << V->getKeyData() << "\n");
// Remove the value from the symbol table.
vmap.remove(V);
}
@@ -102,7 +103,7 @@ ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
// In the common case, the name is not already in the symbol table.
auto IterBool = vmap.insert(std::make_pair(Name, V));
if (IterBool.second) {
- //DEBUG(dbgs() << " Inserted value: " << Entry.getKeyData() << ": "
+ // LLVM_DEBUG(dbgs() << " Inserted value: " << Entry.getKeyData() << ": "
// << *V << "\n");
return &*IterBool.first;
}
diff --git a/lib/LTO/ThinLTOCodeGenerator.cpp b/lib/LTO/ThinLTOCodeGenerator.cpp
index 4ab1ce24a6fa..f164a034a0b0 100644
--- a/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -976,9 +976,9 @@ void ThinLTOCodeGenerator::run() {
{
auto ErrOrBuffer = CacheEntry.tryLoadingBuffer();
- DEBUG(dbgs() << "Cache " << (ErrOrBuffer ? "hit" : "miss") << " '"
- << CacheEntryPath << "' for buffer " << count << " "
- << ModuleIdentifier << "\n");
+ LLVM_DEBUG(dbgs() << "Cache " << (ErrOrBuffer ? "hit" : "miss")
+ << " '" << CacheEntryPath << "' for buffer "
+ << count << " " << ModuleIdentifier << "\n");
if (ErrOrBuffer) {
// Cache Hit!
diff --git a/lib/MC/MachObjectWriter.cpp b/lib/MC/MachObjectWriter.cpp
index ba452af2d94a..e107d273b455 100644
--- a/lib/MC/MachObjectWriter.cpp
+++ b/lib/MC/MachObjectWriter.cpp
@@ -951,12 +951,11 @@ void MachObjectWriter::writeObject(MCAssembler &Asm,
else
report_fatal_error("Data region not terminated");
-
- DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind
- << " start: " << Start << "(" << Data->Start->getName() << ")"
- << " end: " << End << "(" << Data->End->getName() << ")"
- << " size: " << End - Start
- << "\n");
+ LLVM_DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind
+ << " start: " << Start << "(" << Data->Start->getName()
+ << ")"
+ << " end: " << End << "(" << Data->End->getName() << ")"
+ << " size: " << End - Start << "\n");
write32(Start);
write16(End - Start);
write16(Data->Kind);
diff --git a/lib/MC/WasmObjectWriter.cpp b/lib/MC/WasmObjectWriter.cpp
index 85e189689e46..f1733d49fa37 100644
--- a/lib/MC/WasmObjectWriter.cpp
+++ b/lib/MC/WasmObjectWriter.cpp
@@ -354,7 +354,7 @@ WasmObjectWriter::~WasmObjectWriter() {}
// Write out a section header and a patchable section size field.
void WasmObjectWriter::startSection(SectionBookkeeping &Section,
unsigned SectionId) {
- DEBUG(dbgs() << "startSection " << SectionId << "\n");
+ LLVM_DEBUG(dbgs() << "startSection " << SectionId << "\n");
write8(SectionId);
Section.SizeOffset = getStream().tell();
@@ -371,7 +371,7 @@ void WasmObjectWriter::startSection(SectionBookkeeping &Section,
void WasmObjectWriter::startCustomSection(SectionBookkeeping &Section,
StringRef Name) {
- DEBUG(dbgs() << "startCustomSection " << Name << "\n");
+ LLVM_DEBUG(dbgs() << "startCustomSection " << Name << "\n");
startSection(Section, wasm::WASM_SEC_CUSTOM);
// The position where the section header ends, for measuring its size.
@@ -391,7 +391,7 @@ void WasmObjectWriter::endSection(SectionBookkeeping &Section) {
if (uint32_t(Size) != Size)
report_fatal_error("section size does not fit in a uint32_t");
- DEBUG(dbgs() << "endSection size=" << Size << "\n");
+ LLVM_DEBUG(dbgs() << "endSection size=" << Size << "\n");
// Write the final section size to the payload_len field, which follows
// the section id byte.
@@ -522,7 +522,7 @@ void WasmObjectWriter::recordRelocation(MCAssembler &Asm,
}
WasmRelocationEntry Rec(FixupOffset, SymA, C, Type, &FixupSection);
- DEBUG(dbgs() << "WasmReloc: " << Rec << "\n");
+ LLVM_DEBUG(dbgs() << "WasmReloc: " << Rec << "\n");
if (FixupSection.isWasmData()) {
DataRelocations.push_back(Rec);
@@ -621,7 +621,7 @@ WasmObjectWriter::getProvisionalValue(const WasmRelocationEntry &RelEntry) {
static void addData(SmallVectorImpl<char> &DataBytes,
MCSectionWasm &DataSection) {
- DEBUG(errs() << "addData: " << DataSection.getSectionName() << "\n");
+ LLVM_DEBUG(errs() << "addData: " << DataSection.getSectionName() << "\n");
DataBytes.resize(alignTo(DataBytes.size(), DataSection.getAlignment()));
@@ -652,7 +652,7 @@ static void addData(SmallVectorImpl<char> &DataBytes,
}
}
- DEBUG(dbgs() << "addData -> " << DataBytes.size() << "\n");
+ LLVM_DEBUG(dbgs() << "addData -> " << DataBytes.size() << "\n");
}
uint32_t
@@ -677,7 +677,7 @@ void WasmObjectWriter::applyRelocations(
RelEntry.FixupSection->getSectionOffset() +
RelEntry.Offset;
- DEBUG(dbgs() << "applyRelocation: " << RelEntry << "\n");
+ LLVM_DEBUG(dbgs() << "applyRelocation: " << RelEntry << "\n");
uint32_t Value = getProvisionalValue(RelEntry);
switch (RelEntry.Type) {
@@ -1058,8 +1058,9 @@ uint32_t WasmObjectWriter::registerFunctionType(const MCSymbolWasm& Symbol) {
FunctionTypes.push_back(F);
TypeIndices[&Symbol] = Pair.first->second;
- DEBUG(dbgs() << "registerFunctionType: " << Symbol << " new:" << Pair.second << "\n");
- DEBUG(dbgs() << " -> type index: " << Pair.first->second << "\n");
+ LLVM_DEBUG(dbgs() << "registerFunctionType: " << Symbol
+ << " new:" << Pair.second << "\n");
+ LLVM_DEBUG(dbgs() << " -> type index: " << Pair.first->second << "\n");
return Pair.first->second;
}
@@ -1084,7 +1085,7 @@ static bool isInSymtab(const MCSymbolWasm &Sym) {
void WasmObjectWriter::writeObject(MCAssembler &Asm,
const MCAsmLayout &Layout) {
- DEBUG(dbgs() << "WasmObjectWriter::writeObject\n");
+ LLVM_DEBUG(dbgs() << "WasmObjectWriter::writeObject\n");
MCContext &Ctx = Asm.getContext();
// Collect information from the available symbols.
@@ -1220,14 +1221,12 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
continue;
const auto &WS = static_cast<const MCSymbolWasm &>(S);
- DEBUG(dbgs() << "MCSymbol: " << toString(WS.getType())
- << " '" << S << "'"
- << " isDefined=" << S.isDefined()
- << " isExternal=" << S.isExternal()
- << " isTemporary=" << S.isTemporary()
- << " isWeak=" << WS.isWeak()
- << " isHidden=" << WS.isHidden()
- << " isVariable=" << WS.isVariable() << "\n");
+ LLVM_DEBUG(
+ dbgs() << "MCSymbol: " << toString(WS.getType()) << " '" << S << "'"
+ << " isDefined=" << S.isDefined() << " isExternal="
+ << S.isExternal() << " isTemporary=" << S.isTemporary()
+ << " isWeak=" << WS.isWeak() << " isHidden=" << WS.isHidden()
+ << " isVariable=" << WS.isVariable() << "\n");
if (WS.isVariable())
continue;
@@ -1263,13 +1262,14 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
Index = WasmIndices.find(&WS)->second;
}
- DEBUG(dbgs() << " -> function index: " << Index << "\n");
+ LLVM_DEBUG(dbgs() << " -> function index: " << Index << "\n");
} else if (WS.isData()) {
if (WS.isTemporary() && !WS.getSize())
continue;
if (!WS.isDefined()) {
- DEBUG(dbgs() << " -> segment index: -1" << "\n");
+ LLVM_DEBUG(dbgs() << " -> segment index: -1"
+ << "\n");
continue;
}
@@ -1291,15 +1291,15 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
static_cast<uint32_t>(Layout.getSymbolOffset(WS)),
static_cast<uint32_t>(Size)};
DataLocations[&WS] = Ref;
- DEBUG(dbgs() << " -> segment index: " << Ref.Segment << "\n");
+ LLVM_DEBUG(dbgs() << " -> segment index: " << Ref.Segment << "\n");
} else if (WS.isGlobal()) {
// A "true" Wasm global (currently just __stack_pointer)
if (WS.isDefined())
report_fatal_error("don't yet support defined globals");
// An import; the index was assigned above
- DEBUG(dbgs() << " -> global index: " << WasmIndices.find(&WS)->second
- << "\n");
+ LLVM_DEBUG(dbgs() << " -> global index: "
+ << WasmIndices.find(&WS)->second << "\n");
} else {
assert(WS.isSection());
}
@@ -1318,19 +1318,20 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
// Find the target symbol of this weak alias and export that index
const auto &WS = static_cast<const MCSymbolWasm &>(S);
const MCSymbolWasm *ResolvedSym = ResolveSymbol(WS);
- DEBUG(dbgs() << WS.getName() << ": weak alias of '" << *ResolvedSym << "'\n");
+ LLVM_DEBUG(dbgs() << WS.getName() << ": weak alias of '" << *ResolvedSym
+ << "'\n");
if (WS.isFunction()) {
assert(WasmIndices.count(ResolvedSym) > 0);
uint32_t WasmIndex = WasmIndices.find(ResolvedSym)->second;
WasmIndices[&WS] = WasmIndex;
- DEBUG(dbgs() << " -> index:" << WasmIndex << "\n");
+ LLVM_DEBUG(dbgs() << " -> index:" << WasmIndex << "\n");
} else if (WS.isData()) {
assert(DataLocations.count(ResolvedSym) > 0);
const wasm::WasmDataReference &Ref =
DataLocations.find(ResolvedSym)->second;
DataLocations[&WS] = Ref;
- DEBUG(dbgs() << " -> index:" << Ref.Segment << "\n");
+ LLVM_DEBUG(dbgs() << " -> index:" << Ref.Segment << "\n");
} else {
report_fatal_error("don't yet support global aliases");
}
@@ -1343,7 +1344,7 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
WS.setIndex(INVALID_INDEX);
continue;
}
- DEBUG(dbgs() << "adding to symtab: " << WS << "\n");
+ LLVM_DEBUG(dbgs() << "adding to symtab: " << WS << "\n");
uint32_t Flags = 0;
if (WS.isWeak())
@@ -1383,8 +1384,8 @@ void WasmObjectWriter::writeObject(MCAssembler &Asm,
uint32_t FunctionIndex = WasmIndices.find(&WS)->second;
uint32_t TableIndex = TableElems.size() + kInitialTableOffset;
if (TableIndices.try_emplace(&WS, TableIndex).second) {
- DEBUG(dbgs() << " -> adding " << WS.getName()
- << " to table: " << TableIndex << "\n");
+ LLVM_DEBUG(dbgs() << " -> adding " << WS.getName()
+ << " to table: " << TableIndex << "\n");
TableElems.push_back(FunctionIndex);
registerFunctionType(WS);
}
diff --git a/lib/Object/WasmObjectFile.cpp b/lib/Object/WasmObjectFile.cpp
index 61d05a622ac5..2a2f1652bd8f 100644
--- a/lib/Object/WasmObjectFile.cpp
+++ b/lib/Object/WasmObjectFile.cpp
@@ -492,7 +492,7 @@ Error WasmObjectFile::parseLinkingSectionSymtab(const uint8_t *&Ptr,
LinkingData.SymbolTable.emplace_back(Info);
Symbols.emplace_back(LinkingData.SymbolTable.back(), FunctionType,
GlobalType);
- DEBUG(dbgs() << "Adding symbol: " << Symbols.back() << "\n");
+ LLVM_DEBUG(dbgs() << "Adding symbol: " << Symbols.back() << "\n");
}
return Error::success();
@@ -967,7 +967,7 @@ uint32_t WasmObjectFile::getSymbolFlags(DataRefImpl Symb) const {
uint32_t Result = SymbolRef::SF_None;
const WasmSymbol &Sym = getWasmSymbol(Symb);
- DEBUG(dbgs() << "getSymbolFlags: ptr=" << &Sym << " " << Sym << "\n");
+ LLVM_DEBUG(dbgs() << "getSymbolFlags: ptr=" << &Sym << " " << Sym << "\n");
if (Sym.isBindingWeak())
Result |= SymbolRef::SF_Weak;
if (!Sym.isBindingLocal())
diff --git a/lib/ProfileData/Coverage/CoverageMapping.cpp b/lib/ProfileData/Coverage/CoverageMapping.cpp
index ac0793dd4242..b3c2b182e76c 100644
--- a/lib/ProfileData/Coverage/CoverageMapping.cpp
+++ b/lib/ProfileData/Coverage/CoverageMapping.cpp
@@ -346,7 +346,7 @@ class SegmentBuilder {
else
Segments.emplace_back(StartLoc.first, StartLoc.second, IsRegionEntry);
- DEBUG({
+ LLVM_DEBUG({
const auto &Last = Segments.back();
dbgs() << "Segment at " << Last.Line << ":" << Last.Col
<< " (count = " << Last.Count << ")"
@@ -524,7 +524,7 @@ public:
sortNestedRegions(Regions);
ArrayRef<CountedRegion> CombinedRegions = combineRegions(Regions);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Combined regions:\n";
for (const auto &CR : CombinedRegions)
dbgs() << " " << CR.LineStart << ":" << CR.ColumnStart << " -> "
@@ -539,8 +539,8 @@ public:
const auto &L = Segments[I - 1];
const auto &R = Segments[I];
if (!(L.Line < R.Line) && !(L.Line == R.Line && L.Col < R.Col)) {
- DEBUG(dbgs() << " ! Segment " << L.Line << ":" << L.Col
- << " followed by " << R.Line << ":" << R.Col << "\n");
+ LLVM_DEBUG(dbgs() << " ! Segment " << L.Line << ":" << L.Col
+ << " followed by " << R.Line << ":" << R.Col << "\n");
assert(false && "Coverage segments not unique or sorted");
}
}
@@ -613,7 +613,7 @@ CoverageData CoverageMapping::getCoverageForFile(StringRef Filename) const {
}
}
- DEBUG(dbgs() << "Emitting segments for file: " << Filename << "\n");
+ LLVM_DEBUG(dbgs() << "Emitting segments for file: " << Filename << "\n");
FileCoverage.Segments = SegmentBuilder::buildSegments(Regions);
return FileCoverage;
@@ -654,7 +654,8 @@ CoverageMapping::getCoverageForFunction(const FunctionRecord &Function) const {
FunctionCoverage.Expansions.emplace_back(CR, Function);
}
- DEBUG(dbgs() << "Emitting segments for function: " << Function.Name << "\n");
+ LLVM_DEBUG(dbgs() << "Emitting segments for function: " << Function.Name
+ << "\n");
FunctionCoverage.Segments = SegmentBuilder::buildSegments(Regions);
return FunctionCoverage;
@@ -672,8 +673,8 @@ CoverageData CoverageMapping::getCoverageForExpansion(
ExpansionCoverage.Expansions.emplace_back(CR, Expansion.Function);
}
- DEBUG(dbgs() << "Emitting segments for expansion of file " << Expansion.FileID
- << "\n");
+ LLVM_DEBUG(dbgs() << "Emitting segments for expansion of file "
+ << Expansion.FileID << "\n");
ExpansionCoverage.Segments = SegmentBuilder::buildSegments(Regions);
return ExpansionCoverage;
diff --git a/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/lib/ProfileData/Coverage/CoverageMappingReader.cpp
index 5ccb23f28f72..ee48256bc2e5 100644
--- a/lib/ProfileData/Coverage/CoverageMappingReader.cpp
+++ b/lib/ProfileData/Coverage/CoverageMappingReader.cpp
@@ -228,7 +228,7 @@ Error RawCoverageMappingReader::readMappingRegionsSubArray(
ColumnEnd = std::numeric_limits<unsigned>::max();
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Counter in file " << InferredFileID << " " << LineStart << ":"
<< ColumnStart << " -> " << (LineStart + NumLines) << ":"
<< ColumnEnd << ", ";
diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp
index 98e44d249668..2c2ce9588a2c 100644
--- a/lib/Support/APInt.cpp
+++ b/lib/Support/APInt.cpp
@@ -1253,18 +1253,20 @@ static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r,
// The DEBUG macros here tend to be spam in the debug output if you're not
// debugging this code. Disable them unless KNUTH_DEBUG is defined.
-#pragma push_macro("DEBUG")
+#pragma push_macro("LLVM_DEBUG")
#ifndef KNUTH_DEBUG
-#undef DEBUG
-#define DEBUG(X) do {} while (false)
+#undef LLVM_DEBUG
+#define LLVM_DEBUG(X) \
+ do { \
+ } while (false)
#endif
- DEBUG(dbgs() << "KnuthDiv: m=" << m << " n=" << n << '\n');
- DEBUG(dbgs() << "KnuthDiv: original:");
- DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]);
- DEBUG(dbgs() << " by");
- DEBUG(for (int i = n; i >0; i--) dbgs() << " " << v[i-1]);
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: m=" << m << " n=" << n << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: original:");
+ LLVM_DEBUG(for (int i = m + n; i >= 0; i--) dbgs() << " " << u[i]);
+ LLVM_DEBUG(dbgs() << " by");
+ LLVM_DEBUG(for (int i = n; i > 0; i--) dbgs() << " " << v[i - 1]);
+ LLVM_DEBUG(dbgs() << '\n');
// D1. [Normalize.] Set d = b / (v[n-1] + 1) and multiply all the digits of
// u and v by d. Note that we have taken Knuth's advice here to use a power
// of 2 value for d such that d * v[n-1] >= b/2 (b is the base). A power of
@@ -1290,16 +1292,16 @@ static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r,
}
u[m+n] = u_carry;
- DEBUG(dbgs() << "KnuthDiv: normal:");
- DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]);
- DEBUG(dbgs() << " by");
- DEBUG(for (int i = n; i >0; i--) dbgs() << " " << v[i-1]);
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: normal:");
+ LLVM_DEBUG(for (int i = m + n; i >= 0; i--) dbgs() << " " << u[i]);
+ LLVM_DEBUG(dbgs() << " by");
+ LLVM_DEBUG(for (int i = n; i > 0; i--) dbgs() << " " << v[i - 1]);
+ LLVM_DEBUG(dbgs() << '\n');
// D2. [Initialize j.] Set j to m. This is the loop counter over the places.
int j = m;
do {
- DEBUG(dbgs() << "KnuthDiv: quotient digit #" << j << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: quotient digit #" << j << '\n');
// D3. [Calculate q'.].
// Set qp = (u[j+n]*b + u[j+n-1]) / v[n-1]. (qp=qprime=q')
// Set rp = (u[j+n]*b + u[j+n-1]) % v[n-1]. (rp=rprime=r')
@@ -1309,7 +1311,7 @@ static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r,
// value qp is one too large, and it eliminates all cases where qp is two
// too large.
uint64_t dividend = Make_64(u[j+n], u[j+n-1]);
- DEBUG(dbgs() << "KnuthDiv: dividend == " << dividend << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: dividend == " << dividend << '\n');
uint64_t qp = dividend / v[n-1];
uint64_t rp = dividend % v[n-1];
if (qp == b || qp*v[n-2] > b*rp + u[j+n-2]) {
@@ -1318,7 +1320,7 @@ static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r,
if (rp < b && (qp == b || qp*v[n-2] > b*rp + u[j+n-2]))
qp--;
}
- DEBUG(dbgs() << "KnuthDiv: qp == " << qp << ", rp == " << rp << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: qp == " << qp << ", rp == " << rp << '\n');
// D4. [Multiply and subtract.] Replace (u[j+n]u[j+n-1]...u[j]) with
// (u[j+n]u[j+n-1]..u[j]) - qp * (v[n-1]...v[1]v[0]). This computation
@@ -1334,15 +1336,15 @@ static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r,
int64_t subres = int64_t(u[j+i]) - borrow - Lo_32(p);
u[j+i] = Lo_32(subres);
borrow = Hi_32(p) - Hi_32(subres);
- DEBUG(dbgs() << "KnuthDiv: u[j+i] = " << u[j+i]
- << ", borrow = " << borrow << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: u[j+i] = " << u[j + i]
+ << ", borrow = " << borrow << '\n');
}
bool isNeg = u[j+n] < borrow;
u[j+n] -= Lo_32(borrow);
- DEBUG(dbgs() << "KnuthDiv: after subtraction:");
- DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]);
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: after subtraction:");
+ LLVM_DEBUG(for (int i = m + n; i >= 0; i--) dbgs() << " " << u[i]);
+ LLVM_DEBUG(dbgs() << '\n');
// D5. [Test remainder.] Set q[j] = qp. If the result of step D4 was
// negative, go to step D6; otherwise go on to step D7.
@@ -1363,16 +1365,16 @@ static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r,
}
u[j+n] += carry;
}
- DEBUG(dbgs() << "KnuthDiv: after correction:");
- DEBUG(for (int i = m+n; i >=0; i--) dbgs() << " " << u[i]);
- DEBUG(dbgs() << "\nKnuthDiv: digit result = " << q[j] << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: after correction:");
+ LLVM_DEBUG(for (int i = m + n; i >= 0; i--) dbgs() << " " << u[i]);
+ LLVM_DEBUG(dbgs() << "\nKnuthDiv: digit result = " << q[j] << '\n');
- // D7. [Loop on j.] Decrease j by one. Now if j >= 0, go back to D3.
+ // D7. [Loop on j.] Decrease j by one. Now if j >= 0, go back to D3.
} while (--j >= 0);
- DEBUG(dbgs() << "KnuthDiv: quotient:");
- DEBUG(for (int i = m; i >=0; i--) dbgs() <<" " << q[i]);
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "KnuthDiv: quotient:");
+ LLVM_DEBUG(for (int i = m; i >= 0; i--) dbgs() << " " << q[i]);
+ LLVM_DEBUG(dbgs() << '\n');
// D8. [Unnormalize]. Now q[...] is the desired quotient, and the desired
// remainder may be obtained by dividing u[...] by d. If r is non-null we
@@ -1383,23 +1385,23 @@ static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r,
// shift right here.
if (shift) {
uint32_t carry = 0;
- DEBUG(dbgs() << "KnuthDiv: remainder:");
+ LLVM_DEBUG(dbgs() << "KnuthDiv: remainder:");
for (int i = n-1; i >= 0; i--) {
r[i] = (u[i] >> shift) | carry;
carry = u[i] << (32 - shift);
- DEBUG(dbgs() << " " << r[i]);
+ LLVM_DEBUG(dbgs() << " " << r[i]);
}
} else {
for (int i = n-1; i >= 0; i--) {
r[i] = u[i];
- DEBUG(dbgs() << " " << r[i]);
+ LLVM_DEBUG(dbgs() << " " << r[i]);
}
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
-#pragma pop_macro("DEBUG")
+#pragma pop_macro("LLVM_DEBUG")
}
void APInt::divide(const WordType *LHS, unsigned lhsWords, const WordType *RHS,
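Aside: the APInt.cpp hunks above show a place where the rename interacts with the preprocessor rather than being a plain textual substitution — KnuthDiv temporarily redefines the debug macro to a no-op so its per-digit traces stay silent unless KNUTH_DEBUG is defined, so the push_macro/pop_macro pair now has to name LLVM_DEBUG. A minimal sketch of the same pattern applied to a hypothetical noisy helper follows; NOISY_HELPER_DEBUG and sumDigits are illustrative names and not part of this commit, while LLVM_DEBUG, DEBUG_TYPE and dbgs() are the real LLVM facilities.

  // A minimal sketch, assuming a hypothetical helper with its own opt-in
  // debug guard, in the spirit of the KnuthDiv hunk above.
  #define DEBUG_TYPE "noisy-helper"
  #include "llvm/Support/Debug.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  // Silence LLVM_DEBUG inside this helper unless NOISY_HELPER_DEBUG is
  // defined, then restore the previous definition of the macro afterwards.
  #pragma push_macro("LLVM_DEBUG")
  #ifndef NOISY_HELPER_DEBUG
  #undef LLVM_DEBUG
  #define LLVM_DEBUG(X) do { } while (false)
  #endif

  static int sumDigits(const int *Digits, int N) {
    int Sum = 0;
    for (int I = 0; I < N; ++I) {
      Sum += Digits[I];
      // Only printed when NOISY_HELPER_DEBUG is defined (and -debug is on).
      LLVM_DEBUG(dbgs() << "sumDigits: partial sum = " << Sum << '\n');
    }
    return Sum;
  }

  #pragma pop_macro("LLVM_DEBUG")

Because push_macro/pop_macro operate on the macro's name, any such site has to be updated by hand when the name changes, which is why this file could not be converted by the sed invocation alone.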
diff --git a/lib/Support/CachePruning.cpp b/lib/Support/CachePruning.cpp
index 141573c2a1c7..7326c4fc91fb 100644
--- a/lib/Support/CachePruning.cpp
+++ b/lib/Support/CachePruning.cpp
@@ -146,7 +146,7 @@ bool llvm::pruneCache(StringRef Path, CachePruningPolicy Policy) {
if (Policy.Expiration == seconds(0) &&
Policy.MaxSizePercentageOfAvailableSpace == 0 &&
Policy.MaxSizeBytes == 0 && Policy.MaxSizeFiles == 0) {
- DEBUG(dbgs() << "No pruning settings set, exit early\n");
+ LLVM_DEBUG(dbgs() << "No pruning settings set, exit early\n");
// Nothing will be pruned, early exit
return false;
}
@@ -173,9 +173,9 @@ bool llvm::pruneCache(StringRef Path, CachePruningPolicy Policy) {
const auto TimeStampModTime = FileStatus.getLastModificationTime();
auto TimeStampAge = CurrentTime - TimeStampModTime;
if (TimeStampAge <= *Policy.Interval) {
- DEBUG(dbgs() << "Timestamp file too recent ("
- << duration_cast<seconds>(TimeStampAge).count()
- << "s old), do not prune.\n");
+ LLVM_DEBUG(dbgs() << "Timestamp file too recent ("
+ << duration_cast<seconds>(TimeStampAge).count()
+ << "s old), do not prune.\n");
return false;
}
}
@@ -207,7 +207,7 @@ bool llvm::pruneCache(StringRef Path, CachePruningPolicy Policy) {
// there.
ErrorOr<sys::fs::basic_file_status> StatusOrErr = File->status();
if (!StatusOrErr) {
- DEBUG(dbgs() << "Ignore " << File->path() << " (can't stat)\n");
+ LLVM_DEBUG(dbgs() << "Ignore " << File->path() << " (can't stat)\n");
continue;
}
@@ -215,8 +215,9 @@ bool llvm::pruneCache(StringRef Path, CachePruningPolicy Policy) {
const auto FileAccessTime = StatusOrErr->getLastAccessedTime();
auto FileAge = CurrentTime - FileAccessTime;
if (Policy.Expiration != seconds(0) && FileAge > Policy.Expiration) {
- DEBUG(dbgs() << "Remove " << File->path() << " ("
- << duration_cast<seconds>(FileAge).count() << "s old)\n");
+ LLVM_DEBUG(dbgs() << "Remove " << File->path() << " ("
+ << duration_cast<seconds>(FileAge).count()
+ << "s old)\n");
sys::fs::remove(File->path());
continue;
}
@@ -235,9 +236,9 @@ bool llvm::pruneCache(StringRef Path, CachePruningPolicy Policy) {
// Update size
TotalSize -= FileAndSize->first;
NumFiles--;
- DEBUG(dbgs() << " - Remove " << FileAndSize->second << " (size "
- << FileAndSize->first << "), new occupancy is " << TotalSize
- << "%\n");
+ LLVM_DEBUG(dbgs() << " - Remove " << FileAndSize->second << " (size "
+ << FileAndSize->first << "), new occupancy is "
+ << TotalSize << "%\n");
++FileAndSize;
};
@@ -263,9 +264,10 @@ bool llvm::pruneCache(StringRef Path, CachePruningPolicy Policy) {
AvailableSpace * Policy.MaxSizePercentageOfAvailableSpace / 100ull,
Policy.MaxSizeBytes);
- DEBUG(dbgs() << "Occupancy: " << ((100 * TotalSize) / AvailableSpace)
- << "% target is: " << Policy.MaxSizePercentageOfAvailableSpace
- << "%, " << Policy.MaxSizeBytes << " bytes\n");
+ LLVM_DEBUG(dbgs() << "Occupancy: " << ((100 * TotalSize) / AvailableSpace)
+ << "% target is: "
+ << Policy.MaxSizePercentageOfAvailableSpace << "%, "
+ << Policy.MaxSizeBytes << " bytes\n");
// Remove the oldest accessed files first, till we get below the threshold.
while (TotalSize > TotalSizeTarget && FileAndSize != FileSizes.rend())
diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp
index d2edc6368ed0..f1be43c027b2 100644
--- a/lib/Support/CommandLine.cpp
+++ b/lib/Support/CommandLine.cpp
@@ -1375,9 +1375,9 @@ bool CommandLineParser::ParseCommandLineOptions(int argc,
// Now that we know if -debug is specified, we can use it.
// Note that if ReadResponseFiles == true, this must be done before the
// memory allocated for the expanded command line is free()d below.
- DEBUG(dbgs() << "Args: ";
- for (int i = 0; i < argc; ++i) dbgs() << argv[i] << ' ';
- dbgs() << '\n';);
+ LLVM_DEBUG(dbgs() << "Args: ";
+ for (int i = 0; i < argc; ++i) dbgs() << argv[i] << ' ';
+ dbgs() << '\n';);
// Free all of the memory allocated to the map. Command line options may only
// be processed once!
diff --git a/lib/Support/DAGDeltaAlgorithm.cpp b/lib/Support/DAGDeltaAlgorithm.cpp
index f1a334bfc7be..b82aec1423f5 100644
--- a/lib/Support/DAGDeltaAlgorithm.cpp
+++ b/lib/Support/DAGDeltaAlgorithm.cpp
@@ -124,13 +124,13 @@ private:
/// ExecuteOneTest - Execute a single test predicate on the change set \p S.
bool ExecuteOneTest(const changeset_ty &S) {
// Check dependencies invariant.
- DEBUG({
- for (changeset_ty::const_iterator it = S.begin(),
- ie = S.end(); it != ie; ++it)
- for (succ_iterator_ty it2 = succ_begin(*it),
- ie2 = succ_end(*it); it2 != ie2; ++it2)
- assert(S.count(*it2) && "Attempt to run invalid changeset!");
- });
+ LLVM_DEBUG({
+ for (changeset_ty::const_iterator it = S.begin(), ie = S.end(); it != ie;
+ ++it)
+ for (succ_iterator_ty it2 = succ_begin(*it), ie2 = succ_end(*it);
+ it2 != ie2; ++it2)
+ assert(S.count(*it2) && "Attempt to run invalid changeset!");
+ });
return DDA.ExecuteOneTest(S);
}
@@ -224,60 +224,68 @@ DAGDeltaAlgorithmImpl::DAGDeltaAlgorithmImpl(
PredClosure[*it2].insert(*it);
// Dump useful debug info.
- DEBUG({
- llvm::errs() << "-- DAGDeltaAlgorithmImpl --\n";
- llvm::errs() << "Changes: [";
- for (changeset_ty::const_iterator it = Changes.begin(),
- ie = Changes.end(); it != ie; ++it) {
- if (it != Changes.begin()) llvm::errs() << ", ";
- llvm::errs() << *it;
-
- if (succ_begin(*it) != succ_end(*it)) {
- llvm::errs() << "(";
- for (succ_iterator_ty it2 = succ_begin(*it),
- ie2 = succ_end(*it); it2 != ie2; ++it2) {
- if (it2 != succ_begin(*it)) llvm::errs() << ", ";
- llvm::errs() << "->" << *it2;
- }
- llvm::errs() << ")";
+ LLVM_DEBUG({
+ llvm::errs() << "-- DAGDeltaAlgorithmImpl --\n";
+ llvm::errs() << "Changes: [";
+ for (changeset_ty::const_iterator it = Changes.begin(), ie = Changes.end();
+ it != ie; ++it) {
+ if (it != Changes.begin())
+ llvm::errs() << ", ";
+ llvm::errs() << *it;
+
+ if (succ_begin(*it) != succ_end(*it)) {
+ llvm::errs() << "(";
+ for (succ_iterator_ty it2 = succ_begin(*it), ie2 = succ_end(*it);
+ it2 != ie2; ++it2) {
+ if (it2 != succ_begin(*it))
+ llvm::errs() << ", ";
+ llvm::errs() << "->" << *it2;
}
+ llvm::errs() << ")";
}
- llvm::errs() << "]\n";
-
- llvm::errs() << "Roots: [";
- for (std::vector<change_ty>::const_iterator it = Roots.begin(),
- ie = Roots.end(); it != ie; ++it) {
- if (it != Roots.begin()) llvm::errs() << ", ";
- llvm::errs() << *it;
+ }
+ llvm::errs() << "]\n";
+
+ llvm::errs() << "Roots: [";
+ for (std::vector<change_ty>::const_iterator it = Roots.begin(),
+ ie = Roots.end();
+ it != ie; ++it) {
+ if (it != Roots.begin())
+ llvm::errs() << ", ";
+ llvm::errs() << *it;
+ }
+ llvm::errs() << "]\n";
+
+ llvm::errs() << "Predecessor Closure:\n";
+ for (changeset_ty::const_iterator it = Changes.begin(), ie = Changes.end();
+ it != ie; ++it) {
+ llvm::errs() << format(" %-4d: [", *it);
+ for (pred_closure_iterator_ty it2 = pred_closure_begin(*it),
+ ie2 = pred_closure_end(*it);
+ it2 != ie2; ++it2) {
+ if (it2 != pred_closure_begin(*it))
+ llvm::errs() << ", ";
+ llvm::errs() << *it2;
}
llvm::errs() << "]\n";
+ }
- llvm::errs() << "Predecessor Closure:\n";
- for (changeset_ty::const_iterator it = Changes.begin(),
- ie = Changes.end(); it != ie; ++it) {
- llvm::errs() << format(" %-4d: [", *it);
- for (pred_closure_iterator_ty it2 = pred_closure_begin(*it),
- ie2 = pred_closure_end(*it); it2 != ie2; ++it2) {
- if (it2 != pred_closure_begin(*it)) llvm::errs() << ", ";
- llvm::errs() << *it2;
- }
- llvm::errs() << "]\n";
- }
-
- llvm::errs() << "Successor Closure:\n";
- for (changeset_ty::const_iterator it = Changes.begin(),
- ie = Changes.end(); it != ie; ++it) {
- llvm::errs() << format(" %-4d: [", *it);
- for (succ_closure_iterator_ty it2 = succ_closure_begin(*it),
- ie2 = succ_closure_end(*it); it2 != ie2; ++it2) {
- if (it2 != succ_closure_begin(*it)) llvm::errs() << ", ";
- llvm::errs() << *it2;
- }
- llvm::errs() << "]\n";
+ llvm::errs() << "Successor Closure:\n";
+ for (changeset_ty::const_iterator it = Changes.begin(), ie = Changes.end();
+ it != ie; ++it) {
+ llvm::errs() << format(" %-4d: [", *it);
+ for (succ_closure_iterator_ty it2 = succ_closure_begin(*it),
+ ie2 = succ_closure_end(*it);
+ it2 != ie2; ++it2) {
+ if (it2 != succ_closure_begin(*it))
+ llvm::errs() << ", ";
+ llvm::errs() << *it2;
}
+ llvm::errs() << "]\n";
+ }
- llvm::errs() << "\n\n";
- });
+ llvm::errs() << "\n\n";
+ });
}
bool DAGDeltaAlgorithmImpl::GetTestResult(const changeset_ty &Changes,
@@ -312,10 +320,10 @@ DAGDeltaAlgorithmImpl::Run() {
// Invariant: CurrentSet intersect Required == {}
// Invariant: Required == (Required union succ*(Required))
while (!CurrentSet.empty()) {
- DEBUG({
- llvm::errs() << "DAG_DD - " << CurrentSet.size() << " active changes, "
- << Required.size() << " required changes\n";
- });
+ LLVM_DEBUG({
+ llvm::errs() << "DAG_DD - " << CurrentSet.size() << " active changes, "
+ << Required.size() << " required changes\n";
+ });
// Minimize the current set of changes.
DeltaActiveSetHelper Helper(*this, Required);
diff --git a/lib/Support/Debug.cpp b/lib/Support/Debug.cpp
index 9132911479a1..1a70017fee32 100644
--- a/lib/Support/Debug.cpp
+++ b/lib/Support/Debug.cpp
@@ -11,15 +11,16 @@
// code, without it being enabled all of the time, and without having to add
// command line options to enable it.
//
-// In particular, just wrap your code with the DEBUG() macro, and it will be
-// enabled automatically if you specify '-debug' on the command-line.
+// In particular, just wrap your code with the LLVM_DEBUG() macro, and it will
+// be enabled automatically if you specify '-debug' on the command-line.
// Alternatively, you can also use the SET_DEBUG_TYPE("foo") macro to specify
// that your debug code belongs to class "foo". Then, on the command line, you
// can specify '-debug-only=foo' to enable JUST the debug information for the
// foo class.
//
// When compiling without assertions, the -debug-* options and all code in
-// DEBUG() statements disappears, so it does not affect the runtime of the code.
+// LLVM_DEBUG() statements disappears, so it does not affect the runtime of the
+// code.
//
//===----------------------------------------------------------------------===//
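For context on the renamed macro's intended use (described by the Debug.cpp header comment above), here is a minimal, hypothetical sketch of how a pass typically employs it. The pass name, the messages, and the define-DEBUG_TYPE-before-including-Debug.h convention are assumptions of this sketch rather than part of the commit; LLVM_DEBUG, dbgs(), -debug and -debug-only are the real facilities.

  // A minimal sketch, not part of this commit: "my-example-pass" and
  // runExample are made-up names.
  #define DEBUG_TYPE "my-example-pass"
  #include "llvm/Support/Debug.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void runExample(int NumItems) {
    // Single-statement form; gated at runtime by -debug / -debug-only.
    LLVM_DEBUG(dbgs() << "visiting " << NumItems << " items\n");

    // Block form, as used by several hunks above, for multi-statement dumps
    // that share one guard.
    LLVM_DEBUG({
      dbgs() << "detailed dump:\n";
      for (int I = 0; I < NumItems; ++I)
        dbgs() << "  item " << I << '\n';
    });
  }

With assertions enabled, '-debug' prints all such output and '-debug-only=my-example-pass' restricts it to this DEBUG_TYPE; in builds without assertions both forms compile away, as the header comment above notes.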
diff --git a/lib/Support/RandomNumberGenerator.cpp b/lib/Support/RandomNumberGenerator.cpp
index 6f1e559aaa0a..f1f22af82a81 100644
--- a/lib/Support/RandomNumberGenerator.cpp
+++ b/lib/Support/RandomNumberGenerator.cpp
@@ -36,10 +36,8 @@ static cl::opt<unsigned long long>
cl::desc("Seed for the random number generator"), cl::init(0));
RandomNumberGenerator::RandomNumberGenerator(StringRef Salt) {
- DEBUG(
- if (Seed == 0)
- dbgs() << "Warning! Using unseeded random number generator.\n"
- );
+ LLVM_DEBUG(if (Seed == 0) dbgs()
+ << "Warning! Using unseeded random number generator.\n");
// Combine seed and salts using std::seed_seq.
// Data: Seed-low, Seed-high, Salt
diff --git a/lib/Target/AArch64/AArch64A53Fix835769.cpp b/lib/Target/AArch64/AArch64A53Fix835769.cpp
index 7de5d0ef66b1..30232afaf024 100644
--- a/lib/Target/AArch64/AArch64A53Fix835769.cpp
+++ b/lib/Target/AArch64/AArch64A53Fix835769.cpp
@@ -116,7 +116,7 @@ INITIALIZE_PASS(AArch64A53Fix835769, "aarch64-fix-cortex-a53-835769-pass",
bool
AArch64A53Fix835769::runOnMachineFunction(MachineFunction &F) {
- DEBUG(dbgs() << "***** AArch64A53Fix835769 *****\n");
+ LLVM_DEBUG(dbgs() << "***** AArch64A53Fix835769 *****\n");
bool Changed = false;
TII = F.getSubtarget().getInstrInfo();
@@ -190,7 +190,8 @@ static void insertNopBeforeInstruction(MachineBasicBlock &MBB, MachineInstr* MI,
bool
AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
- DEBUG(dbgs() << "Running on MBB: " << MBB << " - scanning instructions...\n");
+ LLVM_DEBUG(dbgs() << "Running on MBB: " << MBB
+ << " - scanning instructions...\n");
// First, scan the basic block, looking for a sequence of 2 instructions
// that match the conditions under which the erratum may trigger.
@@ -206,17 +207,17 @@ AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) {
for (auto &MI : MBB) {
MachineInstr *CurrInstr = &MI;
- DEBUG(dbgs() << " Examining: " << MI);
+ LLVM_DEBUG(dbgs() << " Examining: " << MI);
if (PrevInstr) {
- DEBUG(dbgs() << " PrevInstr: " << *PrevInstr
- << " CurrInstr: " << *CurrInstr
- << " isFirstInstructionInSequence(PrevInstr): "
- << isFirstInstructionInSequence(PrevInstr) << "\n"
- << " isSecondInstructionInSequence(CurrInstr): "
- << isSecondInstructionInSequence(CurrInstr) << "\n");
+ LLVM_DEBUG(dbgs() << " PrevInstr: " << *PrevInstr
+ << " CurrInstr: " << *CurrInstr
+ << " isFirstInstructionInSequence(PrevInstr): "
+ << isFirstInstructionInSequence(PrevInstr) << "\n"
+ << " isSecondInstructionInSequence(CurrInstr): "
+ << isSecondInstructionInSequence(CurrInstr) << "\n");
if (isFirstInstructionInSequence(PrevInstr) &&
isSecondInstructionInSequence(CurrInstr)) {
- DEBUG(dbgs() << " ** pattern found at Idx " << Idx << "!\n");
+ LLVM_DEBUG(dbgs() << " ** pattern found at Idx " << Idx << "!\n");
Sequences.push_back(CurrInstr);
}
}
@@ -225,8 +226,8 @@ AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) {
++Idx;
}
- DEBUG(dbgs() << "Scan complete, " << Sequences.size()
- << " occurrences of pattern found.\n");
+ LLVM_DEBUG(dbgs() << "Scan complete, " << Sequences.size()
+ << " occurrences of pattern found.\n");
// Then update the basic block, inserting nops between the detected sequences.
for (auto &MI : Sequences) {
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 330237fc10b6..a95476b91187 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -315,7 +315,7 @@ bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
return false;
bool Changed = false;
- DEBUG(dbgs() << "***** AArch64A57FPLoadBalancing *****\n");
+ LLVM_DEBUG(dbgs() << "***** AArch64A57FPLoadBalancing *****\n");
MRI = &F.getRegInfo();
TRI = F.getRegInfo().getTargetRegisterInfo();
@@ -330,7 +330,8 @@ bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
- DEBUG(dbgs() << "Running on MBB: " << MBB << " - scanning instructions...\n");
+ LLVM_DEBUG(dbgs() << "Running on MBB: " << MBB
+ << " - scanning instructions...\n");
// First, scan the basic block producing a set of chains.
@@ -343,7 +344,8 @@ bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) {
for (auto &MI : MBB)
scanInstruction(&MI, Idx++, ActiveChains, AllChains);
- DEBUG(dbgs() << "Scan complete, "<< AllChains.size() << " chains created.\n");
+ LLVM_DEBUG(dbgs() << "Scan complete, " << AllChains.size()
+ << " chains created.\n");
// Group the chains into disjoint sets based on their liveness range. This is
// a poor-man's version of graph coloring. Ideally we'd create an interference
@@ -360,7 +362,7 @@ bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) {
for (auto &J : AllChains)
if (I != J && I->rangeOverlapsWith(*J))
EC.unionSets(I.get(), J.get());
- DEBUG(dbgs() << "Created " << EC.getNumClasses() << " disjoint sets.\n");
+ LLVM_DEBUG(dbgs() << "Created " << EC.getNumClasses() << " disjoint sets.\n");
// Now we assume that every member of an equivalence class interferes
// with every other member of that class, and with no members of other classes.
@@ -440,7 +442,7 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV,
MachineBasicBlock &MBB,
int &Parity) {
bool Changed = false;
- DEBUG(dbgs() << "colorChainSet(): #sets=" << GV.size() << "\n");
+ LLVM_DEBUG(dbgs() << "colorChainSet(): #sets=" << GV.size() << "\n");
// Sort by descending size order so that we allocate the most important
// sets first.
@@ -470,16 +472,18 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV,
// But if we really don't care, use the chain's preferred color.
C = G->getPreferredColor();
- DEBUG(dbgs() << " - Parity=" << Parity << ", Color="
- << ColorNames[(int)C] << "\n");
+ LLVM_DEBUG(dbgs() << " - Parity=" << Parity
+ << ", Color=" << ColorNames[(int)C] << "\n");
// If we'll need a fixup FMOV, don't bother. Testing has shown that this
// happens infrequently and when it does it has at least a 50% chance of
// slowing code down instead of speeding it up.
if (G->requiresFixup() && C != G->getPreferredColor()) {
C = G->getPreferredColor();
- DEBUG(dbgs() << " - " << G->str() << " - not worthwhile changing; "
- "color remains " << ColorNames[(int)C] << "\n");
+ LLVM_DEBUG(dbgs() << " - " << G->str()
+ << " - not worthwhile changing; "
+ "color remains "
+ << ColorNames[(int)C] << "\n");
}
Changed |= colorChain(G, C, MBB);
@@ -528,17 +532,17 @@ int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C,
MachineBasicBlock &MBB) {
bool Changed = false;
- DEBUG(dbgs() << " - colorChain(" << G->str() << ", "
- << ColorNames[(int)C] << ")\n");
+ LLVM_DEBUG(dbgs() << " - colorChain(" << G->str() << ", "
+ << ColorNames[(int)C] << ")\n");
// Try and obtain a free register of the right class. Without a register
// to play with we cannot continue.
int Reg = scavengeRegister(G, C, MBB);
if (Reg == -1) {
- DEBUG(dbgs() << "Scavenging (thus coloring) failed!\n");
+ LLVM_DEBUG(dbgs() << "Scavenging (thus coloring) failed!\n");
return false;
}
- DEBUG(dbgs() << " - Scavenged register: " << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << " - Scavenged register: " << printReg(Reg, TRI) << "\n");
std::map<unsigned, unsigned> Substs;
for (MachineInstr &I : *G) {
@@ -586,11 +590,11 @@ bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C,
assert(Substs.size() == 0 && "No substitutions should be left active!");
if (G->getKill()) {
- DEBUG(dbgs() << " - Kill instruction seen.\n");
+ LLVM_DEBUG(dbgs() << " - Kill instruction seen.\n");
} else {
// We didn't have a kill instruction, but we didn't seem to need to change
// the destination register anyway.
- DEBUG(dbgs() << " - Destination register not changed.\n");
+ LLVM_DEBUG(dbgs() << " - Destination register not changed.\n");
}
return Changed;
}
@@ -611,8 +615,8 @@ void AArch64A57FPLoadBalancing::scanInstruction(
// unit.
unsigned DestReg = MI->getOperand(0).getReg();
- DEBUG(dbgs() << "New chain started for register " << printReg(DestReg, TRI)
- << " at " << *MI);
+ LLVM_DEBUG(dbgs() << "New chain started for register "
+ << printReg(DestReg, TRI) << " at " << *MI);
auto G = llvm::make_unique<Chain>(MI, Idx, getColor(DestReg));
ActiveChains[DestReg] = G.get();
@@ -631,8 +635,8 @@ void AArch64A57FPLoadBalancing::scanInstruction(
maybeKillChain(MI->getOperand(0), Idx, ActiveChains);
if (ActiveChains.find(AccumReg) != ActiveChains.end()) {
- DEBUG(dbgs() << "Chain found for accumulator register "
- << printReg(AccumReg, TRI) << " in MI " << *MI);
+ LLVM_DEBUG(dbgs() << "Chain found for accumulator register "
+ << printReg(AccumReg, TRI) << " in MI " << *MI);
// For simplicity we only chain together sequences of MULs/MLAs where the
// accumulator register is killed on each instruction. This means we don't
@@ -641,7 +645,7 @@ void AArch64A57FPLoadBalancing::scanInstruction(
// FIXME: We could extend to handle the non-kill cases for more coverage.
if (MI->getOperand(3).isKill()) {
// Add to chain.
- DEBUG(dbgs() << "Instruction was successfully added to chain.\n");
+ LLVM_DEBUG(dbgs() << "Instruction was successfully added to chain.\n");
ActiveChains[AccumReg]->add(MI, Idx, getColor(DestReg));
// Handle cases where the destination is not the same as the accumulator.
if (DestReg != AccumReg) {
@@ -651,13 +655,14 @@ void AArch64A57FPLoadBalancing::scanInstruction(
return;
}
- DEBUG(dbgs() << "Cannot add to chain because accumulator operand wasn't "
- << "marked <kill>!\n");
+ LLVM_DEBUG(
+ dbgs() << "Cannot add to chain because accumulator operand wasn't "
+ << "marked <kill>!\n");
maybeKillChain(MI->getOperand(3), Idx, ActiveChains);
}
- DEBUG(dbgs() << "Creating new chain for dest register "
- << printReg(DestReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Creating new chain for dest register "
+ << printReg(DestReg, TRI) << "\n");
auto G = llvm::make_unique<Chain>(MI, Idx, getColor(DestReg));
ActiveChains[DestReg] = G.get();
AllChains.push_back(std::move(G));
@@ -685,8 +690,8 @@ maybeKillChain(MachineOperand &MO, unsigned Idx,
// If this is a KILL of a current chain, record it.
if (MO.isKill() && ActiveChains.find(MO.getReg()) != ActiveChains.end()) {
- DEBUG(dbgs() << "Kill seen for chain " << printReg(MO.getReg(), TRI)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Kill seen for chain " << printReg(MO.getReg(), TRI)
+ << "\n");
ActiveChains[MO.getReg()]->setKill(MI, Idx, /*Immutable=*/MO.isTied());
}
ActiveChains.erase(MO.getReg());
@@ -696,8 +701,8 @@ maybeKillChain(MachineOperand &MO, unsigned Idx,
for (auto I = ActiveChains.begin(), E = ActiveChains.end();
I != E;) {
if (MO.clobbersPhysReg(I->first)) {
- DEBUG(dbgs() << "Kill (regmask) seen for chain "
- << printReg(I->first, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Kill (regmask) seen for chain "
+ << printReg(I->first, TRI) << "\n");
I->second->setKill(MI, Idx, /*Immutable=*/true);
ActiveChains.erase(I++);
} else
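The chain-grouping step referenced in the hunks above (the "poor-man's version of graph coloring") unions chains whose live ranges overlap in an llvm::EquivalenceClasses and then colors each resulting set independently. A stripped-down sketch of that grouping idea, using a made-up Interval type in place of Chain:

    #include "llvm/ADT/EquivalenceClasses.h"
    #include <vector>

    // Hypothetical stand-in for Chain: a half-open [Start, End) instruction range.
    struct Interval { unsigned Start, End; };

    static bool rangeOverlaps(const Interval &A, const Interval &B) {
      return A.Start < B.End && B.Start < A.End;
    }

    // Union every overlapping pair, as runOnBasicBlock() does with
    // EC.unionSets(I.get(), J.get()); the class count is what the pass
    // reports as "disjoint sets".
    static unsigned countDisjointSets(std::vector<Interval> &Intervals) {
      llvm::EquivalenceClasses<Interval *> EC;
      for (Interval &I : Intervals)
        EC.insert(&I);
      for (Interval &I : Intervals)
        for (Interval &J : Intervals)
          if (&I != &J && rangeOverlaps(I, J))
            EC.unionSets(&I, &J);
      return EC.getNumClasses();
    }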
diff --git a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 338daecb49e5..22b0c1e3b471 100644
--- a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -277,7 +277,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
TII->get(AArch64::COPY), Dst)
.addReg(Src, getKillRegState(IsKill));
- DEBUG(dbgs() << " adding copy: " << *MIB);
+ LLVM_DEBUG(dbgs() << " adding copy: " << *MIB);
++NumCopiesInserted;
return MIB;
}
@@ -286,7 +286,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
// to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
- DEBUG(dbgs() << "Scalar transform: " << MI);
+ LLVM_DEBUG(dbgs() << "Scalar transform: " << MI);
MachineBasicBlock *MBB = MI.getParent();
unsigned OldOpc = MI.getOpcode();
@@ -391,7 +391,7 @@ bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
// runOnMachineFunction - Pass entry point from PassManager.
bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
bool Changed = false;
- DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
+ LLVM_DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
if (skipFunction(mf.getFunction()))
return false;
diff --git a/lib/Target/AArch64/AArch64CollectLOH.cpp b/lib/Target/AArch64/AArch64CollectLOH.cpp
index 0a9167edcdb3..720323f81d29 100644
--- a/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -380,8 +380,8 @@ static bool handleMiddleInst(const MachineInstr &MI, LOHInfo &DefInfo,
static void handleADRP(const MachineInstr &MI, AArch64FunctionInfo &AFI,
LOHInfo &Info) {
if (Info.LastADRP != nullptr) {
- DEBUG(dbgs() << "Adding MCLOH_AdrpAdrp:\n" << '\t' << MI << '\t'
- << *Info.LastADRP);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAdrp:\n"
+ << '\t' << MI << '\t' << *Info.LastADRP);
AFI.addLOHDirective(MCLOH_AdrpAdrp, {&MI, Info.LastADRP});
++NumADRPSimpleCandidate;
}
@@ -390,48 +390,52 @@ static void handleADRP(const MachineInstr &MI, AArch64FunctionInfo &AFI,
if (Info.IsCandidate) {
switch (Info.Type) {
case MCLOH_AdrpAdd:
- DEBUG(dbgs() << "Adding MCLOH_AdrpAdd:\n" << '\t' << MI << '\t'
- << *Info.MI0);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAdd:\n"
+ << '\t' << MI << '\t' << *Info.MI0);
AFI.addLOHDirective(MCLOH_AdrpAdd, {&MI, Info.MI0});
++NumADRSimpleCandidate;
break;
case MCLOH_AdrpLdr:
if (supportLoadFromLiteral(*Info.MI0)) {
- DEBUG(dbgs() << "Adding MCLOH_AdrpLdr:\n" << '\t' << MI << '\t'
- << *Info.MI0);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdr:\n"
+ << '\t' << MI << '\t' << *Info.MI0);
AFI.addLOHDirective(MCLOH_AdrpLdr, {&MI, Info.MI0});
++NumADRPToLDR;
}
break;
case MCLOH_AdrpAddLdr:
- DEBUG(dbgs() << "Adding MCLOH_AdrpAddLdr:\n" << '\t' << MI << '\t'
- << *Info.MI1 << '\t' << *Info.MI0);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAddLdr:\n"
+ << '\t' << MI << '\t' << *Info.MI1 << '\t'
+ << *Info.MI0);
AFI.addLOHDirective(MCLOH_AdrpAddLdr, {&MI, Info.MI1, Info.MI0});
++NumADDToLDR;
break;
case MCLOH_AdrpAddStr:
if (Info.MI1 != nullptr) {
- DEBUG(dbgs() << "Adding MCLOH_AdrpAddStr:\n" << '\t' << MI << '\t'
- << *Info.MI1 << '\t' << *Info.MI0);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpAddStr:\n"
+ << '\t' << MI << '\t' << *Info.MI1 << '\t'
+ << *Info.MI0);
AFI.addLOHDirective(MCLOH_AdrpAddStr, {&MI, Info.MI1, Info.MI0});
++NumADDToSTR;
}
break;
case MCLOH_AdrpLdrGotLdr:
- DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotLdr:\n" << '\t' << MI << '\t'
- << *Info.MI1 << '\t' << *Info.MI0);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotLdr:\n"
+ << '\t' << MI << '\t' << *Info.MI1 << '\t'
+ << *Info.MI0);
AFI.addLOHDirective(MCLOH_AdrpLdrGotLdr, {&MI, Info.MI1, Info.MI0});
++NumLDRToLDR;
break;
case MCLOH_AdrpLdrGotStr:
- DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotStr:\n" << '\t' << MI << '\t'
- << *Info.MI1 << '\t' << *Info.MI0);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGotStr:\n"
+ << '\t' << MI << '\t' << *Info.MI1 << '\t'
+ << *Info.MI0);
AFI.addLOHDirective(MCLOH_AdrpLdrGotStr, {&MI, Info.MI1, Info.MI0});
++NumLDRToSTR;
break;
case MCLOH_AdrpLdrGot:
- DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGot:\n" << '\t' << MI << '\t'
- << *Info.MI0);
+ LLVM_DEBUG(dbgs() << "Adding MCLOH_AdrpLdrGot:\n"
+ << '\t' << MI << '\t' << *Info.MI0);
AFI.addLOHDirective(MCLOH_AdrpLdrGot, {&MI, Info.MI0});
break;
case MCLOH_AdrpAdrp:
@@ -485,8 +489,8 @@ bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
- DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n"
- << "Looking in function " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n"
+ << "Looking in function " << MF.getName() << '\n');
LOHInfo LOHInfos[N_GPR_REGS];
AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
diff --git a/lib/Target/AArch64/AArch64CondBrTuning.cpp b/lib/Target/AArch64/AArch64CondBrTuning.cpp
index 30cefbad884c..5ae787409ae8 100644
--- a/lib/Target/AArch64/AArch64CondBrTuning.cpp
+++ b/lib/Target/AArch64/AArch64CondBrTuning.cpp
@@ -201,10 +201,10 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
I->readsRegister(AArch64::NZCV, TRI))
return false;
}
- DEBUG(dbgs() << " Replacing instructions:\n ");
- DEBUG(DefMI.print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG(MI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << " Replacing instructions:\n ");
+ LLVM_DEBUG(DefMI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG(MI.print(dbgs()));
NewCmp = convertToFlagSetting(DefMI, IsFlagSetting);
NewBr = convertToCondBr(MI);
@@ -260,10 +260,10 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
I->readsRegister(AArch64::NZCV, TRI))
return false;
}
- DEBUG(dbgs() << " Replacing instructions:\n ");
- DEBUG(DefMI.print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG(MI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << " Replacing instructions:\n ");
+ LLVM_DEBUG(DefMI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG(MI.print(dbgs()));
NewCmp = convertToFlagSetting(DefMI, IsFlagSetting);
NewBr = convertToCondBr(MI);
@@ -275,10 +275,10 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI,
(void)NewCmp; (void)NewBr;
assert(NewCmp && NewBr && "Expected new instructions.");
- DEBUG(dbgs() << " with instruction:\n ");
- DEBUG(NewCmp->print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG(NewBr->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " with instruction:\n ");
+ LLVM_DEBUG(NewCmp->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG(NewBr->print(dbgs()));
// If this was a flag setting version of the instruction, we use the original
// instruction by just clearing the dead marked on the implicit-def of NCZV.
@@ -293,8 +293,9 @@ bool AArch64CondBrTuning::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
- DEBUG(dbgs() << "********** AArch64 Conditional Branch Tuning **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(
+ dbgs() << "********** AArch64 Conditional Branch Tuning **********\n"
+ << "********** Function: " << MF.getName() << '\n');
TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
TRI = MF.getSubtarget().getRegisterInfo();
diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index d14bde33d94e..5064762b9f77 100644
--- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -173,13 +173,14 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
case AArch64::ADDSXri: {
unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm());
if (!I->getOperand(2).isImm()) {
- DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n');
return nullptr;
} else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) {
- DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I
+ << '\n');
return nullptr;
} else if (!MRI->use_empty(I->getOperand(0).getReg())) {
- DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
return nullptr;
}
return &*I;
@@ -207,7 +208,8 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
return nullptr;
}
}
- DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n');
+ LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
+ << '\n');
return nullptr;
}
@@ -325,8 +327,8 @@ bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
}
bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
+ << "********** Function: " << MF.getName() << '\n');
if (skipFunction(MF.getFunction()))
return false;
@@ -384,15 +386,15 @@ bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();
- DEBUG(dbgs() << "Head branch:\n");
- DEBUG(dbgs() << "\tcondition: "
- << AArch64CC::getCondCodeName(HeadCmp) << '\n');
- DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n');
+ LLVM_DEBUG(dbgs() << "Head branch:\n");
+ LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp)
+ << '\n');
+ LLVM_DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n');
- DEBUG(dbgs() << "True branch:\n");
- DEBUG(dbgs() << "\tcondition: "
- << AArch64CC::getCondCodeName(TrueCmp) << '\n');
- DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');
+ LLVM_DEBUG(dbgs() << "True branch:\n");
+ LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(TrueCmp)
+ << '\n');
+ LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');
if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
(HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 9ff2472dbeca..8176b6fb269d 100644
--- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -311,7 +311,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
return &*I;
}
++NumCmpTermRejs;
- DEBUG(dbgs() << "Flags not used by terminator: " << *I);
+ LLVM_DEBUG(dbgs() << "Flags not used by terminator: " << *I);
return nullptr;
}
@@ -329,7 +329,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
// Check that the immediate operand is within range, ccmp wants a uimm5.
// Rd = SUBSri Rn, imm, shift
if (I->getOperand(3).getImm() || !isUInt<5>(I->getOperand(2).getImm())) {
- DEBUG(dbgs() << "Immediate out of range for ccmp: " << *I);
+ LLVM_DEBUG(dbgs() << "Immediate out of range for ccmp: " << *I);
++NumImmRangeRejs;
return nullptr;
}
@@ -340,7 +340,8 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
case AArch64::ADDSXrr:
if (isDeadDef(I->getOperand(0).getReg()))
return &*I;
- DEBUG(dbgs() << "Can't convert compare with live destination: " << *I);
+ LLVM_DEBUG(dbgs() << "Can't convert compare with live destination: "
+ << *I);
++NumLiveDstRejs;
return nullptr;
case AArch64::FCMPSrr:
@@ -358,18 +359,19 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
// The ccmp doesn't produce exactly the same flags as the original
// compare, so reject the transform if there are uses of the flags
// besides the terminators.
- DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I);
+ LLVM_DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I);
++NumMultNZCVUses;
return nullptr;
}
if (PRI.Defined || PRI.Clobbered) {
- DEBUG(dbgs() << "Not convertible compare: " << *I);
+ LLVM_DEBUG(dbgs() << "Not convertible compare: " << *I);
++NumUnknNZCVDefs;
return nullptr;
}
}
- DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n');
+ LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
+ << '\n');
return nullptr;
}
@@ -383,7 +385,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
// Reject any live-in physregs. It's probably NZCV/EFLAGS, and very hard to
// get right.
if (!MBB->livein_empty()) {
- DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
return false;
}
@@ -396,14 +398,14 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
continue;
if (++InstrCount > BlockInstrLimit && !Stress) {
- DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
- << BlockInstrLimit << " instructions.\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
+ << BlockInstrLimit << " instructions.\n");
return false;
}
// There shouldn't normally be any phis in a single-predecessor block.
if (I.isPHI()) {
- DEBUG(dbgs() << "Can't hoist: " << I);
+ LLVM_DEBUG(dbgs() << "Can't hoist: " << I);
return false;
}
@@ -411,20 +413,20 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
// speculate GOT or constant pool loads that are guaranteed not to trap,
// but we don't support that for now.
if (I.mayLoad()) {
- DEBUG(dbgs() << "Won't speculate load: " << I);
+ LLVM_DEBUG(dbgs() << "Won't speculate load: " << I);
return false;
}
// We never speculate stores, so an AA pointer isn't necessary.
bool DontMoveAcrossStore = true;
if (!I.isSafeToMove(nullptr, DontMoveAcrossStore)) {
- DEBUG(dbgs() << "Can't speculate: " << I);
+ LLVM_DEBUG(dbgs() << "Can't speculate: " << I);
return false;
}
// Only CmpMI is allowed to clobber the flags.
if (&I != CmpMI && I.modifiesRegister(AArch64::NZCV, TRI)) {
- DEBUG(dbgs() << "Clobbers flags: " << I);
+ LLVM_DEBUG(dbgs() << "Clobbers flags: " << I);
return false;
}
}
@@ -458,9 +460,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
return false;
// The CFG topology checks out.
- DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> "
- << printMBBReference(*CmpBB) << " -> "
- << printMBBReference(*Tail) << '\n');
+ LLVM_DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> "
+ << printMBBReference(*CmpBB) << " -> "
+ << printMBBReference(*Tail) << '\n');
++NumConsidered;
// Tail is allowed to have many predecessors, but we can't handle PHIs yet.
@@ -470,13 +472,13 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
// always be safe to sink the ccmp down to immediately before the CmpBB
// terminators.
if (!trivialTailPHIs()) {
- DEBUG(dbgs() << "Can't handle phis in Tail.\n");
+ LLVM_DEBUG(dbgs() << "Can't handle phis in Tail.\n");
++NumPhiRejs;
return false;
}
if (!Tail->livein_empty()) {
- DEBUG(dbgs() << "Can't handle live-in physregs in Tail.\n");
+ LLVM_DEBUG(dbgs() << "Can't handle live-in physregs in Tail.\n");
++NumPhysRejs;
return false;
}
@@ -484,13 +486,13 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
// CmpBB should never have PHIs since Head is its only predecessor.
// FIXME: Clean them up if it happens.
if (!CmpBB->empty() && CmpBB->front().isPHI()) {
- DEBUG(dbgs() << "Can't handle phis in CmpBB.\n");
+ LLVM_DEBUG(dbgs() << "Can't handle phis in CmpBB.\n");
++NumPhi2Rejs;
return false;
}
if (!CmpBB->livein_empty()) {
- DEBUG(dbgs() << "Can't handle live-in physregs in CmpBB.\n");
+ LLVM_DEBUG(dbgs() << "Can't handle live-in physregs in CmpBB.\n");
++NumPhysRejs;
return false;
}
@@ -499,7 +501,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
HeadCond.clear();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
if (TII->analyzeBranch(*Head, TBB, FBB, HeadCond)) {
- DEBUG(dbgs() << "Head branch not analyzable.\n");
+ LLVM_DEBUG(dbgs() << "Head branch not analyzable.\n");
++NumHeadBranchRejs;
return false;
}
@@ -507,13 +509,14 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
// This is weird, probably some sort of degenerate CFG, or an edge to a
// landing pad.
if (!TBB || HeadCond.empty()) {
- DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch in Head.\n");
+ LLVM_DEBUG(
+ dbgs() << "AnalyzeBranch didn't find conditional branch in Head.\n");
++NumHeadBranchRejs;
return false;
}
if (!parseCond(HeadCond, HeadCmpBBCC)) {
- DEBUG(dbgs() << "Unsupported branch type on Head\n");
+ LLVM_DEBUG(dbgs() << "Unsupported branch type on Head\n");
++NumHeadBranchRejs;
return false;
}
@@ -527,19 +530,20 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
CmpBBCond.clear();
TBB = FBB = nullptr;
if (TII->analyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) {
- DEBUG(dbgs() << "CmpBB branch not analyzable.\n");
+ LLVM_DEBUG(dbgs() << "CmpBB branch not analyzable.\n");
++NumCmpBranchRejs;
return false;
}
if (!TBB || CmpBBCond.empty()) {
- DEBUG(dbgs() << "AnalyzeBranch didn't find conditional branch in CmpBB.\n");
+ LLVM_DEBUG(
+ dbgs() << "AnalyzeBranch didn't find conditional branch in CmpBB.\n");
++NumCmpBranchRejs;
return false;
}
if (!parseCond(CmpBBCond, CmpBBTailCC)) {
- DEBUG(dbgs() << "Unsupported branch type on CmpBB\n");
+ LLVM_DEBUG(dbgs() << "Unsupported branch type on CmpBB\n");
++NumCmpBranchRejs;
return false;
}
@@ -547,9 +551,10 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
if (TBB != Tail)
CmpBBTailCC = AArch64CC::getInvertedCondCode(CmpBBTailCC);
- DEBUG(dbgs() << "Head->CmpBB on " << AArch64CC::getCondCodeName(HeadCmpBBCC)
- << ", CmpBB->Tail on " << AArch64CC::getCondCodeName(CmpBBTailCC)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Head->CmpBB on "
+ << AArch64CC::getCondCodeName(HeadCmpBBCC)
+ << ", CmpBB->Tail on "
+ << AArch64CC::getCondCodeName(CmpBBTailCC) << '\n');
CmpMI = findConvertibleCompare(CmpBB);
if (!CmpMI)
@@ -563,9 +568,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
}
void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
- DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into "
- << printMBBReference(*Head) << ":\n"
- << *CmpBB);
+ LLVM_DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into "
+ << printMBBReference(*Head) << ":\n"
+ << *CmpBB);
// All CmpBB instructions are moved into Head, and CmpBB is deleted.
// Update the CFG first.
@@ -710,7 +715,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
RemovedBlocks.push_back(CmpBB);
CmpBB->eraseFromParent();
- DEBUG(dbgs() << "Result:\n" << *Head);
+ LLVM_DEBUG(dbgs() << "Result:\n" << *Head);
++NumConverted;
}
@@ -860,13 +865,13 @@ bool AArch64ConditionalCompares::shouldConvert() {
// If code size is the main concern
if (MinSize) {
int CodeSizeDelta = CmpConv.expectedCodeSizeDelta();
- DEBUG(dbgs() << "Code size delta: " << CodeSizeDelta << '\n');
+ LLVM_DEBUG(dbgs() << "Code size delta: " << CodeSizeDelta << '\n');
// If we are minimizing the code size, do the conversion whatever
// the cost is.
if (CodeSizeDelta < 0)
return true;
if (CodeSizeDelta > 0) {
- DEBUG(dbgs() << "Code size is increasing, give up on this one.\n");
+ LLVM_DEBUG(dbgs() << "Code size is increasing, give up on this one.\n");
return false;
}
// CodeSizeDelta == 0, continue with the regular heuristics
@@ -885,24 +890,24 @@ bool AArch64ConditionalCompares::shouldConvert() {
Trace.getInstrCycles(*CmpConv.Head->getFirstTerminator()).Depth;
unsigned CmpBBDepth =
Trace.getInstrCycles(*CmpConv.CmpBB->getFirstTerminator()).Depth;
- DEBUG(dbgs() << "Head depth: " << HeadDepth
- << "\nCmpBB depth: " << CmpBBDepth << '\n');
+ LLVM_DEBUG(dbgs() << "Head depth: " << HeadDepth
+ << "\nCmpBB depth: " << CmpBBDepth << '\n');
if (CmpBBDepth > HeadDepth + DelayLimit) {
- DEBUG(dbgs() << "Branch delay would be larger than " << DelayLimit
- << " cycles.\n");
+ LLVM_DEBUG(dbgs() << "Branch delay would be larger than " << DelayLimit
+ << " cycles.\n");
return false;
}
// Check the resource depth at the bottom of CmpBB - these instructions will
// be speculated.
unsigned ResDepth = Trace.getResourceDepth(true);
- DEBUG(dbgs() << "Resources: " << ResDepth << '\n');
+ LLVM_DEBUG(dbgs() << "Resources: " << ResDepth << '\n');
// Heuristic: The speculatively executed instructions must all be able to
// merge into the Head block. The Head critical path should dominate the
// resource cost of the speculated instructions.
if (ResDepth > HeadDepth) {
- DEBUG(dbgs() << "Too many instructions to speculate.\n");
+ LLVM_DEBUG(dbgs() << "Too many instructions to speculate.\n");
return false;
}
return true;
@@ -922,8 +927,8 @@ bool AArch64ConditionalCompares::tryConvert(MachineBasicBlock *MBB) {
}
bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
- << "********** Function: " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
+ << "********** Function: " << MF.getName() << '\n');
if (skipFunction(MF.getFunction()))
return false;
diff --git a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 8e7e740da6f6..2ba10d25e939 100644
--- a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -136,18 +136,21 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
// We need to skip this instruction because while it appears to have a
// dead def it uses a frame index which might expand into a multi
// instruction sequence during EPI.
- DEBUG(dbgs() << " Ignoring, operand is frame index\n");
+ LLVM_DEBUG(dbgs() << " Ignoring, operand is frame index\n");
continue;
}
if (MI.definesRegister(AArch64::XZR) || MI.definesRegister(AArch64::WZR)) {
// It is not allowed to write to the same register (not even the zero
// register) twice in a single instruction.
- DEBUG(dbgs() << " Ignoring, XZR or WZR already used by the instruction\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Ignoring, XZR or WZR already used by the instruction\n");
continue;
}
if (shouldSkip(MI, MF)) {
- DEBUG(dbgs() << " Ignoring, Atomic instruction with acquire semantics using WZR/XZR\n");
+ LLVM_DEBUG(dbgs() << " Ignoring, Atomic instruction with acquire "
+ "semantics using WZR/XZR\n");
continue;
}
@@ -163,30 +166,30 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
(!MO.isDead() && !MRI->use_nodbg_empty(Reg)))
continue;
assert(!MO.isImplicit() && "Unexpected implicit def!");
- DEBUG(dbgs() << " Dead def operand #" << I << " in:\n ";
- MI.print(dbgs()));
+ LLVM_DEBUG(dbgs() << " Dead def operand #" << I << " in:\n ";
+ MI.print(dbgs()));
// Be careful not to change the register if it's a tied operand.
if (MI.isRegTiedToUseOperand(I)) {
- DEBUG(dbgs() << " Ignoring, def is tied operand.\n");
+ LLVM_DEBUG(dbgs() << " Ignoring, def is tied operand.\n");
continue;
}
const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI, MF);
unsigned NewReg;
if (RC == nullptr) {
- DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
+ LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
continue;
} else if (RC->contains(AArch64::WZR))
NewReg = AArch64::WZR;
else if (RC->contains(AArch64::XZR))
NewReg = AArch64::XZR;
else {
- DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
+ LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
continue;
}
- DEBUG(dbgs() << " Replacing with zero register. New:\n ");
+ LLVM_DEBUG(dbgs() << " Replacing with zero register. New:\n ");
MO.setReg(NewReg);
MO.setIsDead();
- DEBUG(MI.print(dbgs()));
+ LLVM_DEBUG(MI.print(dbgs()));
++NumDeadDefsReplaced;
Changed = true;
// Only replace one dead register, see check for zero register above.
@@ -204,7 +207,7 @@ bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget().getInstrInfo();
MRI = &MF.getRegInfo();
- DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n");
+ LLVM_DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n");
Changed = false;
for (auto &MBB : MF)
processMachineBasicBlock(MBB);
diff --git a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
index 4be44ecb52d9..bc9a5ca97fea 100644
--- a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
+++ b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
@@ -169,7 +169,7 @@ bool FalkorMarkStridedAccesses::runOnLoop(Loop &L) {
LoadI->setMetadata(FALKOR_STRIDED_ACCESS_MD,
MDNode::get(LoadI->getContext(), {}));
++NumStridedLoadsMarked;
- DEBUG(dbgs() << "Load: " << I << " marked as strided\n");
+ LLVM_DEBUG(dbgs() << "Load: " << I << " marked as strided\n");
MadeChange = true;
}
}
@@ -731,10 +731,10 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
continue;
bool Fixed = false;
- DEBUG(dbgs() << "Attempting to fix tag collision: " << MI);
+ LLVM_DEBUG(dbgs() << "Attempting to fix tag collision: " << MI);
if (!DebugCounter::shouldExecute(FixCounter)) {
- DEBUG(dbgs() << "Skipping fix due to debug counter:\n " << MI);
+ LLVM_DEBUG(dbgs() << "Skipping fix due to debug counter:\n " << MI);
continue;
}
@@ -759,8 +759,8 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
if (TagMap.count(NewTag))
continue;
- DEBUG(dbgs() << "Changing base reg to: " << printReg(ScratchReg, TRI)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Changing base reg to: "
+ << printReg(ScratchReg, TRI) << '\n');
// Rewrite:
// Xd = LOAD Xb, off
@@ -778,8 +778,8 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
// If the load does a pre/post increment, then insert a MOV after as
// well to update the real base register.
if (LdI.IsPrePost) {
- DEBUG(dbgs() << "Doing post MOV of incremented reg: "
- << printReg(ScratchReg, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Doing post MOV of incremented reg: "
+ << printReg(ScratchReg, TRI) << '\n');
MI.getOperand(0).setReg(
ScratchReg); // Change tied operand pre/post update dest.
BuildMI(*MBB, std::next(MachineBasicBlock::iterator(MI)), DL,
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 2523cfdfb533..6dc5d19862a9 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1287,13 +1287,11 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
else
StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
- DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
- if (RPI.isPaired())
- dbgs() << ", " << printReg(Reg2, TRI);
- dbgs() << ") -> fi#(" << RPI.FrameIdx;
- if (RPI.isPaired())
- dbgs() << ", " << RPI.FrameIdx+1;
- dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
+ if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
+ dbgs() << ") -> fi#(" << RPI.FrameIdx;
+ if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
+ dbgs() << ")\n");
MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
if (!MRI.isReserved(Reg1))
@@ -1350,13 +1348,11 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
else
LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
- DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
- if (RPI.isPaired())
- dbgs() << ", " << printReg(Reg2, TRI);
- dbgs() << ") -> fi#(" << RPI.FrameIdx;
- if (RPI.isPaired())
- dbgs() << ", " << RPI.FrameIdx+1;
- dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
+ if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
+ dbgs() << ") -> fi#(" << RPI.FrameIdx;
+ if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
+ dbgs() << ")\n");
MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
if (RPI.isPaired()) {
@@ -1465,10 +1461,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
}
}
- DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:";
- for (unsigned Reg : SavedRegs.set_bits())
- dbgs() << ' ' << printReg(Reg, RegInfo);
- dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:";
+ for (unsigned Reg
+ : SavedRegs.set_bits()) dbgs()
+ << ' ' << printReg(Reg, RegInfo);
+ dbgs() << "\n";);
// If any callee-saved registers are used, the frame cannot be eliminated.
unsigned NumRegsSpilled = SavedRegs.count();
@@ -1477,7 +1474,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// The CSR spill slots have not been allocated yet, so estimateStackSize
// won't include them.
unsigned CFSize = MFI.estimateStackSize(MF) + 8 * NumRegsSpilled;
- DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
+ LLVM_DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);
bool BigStack = (CFSize > EstimatedStackSizeLimit);
if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
@@ -1491,8 +1488,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// here.
if (BigStack) {
if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
- DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
- << " to get a scratch register.\n");
+ LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
+ << " to get a scratch register.\n");
SavedRegs.set(UnspilledCSGPR);
// MachO's compact unwind format relies on all registers being stored in
// pairs, so if we need to spill one extra for BigStack, then we need to
@@ -1512,8 +1509,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
unsigned Align = TRI->getSpillAlignment(RC);
int FI = MFI.CreateStackObject(Size, Align, false);
RS->addScavengingFrameIndex(FI);
- DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
- << " as the emergency spill slot.\n");
+ LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
+ << " as the emergency spill slot.\n");
}
}
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 3124204fc597..fd040313179d 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1553,8 +1553,9 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
// Bail out on large immediates. This happens when no proper
// combining/constant folding was performed.
if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
- DEBUG((dbgs() << N
- << ": Found large shift immediate, this should not happen\n"));
+ LLVM_DEBUG(
+ (dbgs() << N
+ << ": Found large shift immediate, this should not happen\n"));
return false;
}
@@ -1696,8 +1697,9 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
// Missing combines/constant folding may have left us with strange
// constants.
if (ShlImm >= VT.getSizeInBits()) {
- DEBUG((dbgs() << N
- << ": Found large shift immediate, this should not happen\n"));
+ LLVM_DEBUG(
+ (dbgs() << N
+ << ": Found large shift immediate, this should not happen\n"));
return false;
}
@@ -2657,7 +2659,7 @@ bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
void AArch64DAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
return;
}
@@ -2754,9 +2756,9 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
}
SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
Node->getOperand(0));
- DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
- DEBUG(Extract->dumpr(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
+ LLVM_DEBUG(Extract->dumpr(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
ReplaceNode(Node, Extract.getNode());
return;
}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8dc13c6fb244..51063ee7fb4d 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1442,7 +1442,8 @@ static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
static bool isLegalArithImmed(uint64_t C) {
// Matches AArch64DAGToDAGISel::SelectArithImmed().
bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
- DEBUG(dbgs() << "Is imm " << C << " legal: " << (IsLegal ? "yes\n" : "no\n"));
+ LLVM_DEBUG(dbgs() << "Is imm " << C
+ << " legal: " << (IsLegal ? "yes\n" : "no\n"));
return IsLegal;
}
@@ -2644,8 +2645,8 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
- DEBUG(dbgs() << "Custom lowering: ");
- DEBUG(Op.dump());
+ LLVM_DEBUG(dbgs() << "Custom lowering: ");
+ LLVM_DEBUG(Op.dump());
switch (Op.getOpcode()) {
default:
@@ -3774,7 +3775,7 @@ SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
template <class NodeTy>
SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
unsigned Flags) const {
- DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
+ LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
SDLoc DL(N);
EVT Ty = getPointerTy(DAG.getDataLayout());
SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
@@ -3787,7 +3788,7 @@ SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
template <class NodeTy>
SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
unsigned Flags) const {
- DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
+ LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
SDLoc DL(N);
EVT Ty = getPointerTy(DAG.getDataLayout());
const unsigned char MO_NC = AArch64II::MO_NC;
@@ -3803,7 +3804,7 @@ SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
template <class NodeTy>
SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
unsigned Flags) const {
- DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
+ LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
SDLoc DL(N);
EVT Ty = getPointerTy(DAG.getDataLayout());
SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
@@ -5073,7 +5074,8 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
// FIXME: We should be able to handle f128 as well with a clever lowering.
if (Imm.isPosZero() && (VT == MVT::f64 || VT == MVT::f32 ||
(VT == MVT::f16 && Subtarget->hasFullFP16()))) {
- DEBUG(dbgs() << "Legal fp imm: materialize 0 using the zero register\n");
+ LLVM_DEBUG(
+ dbgs() << "Legal fp imm: materialize 0 using the zero register\n");
return true;
}
@@ -5094,14 +5096,17 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
}
if (IsLegal) {
- DEBUG(dbgs() << "Legal " << FPType << " imm value: " << ImmStrVal << "\n");
+ LLVM_DEBUG(dbgs() << "Legal " << FPType << " imm value: " << ImmStrVal
+ << "\n");
return true;
}
if (!FPType.empty())
- DEBUG(dbgs() << "Illegal " << FPType << " imm value: " << ImmStrVal << "\n");
+ LLVM_DEBUG(dbgs() << "Illegal " << FPType << " imm value: " << ImmStrVal
+ << "\n");
else
- DEBUG(dbgs() << "Illegal fp imm " << ImmStrVal << ": unsupported fp type\n");
+ LLVM_DEBUG(dbgs() << "Illegal fp imm " << ImmStrVal
+ << ": unsupported fp type\n");
return false;
}
@@ -5540,7 +5545,7 @@ static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
SelectionDAG &DAG) const {
assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
- DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
+ LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
SDLoc dl(Op);
EVT VT = Op.getValueType();
unsigned NumElts = VT.getVectorNumElements();
@@ -5576,10 +5581,11 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
continue;
else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(V.getOperand(1))) {
- DEBUG(dbgs() << "Reshuffle failed: "
- "a shuffle can only come from building a vector from "
- "various elements of other vectors, provided their "
- "indices are constant\n");
+ LLVM_DEBUG(
+ dbgs() << "Reshuffle failed: "
+ "a shuffle can only come from building a vector from "
+ "various elements of other vectors, provided their "
+ "indices are constant\n");
return SDValue();
}
@@ -5596,8 +5602,9 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
}
if (Sources.size() > 2) {
- DEBUG(dbgs() << "Reshuffle failed: currently only do something sane when at "
- "most two source vectors are involved\n");
+ LLVM_DEBUG(
+ dbgs() << "Reshuffle failed: currently only do something sane when at "
+ "most two source vectors are involved\n");
return SDValue();
}
@@ -5643,7 +5650,8 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
assert(SrcVT.getSizeInBits() == 2 * VT.getSizeInBits());
if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
- DEBUG(dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
+ LLVM_DEBUG(
+ dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
return SDValue();
}
@@ -5689,10 +5697,9 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
}
// Final sanity check before we try to actually produce a shuffle.
- DEBUG(
- for (auto Src : Sources)
- assert(Src.ShuffleVec.getValueType() == ShuffleVT);
- );
+ LLVM_DEBUG(for (auto Src
+ : Sources)
+ assert(Src.ShuffleVec.getValueType() == ShuffleVT););
// The stars all align, our next step is to produce the mask for the shuffle.
SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
@@ -5725,7 +5732,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
// Final check before we try to produce nonsense...
if (!isShuffleMaskLegal(Mask, ShuffleVT)) {
- DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
+ LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
return SDValue();
}
@@ -5737,12 +5744,8 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
ShuffleOps[1], Mask);
SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
- DEBUG(
- dbgs() << "Reshuffle, creating node: ";
- Shuffle.dump();
- dbgs() << "Reshuffle, creating node: ";
- V.dump();
- );
+ LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump();
+ dbgs() << "Reshuffle, creating node: "; V.dump(););
return V;
}
@@ -6699,10 +6702,10 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(Intrin, DL, MVT::i32), X, Y,
Shift.getOperand(1));
- DEBUG(dbgs() << "aarch64-lower: transformed: \n");
- DEBUG(N->dump(&DAG));
- DEBUG(dbgs() << "into: \n");
- DEBUG(ResultSLI->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n");
+ LLVM_DEBUG(N->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "into: \n");
+ LLVM_DEBUG(ResultSLI->dump(&DAG));
++NumShiftInserts;
return ResultSLI;
@@ -6889,13 +6892,14 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
}
if (!Value.getNode()) {
- DEBUG(dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
+ LLVM_DEBUG(
+ dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
return DAG.getUNDEF(VT);
}
if (isOnlyLowElement) {
- DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
- "SCALAR_TO_VECTOR node\n");
+ LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
+ "SCALAR_TO_VECTOR node\n");
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
}
@@ -6966,7 +6970,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (!isConstant) {
if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Value.getValueType() != VT) {
- DEBUG(dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
+ LLVM_DEBUG(
+ dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
}
@@ -6975,8 +6980,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
SDValue Lane = Value.getOperand(1);
Value = Value.getOperand(0);
if (Value.getValueSizeInBits() == 64) {
- DEBUG(dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
- "widening it\n");
+ LLVM_DEBUG(
+ dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
+ "widening it\n");
Value = WidenVector(Value, DAG);
}
@@ -6989,17 +6995,16 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
EVT EltTy = VT.getVectorElementType();
assert ((EltTy == MVT::f16 || EltTy == MVT::f32 || EltTy == MVT::f64) &&
"Unsupported floating-point vector type");
- DEBUG(dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
- "BITCASTS, and try again\n");
+ LLVM_DEBUG(
+ dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
+ "BITCASTS, and try again\n");
MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits());
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
- DEBUG(
- dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
- Val.dump();
- );
+ LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
+ Val.dump(););
Val = LowerBUILD_VECTOR(Val, DAG);
if (Val.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
@@ -7034,8 +7039,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
// This will generate a load from the constant pool.
if (isConstant) {
- DEBUG(dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
- "expansion\n");
+ LLVM_DEBUG(
+ dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
+ "expansion\n");
return SDValue();
}
@@ -7052,8 +7058,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
// shuffle is valid for the target) and materialization element by element
// on the stack followed by a load for everything else.
if (!isConstant && !usesOnlyOneValue) {
- DEBUG(dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
- "of INSERT_VECTOR_ELT\n");
+ LLVM_DEBUG(
+ dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
+ "of INSERT_VECTOR_ELT\n");
SDValue Vec = DAG.getUNDEF(VT);
SDValue Op0 = Op.getOperand(0);
@@ -7070,14 +7077,12 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
// extended (i32) and it is safe to cast them to the vector type by ignoring
// the upper bits of the lowest lane (e.g. v8i8, v4i16).
if (!Op0.isUndef()) {
- DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
+ LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
++i;
}
- DEBUG(
- if (i < NumElts)
- dbgs() << "Creating nodes for the other vector elements:\n";
- );
+ LLVM_DEBUG(if (i < NumElts) dbgs()
+ << "Creating nodes for the other vector elements:\n";);
for (; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
if (V.isUndef())
@@ -7088,8 +7093,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
return Vec;
}
- DEBUG(dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
- "better alternative\n");
+ LLVM_DEBUG(
+ dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
+ "better alternative\n");
return SDValue();
}
@@ -8216,15 +8222,16 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
// 12-bit optionally shifted immediates are legal for adds.
bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
if (Immed == std::numeric_limits<int64_t>::min()) {
- DEBUG(dbgs() << "Illegal add imm " << Immed << ": avoid UB for INT64_MIN\n");
+ LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
+ << ": avoid UB for INT64_MIN\n");
return false;
}
// Same encoding for add/sub, just flip the sign.
Immed = std::abs(Immed);
bool IsLegal = ((Immed >> 12) == 0 ||
((Immed & 0xfff) == 0 && Immed >> 24 == 0));
- DEBUG(dbgs() << "Is " << Immed << " legal add imm: " <<
- (IsLegal ? "yes" : "no") << "\n");
+ LLVM_DEBUG(dbgs() << "Is " << Immed
+ << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
return IsLegal;
}
@@ -9028,7 +9035,8 @@ static SDValue performBitcastCombine(SDNode *N,
SVT.getVectorNumElements() != VT.getVectorNumElements() * 2)
return SDValue();
- DEBUG(dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n");
+ LLVM_DEBUG(
+ dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n");
// Create the simplified form to just extract the low or high half of the
// vector directly rather than bothering with the bitcasts.
@@ -9116,7 +9124,8 @@ static SDValue performConcatVectorsCombine(SDNode *N,
if (!RHSTy.isVector())
return SDValue();
- DEBUG(dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
+ LLVM_DEBUG(
+ dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
RHSTy.getVectorNumElements() * 2);
@@ -10758,7 +10767,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default:
- DEBUG(dbgs() << "Custom combining: skipping\n");
+ LLVM_DEBUG(dbgs() << "Custom combining: skipping\n");
break;
case ISD::ADD:
case ISD::SUB:
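Several hunks in this file touch the same immediate-legality check: an AArch64 arithmetic immediate is legal when it fits in 12 bits, optionally shifted left by 12. A standalone restatement of that predicate with a couple of worked values (the function name is ours, not LLVM's):

    #include <cstdint>

    // Same test as isLegalArithImmed()/isLegalAddImmediate() above:
    // either C fits in 12 bits, or its low 12 bits are zero and the
    // remaining value fits in the next 12 bits.
    static bool isLegal12BitShiftedImm(uint64_t C) {
      return (C >> 12) == 0 || ((C & 0xFFFULL) == 0 && (C >> 24) == 0);
    }

    // 0xABC    -> legal (fits in 12 bits)
    // 0xABC000 -> legal (0xABC shifted left by 12)
    // 0xABCDEF -> not legal; it must be materialized into a register first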
diff --git a/lib/Target/AArch64/AArch64InstructionSelector.cpp b/lib/Target/AArch64/AArch64InstructionSelector.cpp
index c7ece250aa87..4d7ca2349ed1 100644
--- a/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -174,7 +174,7 @@ static bool unsupportedBinOp(const MachineInstr &I,
const AArch64RegisterInfo &TRI) {
LLT Ty = MRI.getType(I.getOperand(0).getReg());
if (!Ty.isValid()) {
- DEBUG(dbgs() << "Generic binop register should be typed\n");
+ LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
return true;
}
@@ -182,7 +182,7 @@ static bool unsupportedBinOp(const MachineInstr &I,
for (auto &MO : I.operands()) {
// FIXME: Support non-register operands.
if (!MO.isReg()) {
- DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
+ LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
return true;
}
@@ -191,18 +191,18 @@ static bool unsupportedBinOp(const MachineInstr &I,
// bank out of the minimal class for the register.
// Either way, this needs to be documented (and possibly verified).
if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
- DEBUG(dbgs() << "Generic inst has physical register operand\n");
+ LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
return true;
}
const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
if (!OpBank) {
- DEBUG(dbgs() << "Generic register has no bank or class\n");
+ LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
return true;
}
if (PrevOpBank && OpBank != PrevOpBank) {
- DEBUG(dbgs() << "Generic inst operands have different banks\n");
+ LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
return true;
}
PrevOpBank = OpBank;
@@ -378,7 +378,7 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
const TargetRegisterClass *RC = getRegClassForTypeOnBank(
MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true);
if (!RC) {
- DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
+ LLVM_DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
return false;
}
@@ -412,8 +412,8 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
// we hit another of its use or its defs.
// Copies do not have constraints.
if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
I.setDesc(TII.get(AArch64::COPY));
@@ -686,13 +686,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
if (!DefRC) {
if (!DefTy.isValid()) {
- DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
+ LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
return false;
}
const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
if (!DefRC) {
- DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
+ LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
return false;
}
}
@@ -710,7 +710,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
if (I.getNumOperands() != I.getNumExplicitOperands()) {
- DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
+ LLVM_DEBUG(
+ dbgs() << "Generic instruction has unexpected implicit operands\n");
return false;
}
@@ -726,8 +727,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
// We shouldn't need this on AArch64, but it would be implemented as an
// EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
// bit being tested is < 32.
- DEBUG(dbgs() << "G_BRCOND has type: " << Ty
- << ", expected at most 32-bits");
+ LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty
+ << ", expected at most 32-bits");
return false;
}
@@ -767,15 +768,16 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
// FIXME: Redundant check, but even less readable when factored out.
if (isFP) {
if (Ty != s32 && Ty != s64) {
- DEBUG(dbgs() << "Unable to materialize FP " << Ty
- << " constant, expected: " << s32 << " or " << s64
- << '\n');
+ LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
+ << " constant, expected: " << s32 << " or " << s64
+ << '\n');
return false;
}
if (RB.getID() != AArch64::FPRRegBankID) {
- DEBUG(dbgs() << "Unable to materialize FP " << Ty
- << " constant on bank: " << RB << ", expected: FPR\n");
+ LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
+ << " constant on bank: " << RB
+ << ", expected: FPR\n");
return false;
}
@@ -786,15 +788,16 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
} else {
// s32 and s64 are covered by tablegen.
if (Ty != p0) {
- DEBUG(dbgs() << "Unable to materialize integer " << Ty
- << " constant, expected: " << s32 << ", " << s64 << ", or "
- << p0 << '\n');
+ LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
+ << " constant, expected: " << s32 << ", " << s64
+ << ", or " << p0 << '\n');
return false;
}
if (RB.getID() != AArch64::GPRRegBankID) {
- DEBUG(dbgs() << "Unable to materialize integer " << Ty
- << " constant on bank: " << RB << ", expected: GPR\n");
+ LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
+ << " constant on bank: " << RB
+ << ", expected: GPR\n");
return false;
}
}
@@ -820,7 +823,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
.addUse(DefGPRReg);
if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
return false;
}
@@ -908,8 +911,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
case TargetOpcode::G_FRAME_INDEX: {
// allocas and G_FRAME_INDEX are only supported in addrspace(0).
if (Ty != LLT::pointer(0, 64)) {
- DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
- << ", expected: " << LLT::pointer(0, 64) << '\n');
+ LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
+ << ", expected: " << LLT::pointer(0, 64) << '\n');
return false;
}
I.setDesc(TII.get(AArch64::ADDXri));
@@ -980,14 +983,14 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
if (PtrTy != LLT::pointer(0, 64)) {
- DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
- << ", expected: " << LLT::pointer(0, 64) << '\n');
+ LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
+ << ", expected: " << LLT::pointer(0, 64) << '\n');
return false;
}
auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
- DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
return false;
}
unsigned MemSizeInBits = MemOp.getSize() * 8;
@@ -1066,13 +1069,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
if (RB.getID() != AArch64::GPRRegBankID) {
- DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
+ LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
return false;
}
if (Ty != LLT::scalar(64)) {
- DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
- << ", expected: " << LLT::scalar(64) << '\n');
+ LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
+ << ", expected: " << LLT::scalar(64) << '\n');
return false;
}
@@ -1138,7 +1141,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
if (DstRB.getID() != SrcRB.getID()) {
- DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
+ LLVM_DEBUG(
+ dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
return false;
}
@@ -1155,7 +1159,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
return false;
}
@@ -1169,7 +1173,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
SrcRC == &AArch64::GPR64RegClass) {
I.getOperand(1).setSubReg(AArch64::sub_32);
} else {
- DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
+ LLVM_DEBUG(
+ dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
return false;
}
@@ -1192,26 +1197,28 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
if (RBDst.getID() != AArch64::GPRRegBankID) {
- DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
+ LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
+ << ", expected: GPR\n");
return false;
}
const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
if (RBSrc.getID() != AArch64::GPRRegBankID) {
- DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
+ LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
+ << ", expected: GPR\n");
return false;
}
const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
if (DstSize == 0) {
- DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
+ LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
return false;
}
if (DstSize != 64 && DstSize > 32) {
- DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
- << ", expected: 32 or 64\n");
+ LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
+ << ", expected: 32 or 64\n");
return false;
}
// At this point G_ANYEXT is just like a plain COPY, but we need
@@ -1239,8 +1246,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
if (RB.getID() != AArch64::GPRRegBankID) {
- DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
- << ", expected: GPR\n");
+ LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
+ << ", expected: GPR\n");
return false;
}
@@ -1248,8 +1255,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
if (DstTy == LLT::scalar(64)) {
// FIXME: Can we avoid manually doing this?
if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
+ << " operand\n");
return false;
}
@@ -1317,8 +1324,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
case TargetOpcode::G_SELECT: {
if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
- DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
- << ", expected: " << LLT::scalar(1) << '\n');
+ LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
+ << ", expected: " << LLT::scalar(1) << '\n');
return false;
}
@@ -1356,8 +1363,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
}
case TargetOpcode::G_ICMP: {
if (Ty != LLT::scalar(32)) {
- DEBUG(dbgs() << "G_ICMP result has type: " << Ty
- << ", expected: " << LLT::scalar(32) << '\n');
+ LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
+ << ", expected: " << LLT::scalar(32) << '\n');
return false;
}
@@ -1403,8 +1410,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
case TargetOpcode::G_FCMP: {
if (Ty != LLT::scalar(32)) {
- DEBUG(dbgs() << "G_FCMP result has type: " << Ty
- << ", expected: " << LLT::scalar(32) << '\n');
+ LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty
+ << ", expected: " << LLT::scalar(32) << '\n');
return false;
}
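
For reference, a minimal sketch of the single-statement form these conversions use; the "aarch64-isel" tag and the helper below are illustrative, not taken from this patch. LLVM_DEBUG only emits output in builds with assertions enabled and when -debug or -debug-only=<tag> is passed, and it requires DEBUG_TYPE to be defined in the translation unit:

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "aarch64-isel" // LLVM_DEBUG filters on this tag

    static bool selectExample(bool Supported) {
      if (!Supported) {
        // Same shape as the one-line conversions above: DEBUG(...) becomes LLVM_DEBUG(...).
        LLVM_DEBUG(llvm::dbgs() << "example: unsupported operation, bailing out\n");
        return false;
      }
      return true;
    }
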
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 14b9e6751768..4a19ecd69103 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -706,13 +706,13 @@ AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
.setMIFlags(I->mergeFlagsWith(*MergeMI));
(void)MIB;
- DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n ");
- DEBUG(I->print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG(MergeMI->print(dbgs()));
- DEBUG(dbgs() << " with instruction:\n ");
- DEBUG(((MachineInstr *)MIB)->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n ");
+ LLVM_DEBUG(I->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG(MergeMI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " with instruction:\n ");
+ LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
// Erase the old instructions.
I->eraseFromParent();
@@ -824,11 +824,12 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
(void)MIB;
- DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n ");
- DEBUG(I->print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG(Paired->print(dbgs()));
- DEBUG(dbgs() << " with instruction:\n ");
+ LLVM_DEBUG(
+ dbgs() << "Creating pair load/store. Replacing instructions:\n ");
+ LLVM_DEBUG(I->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG(Paired->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " with instruction:\n ");
if (SExtIdx != -1) {
// Generate the sign extension for the proper result of the ldp.
// I.e., with X1, that would be:
@@ -842,8 +843,8 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
// Update the result of LDP to use the W instead of the X variant.
DstMO.setReg(DstRegW);
- DEBUG(((MachineInstr *)MIB)->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
// Make the machine verifier happy by providing a definition for
// the X register.
// Insert this definition right after the generated LDP, i.e., before
@@ -860,12 +861,12 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
.addImm(0)
.addImm(31);
(void)MIBSXTW;
- DEBUG(dbgs() << " Extend operand:\n ");
- DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " Extend operand:\n ");
+ LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
} else {
- DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
// Erase the old instructions.
I->eraseFromParent();
@@ -903,9 +904,9 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
break;
}
}
- DEBUG(dbgs() << "Remove load instruction:\n ");
- DEBUG(LoadI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Remove load instruction:\n ");
+ LLVM_DEBUG(LoadI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
LoadI->eraseFromParent();
return NextI;
}
@@ -979,15 +980,15 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
break;
}
- DEBUG(dbgs() << "Promoting load by replacing :\n ");
- DEBUG(StoreI->print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG(LoadI->print(dbgs()));
- DEBUG(dbgs() << " with instructions:\n ");
- DEBUG(StoreI->print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG((BitExtMI)->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Promoting load by replacing :\n ");
+ LLVM_DEBUG(StoreI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG(LoadI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " with instructions:\n ");
+ LLVM_DEBUG(StoreI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG((BitExtMI)->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
// Erase the old instructions.
LoadI->eraseFromParent();
@@ -1355,18 +1356,18 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
if (IsPreIdx) {
++NumPreFolded;
- DEBUG(dbgs() << "Creating pre-indexed load/store.");
+ LLVM_DEBUG(dbgs() << "Creating pre-indexed load/store.");
} else {
++NumPostFolded;
- DEBUG(dbgs() << "Creating post-indexed load/store.");
+ LLVM_DEBUG(dbgs() << "Creating post-indexed load/store.");
}
- DEBUG(dbgs() << " Replacing instructions:\n ");
- DEBUG(I->print(dbgs()));
- DEBUG(dbgs() << " ");
- DEBUG(Update->print(dbgs()));
- DEBUG(dbgs() << " with instruction:\n ");
- DEBUG(((MachineInstr *)MIB)->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Replacing instructions:\n ");
+ LLVM_DEBUG(I->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " ");
+ LLVM_DEBUG(Update->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " with instruction:\n ");
+ LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
// Erase the old instructions for the block.
I->eraseFromParent();
diff --git a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
index ee6703aed1e2..ccf646575296 100644
--- a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
+++ b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
@@ -164,10 +164,10 @@ bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd,
LiveIntervals &LIs = G.getMetadata().LIS;
if (TRI->isPhysicalRegister(Rd) || TRI->isPhysicalRegister(Ra)) {
- DEBUG(dbgs() << "Rd is a physical reg:" << TRI->isPhysicalRegister(Rd)
- << '\n');
- DEBUG(dbgs() << "Ra is a physical reg:" << TRI->isPhysicalRegister(Ra)
- << '\n');
+ LLVM_DEBUG(dbgs() << "Rd is a physical reg:" << TRI->isPhysicalRegister(Rd)
+ << '\n');
+ LLVM_DEBUG(dbgs() << "Ra is a physical reg:" << TRI->isPhysicalRegister(Ra)
+ << '\n');
return false;
}
@@ -247,14 +247,14 @@ void A57ChainingConstraint::addInterChainConstraint(PBQPRAGraph &G, unsigned Rd,
// Do some Chain management
if (Chains.count(Ra)) {
if (Rd != Ra) {
- DEBUG(dbgs() << "Moving acc chain from " << printReg(Ra, TRI) << " to "
- << printReg(Rd, TRI) << '\n';);
+ LLVM_DEBUG(dbgs() << "Moving acc chain from " << printReg(Ra, TRI)
+ << " to " << printReg(Rd, TRI) << '\n';);
Chains.remove(Ra);
Chains.insert(Rd);
}
} else {
- DEBUG(dbgs() << "Creating new acc chain for " << printReg(Rd, TRI)
- << '\n';);
+ LLVM_DEBUG(dbgs() << "Creating new acc chain for " << printReg(Rd, TRI)
+ << '\n';);
Chains.insert(Rd);
}
@@ -279,7 +279,7 @@ void A57ChainingConstraint::addInterChainConstraint(PBQPRAGraph &G, unsigned Rd,
assert(edge != G.invalidEdgeId() &&
"PBQP error ! The edge should exist !");
- DEBUG(dbgs() << "Refining constraint !\n";);
+ LLVM_DEBUG(dbgs() << "Refining constraint !\n";);
if (G.getEdgeNode1Id(edge) == node2) {
std::swap(node1, node2);
@@ -329,7 +329,7 @@ void A57ChainingConstraint::apply(PBQPRAGraph &G) {
LiveIntervals &LIs = G.getMetadata().LIS;
TRI = MF.getSubtarget().getRegisterInfo();
- DEBUG(MF.dump());
+ LLVM_DEBUG(MF.dump());
for (const auto &MBB: MF) {
Chains.clear(); // FIXME: really needed ? Could not work at MF level ?
@@ -340,8 +340,8 @@ void A57ChainingConstraint::apply(PBQPRAGraph &G) {
for (auto r : Chains) {
SmallVector<unsigned, 8> toDel;
if(regJustKilledBefore(LIs, r, MI)) {
- DEBUG(dbgs() << "Killing chain " << printReg(r, TRI) << " at ";
- MI.print(dbgs()););
+ LLVM_DEBUG(dbgs() << "Killing chain " << printReg(r, TRI) << " at ";
+ MI.print(dbgs()););
toDel.push_back(r);
}
diff --git a/lib/Target/AArch64/AArch64PromoteConstant.cpp b/lib/Target/AArch64/AArch64PromoteConstant.cpp
index a8dc6e74ef6a..01d8a35bbc23 100644
--- a/lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ b/lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -119,7 +119,7 @@ public:
/// Iterate over the functions and promote the interesting constants into
/// global variables with module scope.
bool runOnModule(Module &M) override {
- DEBUG(dbgs() << getPassName() << '\n');
+ LLVM_DEBUG(dbgs() << getPassName() << '\n');
if (skipModule(M))
return false;
bool Changed = false;
@@ -380,9 +380,9 @@ bool AArch64PromoteConstant::isDominated(Instruction *NewPt, Instruction *User,
(IPI.first->getParent() != NewPt->getParent() &&
DT.dominates(IPI.first->getParent(), NewPt->getParent()))) {
// No need to insert this point. Just record the dominated use.
- DEBUG(dbgs() << "Insertion point dominated by:\n");
- DEBUG(IPI.first->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Insertion point dominated by:\n");
+ LLVM_DEBUG(IPI.first->print(dbgs()));
+ LLVM_DEBUG(dbgs() << '\n');
IPI.second.emplace_back(User, OpNo);
return true;
}
@@ -408,9 +408,9 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Instruction *User,
// Instructions are in the same block.
// By construction, NewPt is dominating the other.
// Indeed, isDominated returned false with the exact same arguments.
- DEBUG(dbgs() << "Merge insertion point with:\n");
- DEBUG(IPI->first->print(dbgs()));
- DEBUG(dbgs() << "\nat considered insertion point.\n");
+ LLVM_DEBUG(dbgs() << "Merge insertion point with:\n");
+ LLVM_DEBUG(IPI->first->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\nat considered insertion point.\n");
appendAndTransferDominatedUses(NewPt, User, OpNo, IPI, InsertPts);
return true;
}
@@ -430,11 +430,11 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Instruction *User,
}
// else, CommonDominator is the block of NewBB, hence NewBB is the last
// possible insertion point in that block.
- DEBUG(dbgs() << "Merge insertion point with:\n");
- DEBUG(IPI->first->print(dbgs()));
- DEBUG(dbgs() << '\n');
- DEBUG(NewPt->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Merge insertion point with:\n");
+ LLVM_DEBUG(IPI->first->print(dbgs()));
+ LLVM_DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(NewPt->print(dbgs()));
+ LLVM_DEBUG(dbgs() << '\n');
appendAndTransferDominatedUses(NewPt, User, OpNo, IPI, InsertPts);
return true;
}
@@ -443,15 +443,15 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Instruction *User,
void AArch64PromoteConstant::computeInsertionPoint(
Instruction *User, unsigned OpNo, InsertionPoints &InsertPts) {
- DEBUG(dbgs() << "Considered use, opidx " << OpNo << ":\n");
- DEBUG(User->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Considered use, opidx " << OpNo << ":\n");
+ LLVM_DEBUG(User->print(dbgs()));
+ LLVM_DEBUG(dbgs() << '\n');
Instruction *InsertionPoint = findInsertionPoint(*User, OpNo);
- DEBUG(dbgs() << "Considered insertion point:\n");
- DEBUG(InsertionPoint->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Considered insertion point:\n");
+ LLVM_DEBUG(InsertionPoint->print(dbgs()));
+ LLVM_DEBUG(dbgs() << '\n');
if (isDominated(InsertionPoint, User, OpNo, InsertPts))
return;
@@ -460,7 +460,7 @@ void AArch64PromoteConstant::computeInsertionPoint(
if (tryAndMerge(InsertionPoint, User, OpNo, InsertPts))
return;
- DEBUG(dbgs() << "Keep considered insertion point\n");
+ LLVM_DEBUG(dbgs() << "Keep considered insertion point\n");
// It is definitely useful by its own
InsertPts[InsertionPoint].emplace_back(User, OpNo);
@@ -476,9 +476,9 @@ static void ensurePromotedGV(Function &F, Constant &C,
*F.getParent(), C.getType(), true, GlobalValue::InternalLinkage, nullptr,
"_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
PC.GV->setInitializer(&C);
- DEBUG(dbgs() << "Global replacement: ");
- DEBUG(PC.GV->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Global replacement: ");
+ LLVM_DEBUG(PC.GV->print(dbgs()));
+ LLVM_DEBUG(dbgs() << '\n');
++NumPromoted;
}
@@ -495,10 +495,10 @@ void AArch64PromoteConstant::insertDefinitions(Function &F,
// Create the load of the global variable.
IRBuilder<> Builder(IPI.first);
LoadInst *LoadedCst = Builder.CreateLoad(&PromotedGV);
- DEBUG(dbgs() << "**********\n");
- DEBUG(dbgs() << "New def: ");
- DEBUG(LoadedCst->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "**********\n");
+ LLVM_DEBUG(dbgs() << "New def: ");
+ LLVM_DEBUG(LoadedCst->print(dbgs()));
+ LLVM_DEBUG(dbgs() << '\n');
// Update the dominated uses.
for (auto Use : IPI.second) {
@@ -507,11 +507,11 @@ void AArch64PromoteConstant::insertDefinitions(Function &F,
findInsertionPoint(*Use.first, Use.second)) &&
"Inserted definition does not dominate all its uses!");
#endif
- DEBUG({
- dbgs() << "Use to update " << Use.second << ":";
- Use.first->print(dbgs());
- dbgs() << '\n';
- });
+ LLVM_DEBUG({
+ dbgs() << "Use to update " << Use.second << ":";
+ Use.first->print(dbgs());
+ dbgs() << '\n';
+ });
Use.first->setOperand(Use.second, LoadedCst);
++NumPromotedUses;
}
@@ -523,7 +523,7 @@ void AArch64PromoteConstant::promoteConstants(
PromotionCacheTy &PromotionCache) {
// Promote the constants.
for (auto U = Updates.begin(), E = Updates.end(); U != E;) {
- DEBUG(dbgs() << "** Compute insertion points **\n");
+ LLVM_DEBUG(dbgs() << "** Compute insertion points **\n");
auto First = U;
Constant *C = First->C;
InsertionPoints InsertPts;
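
As a hedged illustration of the braced form converted in insertDefinitions above: LLVM_DEBUG also accepts a compound statement, so several stream writes and print calls can share one guard. The tag and the reporting helper below are placeholders, not code from this patch:

    #include "llvm/IR/Use.h"
    #include "llvm/IR/User.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "aarch64-promote-const" // illustrative tag

    static void reportUpdatedUse(const llvm::Use &U) {
      LLVM_DEBUG({
        // The whole block expands to nothing in NDEBUG builds and is skipped
        // at run time unless -debug or the matching -debug-only tag is active.
        llvm::dbgs() << "Use to update " << U.getOperandNo() << ":";
        U.getUser()->print(llvm::dbgs());
        llvm::dbgs() << '\n';
      });
    }
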
diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
index e5822b114324..fac9ec3e74cb 100644
--- a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
+++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -427,9 +427,9 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
}
if (IsCopy)
- DEBUG(dbgs() << "Remove redundant Copy : " << *MI);
+ LLVM_DEBUG(dbgs() << "Remove redundant Copy : " << *MI);
else
- DEBUG(dbgs() << "Remove redundant Move : " << *MI);
+ LLVM_DEBUG(dbgs() << "Remove redundant Move : " << *MI);
MI->eraseFromParent();
Changed = true;
@@ -473,8 +473,8 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
// Clear kills in the range where changes were made. This is conservative,
// but should be okay since kill markers are being phased out.
- DEBUG(dbgs() << "Clearing kill flags.\n\tFirstUse: " << *FirstUse
- << "\tLastChange: " << *LastChange);
+ LLVM_DEBUG(dbgs() << "Clearing kill flags.\n\tFirstUse: " << *FirstUse
+ << "\tLastChange: " << *LastChange);
for (MachineInstr &MMI : make_range(FirstUse, PredMBB->end()))
MMI.clearKillInfo();
for (MachineInstr &MMI : make_range(MBB->begin(), LastChange))
diff --git a/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 2f7a597f3bf1..fc7b5984fe3e 100644
--- a/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -91,9 +91,9 @@ bool AArch64StorePairSuppress::shouldAddSTPToBlock(const MachineBasicBlock *BB)
if (SCDesc->isValid() && !SCDesc->isVariant()) {
unsigned ResLenWithSTP = BBTrace.getResourceLength(None, SCDesc);
if (ResLenWithSTP > ResLength) {
- DEBUG(dbgs() << " Suppress STP in BB: " << BB->getNumber()
- << " resources " << ResLength << " -> " << ResLenWithSTP
- << "\n");
+ LLVM_DEBUG(dbgs() << " Suppress STP in BB: " << BB->getNumber()
+ << " resources " << ResLength << " -> " << ResLenWithSTP
+ << "\n");
return false;
}
}
@@ -131,10 +131,10 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');
if (!SchedModel.hasInstrSchedModel()) {
- DEBUG(dbgs() << " Skipping pass: no machine model present.\n");
+ LLVM_DEBUG(dbgs() << " Skipping pass: no machine model present.\n");
return false;
}
@@ -156,7 +156,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
break;
// Otherwise, continue unpairing the stores in this block.
- DEBUG(dbgs() << "Unpairing store " << MI << "\n");
+ LLVM_DEBUG(dbgs() << "Unpairing store " << MI << "\n");
SuppressSTP = true;
TII->suppressLdStPair(MI);
}
diff --git a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 098272dc2e2d..7eb4c1c96679 100644
--- a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -728,14 +728,14 @@ getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
};
int StridedLoads = countStridedLoads(L, SE);
- DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
- << " strided loads\n");
+ LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
+ << " strided loads\n");
// Pick the largest power of 2 unroll count that won't result in too many
// strided loads.
if (StridedLoads) {
UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
- DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " << UP.MaxCount
- << '\n');
+ LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
+ << UP.MaxCount << '\n');
}
}
diff --git a/lib/Target/AMDGPU/AMDGPUInline.cpp b/lib/Target/AMDGPU/AMDGPUInline.cpp
index ca77795ce109..35dd9eb0a478 100644
--- a/lib/Target/AMDGPU/AMDGPUInline.cpp
+++ b/lib/Target/AMDGPU/AMDGPUInline.cpp
@@ -161,8 +161,8 @@ static bool isWrapperOnlyCall(CallSite CS) {
return false;
}
if (isa<ReturnInst>(*std::next(I->getIterator()))) {
- DEBUG(dbgs() << " Wrapper only call detected: "
- << Callee->getName() << '\n');
+ LLVM_DEBUG(dbgs() << " Wrapper only call detected: "
+ << Callee->getName() << '\n');
return true;
}
}
diff --git a/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index dd45ced6eccd..7a7ed7a4f065 100644
--- a/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -765,8 +765,7 @@ bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {
ArrayRef<double> tmp(DVal);
nval = ConstantDataVector::get(context, tmp);
}
- DEBUG(errs() << "AMDIC: " << *CI
- << " ---> " << *nval << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
replaceCall(nval);
return true;
}
@@ -776,8 +775,7 @@ bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {
for (int i = 0; i < sz; ++i) {
if (CF->isExactlyValue(ftbl[i].input)) {
Value *nval = ConstantFP::get(CF->getType(), ftbl[i].result);
- DEBUG(errs() << "AMDIC: " << *CI
- << " ---> " << *nval << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
replaceCall(nval);
return true;
}
@@ -798,11 +796,11 @@ bool AMDGPULibCalls::replaceWithNative(CallInst *CI, const FuncInfo &FInfo) {
AMDGPULibFunc nf = FInfo;
nf.setPrefix(AMDGPULibFunc::NATIVE);
if (Constant *FPExpr = getFunction(M, nf)) {
- DEBUG(dbgs() << "AMDIC: " << *CI << " ---> ");
+ LLVM_DEBUG(dbgs() << "AMDIC: " << *CI << " ---> ");
CI->setCalledFunction(FPExpr);
- DEBUG(dbgs() << *CI << '\n');
+ LLVM_DEBUG(dbgs() << *CI << '\n');
return true;
}
@@ -820,8 +818,7 @@ bool AMDGPULibCalls::fold_recip(CallInst *CI, IRBuilder<> &B,
Value *nval = B.CreateFDiv(ConstantFP::get(CF->getType(), 1.0),
opr0,
"recip2div");
- DEBUG(errs() << "AMDIC: " << *CI
- << " ---> " << *nval << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
replaceCall(nval);
return true;
}
@@ -899,7 +896,7 @@ bool AMDGPULibCalls::fold_pow(CallInst *CI, IRBuilder<> &B,
if ((CF && CF->isZero()) || (CINT && ci_opr1 == 0) || CZero) {
// pow/powr/pown(x, 0) == 1
- DEBUG(errs() << "AMDIC: " << *CI << " ---> 1\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> 1\n");
Constant *cnval = ConstantFP::get(eltType, 1.0);
if (getVecSize(FInfo) > 1) {
cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
@@ -909,23 +906,21 @@ bool AMDGPULibCalls::fold_pow(CallInst *CI, IRBuilder<> &B,
}
if ((CF && CF->isExactlyValue(1.0)) || (CINT && ci_opr1 == 1)) {
// pow/powr/pown(x, 1.0) = x
- DEBUG(errs() << "AMDIC: " << *CI
- << " ---> " << *opr0 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << "\n");
replaceCall(opr0);
return true;
}
if ((CF && CF->isExactlyValue(2.0)) || (CINT && ci_opr1 == 2)) {
// pow/powr/pown(x, 2.0) = x*x
- DEBUG(errs() << "AMDIC: " << *CI
- << " ---> " << *opr0 << " * " << *opr0 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << " * " << *opr0
+ << "\n");
Value *nval = B.CreateFMul(opr0, opr0, "__pow2");
replaceCall(nval);
return true;
}
if ((CF && CF->isExactlyValue(-1.0)) || (CINT && ci_opr1 == -1)) {
// pow/powr/pown(x, -1.0) = 1.0/x
- DEBUG(errs() << "AMDIC: " << *CI
- << " ---> 1 / " << *opr0 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> 1 / " << *opr0 << "\n");
Constant *cnval = ConstantFP::get(eltType, 1.0);
if (getVecSize(FInfo) > 1) {
cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
@@ -942,8 +937,8 @@ bool AMDGPULibCalls::fold_pow(CallInst *CI, IRBuilder<> &B,
if (Constant *FPExpr = getFunction(M,
AMDGPULibFunc(issqrt ? AMDGPULibFunc::EI_SQRT
: AMDGPULibFunc::EI_RSQRT, FInfo))) {
- DEBUG(errs() << "AMDIC: " << *CI << " ---> "
- << FInfo.getName().c_str() << "(" << *opr0 << ")\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
+ << FInfo.getName().c_str() << "(" << *opr0 << ")\n");
Value *nval = CreateCallEx(B,FPExpr, opr0, issqrt ? "__pow2sqrt"
: "__pow2rsqrt");
replaceCall(nval);
@@ -999,8 +994,9 @@ bool AMDGPULibCalls::fold_pow(CallInst *CI, IRBuilder<> &B,
}
nval = B.CreateFDiv(cnval, nval, "__1powprod");
}
- DEBUG(errs() << "AMDIC: " << *CI << " ---> "
- << ((ci_opr1 < 0) ? "1/prod(" : "prod(") << *opr0 << ")\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
+ << ((ci_opr1 < 0) ? "1/prod(" : "prod(") << *opr0
+ << ")\n");
replaceCall(nval);
return true;
}
@@ -1137,8 +1133,8 @@ bool AMDGPULibCalls::fold_pow(CallInst *CI, IRBuilder<> &B,
nval = B.CreateBitCast(nval, opr0->getType());
}
- DEBUG(errs() << "AMDIC: " << *CI << " ---> "
- << "exp2(" << *opr1 << " * log2(" << *opr0 << "))\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
+ << "exp2(" << *opr1 << " * log2(" << *opr0 << "))\n");
replaceCall(nval);
return true;
@@ -1155,8 +1151,7 @@ bool AMDGPULibCalls::fold_rootn(CallInst *CI, IRBuilder<> &B,
}
int ci_opr1 = (int)CINT->getSExtValue();
if (ci_opr1 == 1) { // rootn(x, 1) = x
- DEBUG(errs() << "AMDIC: " << *CI
- << " ---> " << *opr0 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << "\n");
replaceCall(opr0);
return true;
}
@@ -1166,7 +1161,7 @@ bool AMDGPULibCalls::fold_rootn(CallInst *CI, IRBuilder<> &B,
Module *M = CI->getModule();
if (Constant *FPExpr = getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_SQRT,
FInfo))) {
- DEBUG(errs() << "AMDIC: " << *CI << " ---> sqrt(" << *opr0 << ")\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> sqrt(" << *opr0 << ")\n");
Value *nval = CreateCallEx(B,FPExpr, opr0, "__rootn2sqrt");
replaceCall(nval);
return true;
@@ -1175,13 +1170,13 @@ bool AMDGPULibCalls::fold_rootn(CallInst *CI, IRBuilder<> &B,
Module *M = CI->getModule();
if (Constant *FPExpr = getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_CBRT,
FInfo))) {
- DEBUG(errs() << "AMDIC: " << *CI << " ---> cbrt(" << *opr0 << ")\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> cbrt(" << *opr0 << ")\n");
Value *nval = CreateCallEx(B,FPExpr, opr0, "__rootn2cbrt");
replaceCall(nval);
return true;
}
} else if (ci_opr1 == -1) { // rootn(x, -1) = 1.0/x
- DEBUG(errs() << "AMDIC: " << *CI << " ---> 1.0 / " << *opr0 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> 1.0 / " << *opr0 << "\n");
Value *nval = B.CreateFDiv(ConstantFP::get(opr0->getType(), 1.0),
opr0,
"__rootn2div");
@@ -1193,7 +1188,8 @@ bool AMDGPULibCalls::fold_rootn(CallInst *CI, IRBuilder<> &B,
Module *M = CI->getModule();
if (Constant *FPExpr = getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_RSQRT,
FInfo))) {
- DEBUG(errs() << "AMDIC: " << *CI << " ---> rsqrt(" << *opr0 << ")\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> rsqrt(" << *opr0
+ << ")\n");
Value *nval = CreateCallEx(B,FPExpr, opr0, "__rootn2rsqrt");
replaceCall(nval);
return true;
@@ -1212,22 +1208,22 @@ bool AMDGPULibCalls::fold_fma_mad(CallInst *CI, IRBuilder<> &B,
ConstantFP *CF1 = dyn_cast<ConstantFP>(opr1);
if ((CF0 && CF0->isZero()) || (CF1 && CF1->isZero())) {
// fma/mad(a, b, c) = c if a=0 || b=0
- DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr2 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr2 << "\n");
replaceCall(opr2);
return true;
}
if (CF0 && CF0->isExactlyValue(1.0f)) {
// fma/mad(a, b, c) = b+c if a=1
- DEBUG(errs() << "AMDIC: " << *CI << " ---> "
- << *opr1 << " + " << *opr2 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr1 << " + " << *opr2
+ << "\n");
Value *nval = B.CreateFAdd(opr1, opr2, "fmaadd");
replaceCall(nval);
return true;
}
if (CF1 && CF1->isExactlyValue(1.0f)) {
// fma/mad(a, b, c) = a+c if b=1
- DEBUG(errs() << "AMDIC: " << *CI << " ---> "
- << *opr0 << " + " << *opr2 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << " + " << *opr2
+ << "\n");
Value *nval = B.CreateFAdd(opr0, opr2, "fmaadd");
replaceCall(nval);
return true;
@@ -1235,8 +1231,8 @@ bool AMDGPULibCalls::fold_fma_mad(CallInst *CI, IRBuilder<> &B,
if (ConstantFP *CF = dyn_cast<ConstantFP>(opr2)) {
if (CF->isZero()) {
// fma/mad(a, b, c) = a*b if c=0
- DEBUG(errs() << "AMDIC: " << *CI << " ---> "
- << *opr0 << " * " << *opr1 << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << " * "
+ << *opr1 << "\n");
Value *nval = B.CreateFMul(opr0, opr1, "fmamul");
replaceCall(nval);
return true;
@@ -1263,8 +1259,8 @@ bool AMDGPULibCalls::fold_sqrt(CallInst *CI, IRBuilder<> &B,
if (Constant *FPExpr = getNativeFunction(
CI->getModule(), AMDGPULibFunc(AMDGPULibFunc::EI_SQRT, FInfo))) {
Value *opr0 = CI->getArgOperand(0);
- DEBUG(errs() << "AMDIC: " << *CI << " ---> "
- << "sqrt(" << *opr0 << ")\n");
+ LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
+ << "sqrt(" << *opr0 << ")\n");
Value *nval = CreateCallEx(B,FPExpr, opr0, "__sqrt");
replaceCall(nval);
return true;
@@ -1355,8 +1351,8 @@ bool AMDGPULibCalls::fold_sincos(CallInst *CI, IRBuilder<> &B,
P = B.CreateAddrSpaceCast(Alloc, PTy);
CallInst *Call = CreateCallEx2(B, Fsincos, UI->getArgOperand(0), P);
- DEBUG(errs() << "AMDIC: fold_sincos (" << *CI << ", " << *UI
- << ") with " << *Call << "\n");
+ LLVM_DEBUG(errs() << "AMDIC: fold_sincos (" << *CI << ", " << *UI << ") with "
+ << *Call << "\n");
if (!isSin) { // CI->cos, UI->sin
B.SetInsertPoint(&*ItOld);
@@ -1719,9 +1715,8 @@ bool AMDGPUSimplifyLibCalls::runOnFunction(Function &F) {
bool Changed = false;
auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- DEBUG(dbgs() << "AMDIC: process function ";
- F.printAsOperand(dbgs(), false, F.getParent());
- dbgs() << '\n';);
+ LLVM_DEBUG(dbgs() << "AMDIC: process function ";
+ F.printAsOperand(dbgs(), false, F.getParent()); dbgs() << '\n';);
if (!EnablePreLink)
Changed |= setFastFlags(F, Options);
@@ -1737,8 +1732,8 @@ bool AMDGPUSimplifyLibCalls::runOnFunction(Function &F) {
Function *Callee = CI->getCalledFunction();
if (Callee == 0) continue;
- DEBUG(dbgs() << "AMDIC: try folding " << *CI << "\n";
- dbgs().flush());
+ LLVM_DEBUG(dbgs() << "AMDIC: try folding " << *CI << "\n";
+ dbgs().flush());
if(Simplifier.fold(CI, AA))
Changed = true;
}
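
A minimal sketch of the statement-sequence form used in AMDGPUSimplifyLibCalls::runOnFunction above: the macro guards its entire argument, so multiple statements separated by semicolons need no extra braces. The tag and helper are assumptions for illustration only:

    #include "llvm/IR/Function.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    #define DEBUG_TYPE "amdgpu-simplifylib" // illustrative tag

    static void traceFunction(const llvm::Function &F) {
      // Prints the function as an operand (e.g. "@foo") only when debug output is enabled.
      LLVM_DEBUG(llvm::dbgs() << "processing function ";
                 F.printAsOperand(llvm::dbgs(), /*PrintType=*/false, F.getParent());
                 llvm::dbgs() << '\n');
    }
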
diff --git a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index b1d61fd3edef..612777981f9b 100644
--- a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -659,7 +659,7 @@ RegionMRT *MRT::buildMRT(MachineFunction &MF,
continue;
}
- DEBUG(dbgs() << "Visiting " << printMBBReference(*MBB) << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting " << printMBBReference(*MBB) << "\n");
MBBMRT *NewMBB = new MBBMRT(MBB);
MachineRegion *Region = RegionInfo->getRegionFor(MBB);
@@ -696,18 +696,19 @@ void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
if (TRI->isVirtualRegister(Reg)) {
- DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
+ << "\n");
// If this is a source register to a PHI we are chaining, it
// must be live out.
if (PHIInfo.isSource(Reg)) {
- DEBUG(dbgs() << "Add LiveOut (PHI): " << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Add LiveOut (PHI): " << printReg(Reg, TRI) << "\n");
addLiveOut(Reg);
} else {
// If this is live out of the MBB
for (auto &UI : MRI->use_operands(Reg)) {
if (UI.getParent()->getParent() != MBB) {
- DEBUG(dbgs() << "Add LiveOut (MBB " << printMBBReference(*MBB)
- << "): " << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Add LiveOut (MBB " << printMBBReference(*MBB)
+ << "): " << printReg(Reg, TRI) << "\n");
addLiveOut(Reg);
} else {
// If the use is in the same MBB we have to make sure
@@ -718,8 +719,8 @@ void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
MIE = UseInstr->getParent()->instr_end();
MII != MIE; ++MII) {
if ((&(*MII)) == DefInstr) {
- DEBUG(dbgs() << "Add LiveOut (Loop): " << printReg(Reg, TRI)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Add LiveOut (Loop): " << printReg(Reg, TRI)
+ << "\n");
addLiveOut(Reg);
}
}
@@ -735,11 +736,12 @@ void LinearizedRegion::storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
if (TRI->isVirtualRegister(Reg)) {
- DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Considering Register: " << printReg(Reg, TRI)
+ << "\n");
for (auto &UI : MRI->use_operands(Reg)) {
if (!Region->contains(UI.getParent()->getParent())) {
- DEBUG(dbgs() << "Add LiveOut (Region " << (void *)Region
- << "): " << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Add LiveOut (Region " << (void *)Region
+ << "): " << printReg(Reg, TRI) << "\n");
addLiveOut(Reg);
}
}
@@ -750,8 +752,8 @@ void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI,
PHILinearize &PHIInfo) {
- DEBUG(dbgs() << "-Store Live Outs Begin (" << printMBBReference(*MBB)
- << ")-\n");
+ LLVM_DEBUG(dbgs() << "-Store Live Outs Begin (" << printMBBReference(*MBB)
+ << ")-\n");
for (auto &II : *MBB) {
for (auto &RI : II.defs()) {
storeLiveOutReg(MBB, RI.getReg(), RI.getParent(), MRI, TRI, PHIInfo);
@@ -775,9 +777,10 @@ void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB,
for (int i = 0; i < numPreds; ++i) {
if (getPHIPred(PHI, i) == MBB) {
unsigned PHIReg = getPHISourceReg(PHI, i);
- DEBUG(dbgs() << "Add LiveOut (PhiSource " << printMBBReference(*MBB)
- << " -> " << printMBBReference(*(*SI))
- << "): " << printReg(PHIReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Add LiveOut (PhiSource " << printMBBReference(*MBB)
+ << " -> " << printMBBReference(*(*SI))
+ << "): " << printReg(PHIReg, TRI) << "\n");
addLiveOut(PHIReg);
}
}
@@ -785,7 +788,7 @@ void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB,
}
}
- DEBUG(dbgs() << "-Store Live Outs Endn-\n");
+ LLVM_DEBUG(dbgs() << "-Store Live Outs Endn-\n");
}
void LinearizedRegion::storeMBBLiveOuts(MachineBasicBlock *MBB,
@@ -845,8 +848,8 @@ void LinearizedRegion::storeLiveOuts(RegionMRT *Region,
for (int i = 0; i < numPreds; ++i) {
if (Region->contains(getPHIPred(PHI, i))) {
unsigned PHIReg = getPHISourceReg(PHI, i);
- DEBUG(dbgs() << "Add Region LiveOut (" << (void *)Region
- << "): " << printReg(PHIReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Add Region LiveOut (" << (void *)Region
+ << "): " << printReg(PHIReg, TRI) << "\n");
addLiveOut(PHIReg);
}
}
@@ -910,20 +913,21 @@ void LinearizedRegion::replaceRegister(unsigned Register, unsigned NewRegister,
bool IncludeLoopPHI) {
assert(Register != NewRegister && "Cannot replace a reg with itself");
- DEBUG(dbgs() << "Pepareing to replace register (region): "
- << printReg(Register, MRI->getTargetRegisterInfo()) << " with "
- << printReg(NewRegister, MRI->getTargetRegisterInfo()) << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Pepareing to replace register (region): "
+ << printReg(Register, MRI->getTargetRegisterInfo()) << " with "
+ << printReg(NewRegister, MRI->getTargetRegisterInfo()) << "\n");
// If we are replacing outside, we also need to update the LiveOuts
if (ReplaceOutside &&
(isLiveOut(Register) || this->getParent()->isLiveOut(Register))) {
LinearizedRegion *Current = this;
while (Current != nullptr && Current->getEntry() != nullptr) {
- DEBUG(dbgs() << "Region before register replace\n");
- DEBUG(Current->print(dbgs(), MRI->getTargetRegisterInfo()));
+ LLVM_DEBUG(dbgs() << "Region before register replace\n");
+ LLVM_DEBUG(Current->print(dbgs(), MRI->getTargetRegisterInfo()));
Current->replaceLiveOut(Register, NewRegister);
- DEBUG(dbgs() << "Region after register replace\n");
- DEBUG(Current->print(dbgs(), MRI->getTargetRegisterInfo()));
+ LLVM_DEBUG(dbgs() << "Region after register replace\n");
+ LLVM_DEBUG(Current->print(dbgs(), MRI->getTargetRegisterInfo()));
Current = Current->getParent();
}
}
@@ -947,16 +951,16 @@ void LinearizedRegion::replaceRegister(unsigned Register, unsigned NewRegister,
if (ShouldReplace) {
if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
- DEBUG(dbgs() << "Trying to substitute physical register: "
- << printReg(NewRegister, MRI->getTargetRegisterInfo())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
+ << printReg(NewRegister, MRI->getTargetRegisterInfo())
+ << "\n");
llvm_unreachable("Cannot substitute physical registers");
} else {
- DEBUG(dbgs() << "Replacing register (region): "
- << printReg(Register, MRI->getTargetRegisterInfo())
- << " with "
- << printReg(NewRegister, MRI->getTargetRegisterInfo())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing register (region): "
+ << printReg(Register, MRI->getTargetRegisterInfo())
+ << " with "
+ << printReg(NewRegister, MRI->getTargetRegisterInfo())
+ << "\n");
O.setReg(NewRegister);
}
}
@@ -1023,18 +1027,18 @@ void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) {
if (hasNoDef(Reg, MRI))
continue;
if (!MRI->hasOneDef(Reg)) {
- DEBUG(this->getEntry()->getParent()->dump());
- DEBUG(dbgs() << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(this->getEntry()->getParent()->dump());
+ LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << "\n");
}
if (MRI->def_begin(Reg) == MRI->def_end()) {
- DEBUG(dbgs() << "Register "
- << printReg(Reg, MRI->getTargetRegisterInfo())
- << " has NO defs\n");
+ LLVM_DEBUG(dbgs() << "Register "
+ << printReg(Reg, MRI->getTargetRegisterInfo())
+ << " has NO defs\n");
} else if (!MRI->hasOneDef(Reg)) {
- DEBUG(dbgs() << "Register "
- << printReg(Reg, MRI->getTargetRegisterInfo())
- << " has multiple defs\n");
+ LLVM_DEBUG(dbgs() << "Register "
+ << printReg(Reg, MRI->getTargetRegisterInfo())
+ << " has multiple defs\n");
}
assert(MRI->hasOneDef(Reg) && "Register has multiple definitions");
@@ -1042,8 +1046,8 @@ void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) {
MachineOperand *UseOperand = &(RI);
bool UseIsOutsideDefMBB = Def->getParent()->getParent() != MBB;
if (UseIsOutsideDefMBB && UseOperand->isKill()) {
- DEBUG(dbgs() << "Removing kill flag on register: "
- << printReg(Reg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Removing kill flag on register: "
+ << printReg(Reg, TRI) << "\n");
UseOperand->setIsKill(false);
}
}
@@ -1416,8 +1420,8 @@ void AMDGPUMachineCFGStructurizer::extractKilledPHIs(MachineBasicBlock *MBB) {
MachineInstr &Instr = *I;
if (Instr.isPHI()) {
unsigned PHIDestReg = getPHIDestReg(Instr);
- DEBUG(dbgs() << "Extractking killed phi:\n");
- DEBUG(Instr.dump());
+ LLVM_DEBUG(dbgs() << "Extractking killed phi:\n");
+ LLVM_DEBUG(Instr.dump());
PHIs.insert(&Instr);
PHIInfo.addDest(PHIDestReg, Instr.getDebugLoc());
storePHILinearizationInfoDest(PHIDestReg, Instr);
@@ -1449,9 +1453,10 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
MachineBasicBlock *SourceMBB,
SmallVector<unsigned, 2> &PHIIndices,
unsigned *ReplaceReg) {
- DEBUG(dbgs() << "Shrink PHI: ");
- DEBUG(PHI.dump());
- DEBUG(dbgs() << " to " << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
+ LLVM_DEBUG(dbgs() << "Shrink PHI: ");
+ LLVM_DEBUG(PHI.dump());
+ LLVM_DEBUG(dbgs() << " to " << printReg(getPHIDestReg(PHI), TRI)
+ << " = PHI(");
bool Replaced = false;
unsigned NumInputs = getPHINumInputs(PHI);
@@ -1481,8 +1486,8 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
if (SourceMBB) {
MIB.addReg(CombinedSourceReg);
MIB.addMBB(SourceMBB);
- DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
- << printMBBReference(*SourceMBB));
+ LLVM_DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
+ << printMBBReference(*SourceMBB));
}
for (unsigned i = 0; i < NumInputs; ++i) {
@@ -1493,10 +1498,10 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
MIB.addReg(SourceReg);
MIB.addMBB(SourcePred);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
- << printMBBReference(*SourcePred));
+ LLVM_DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*SourcePred));
}
- DEBUG(dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << ")\n");
}
PHI.eraseFromParent();
return Replaced;
@@ -1505,9 +1510,10 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
void AMDGPUMachineCFGStructurizer::replacePHI(
MachineInstr &PHI, unsigned CombinedSourceReg, MachineBasicBlock *LastMerge,
SmallVector<unsigned, 2> &PHIRegionIndices) {
- DEBUG(dbgs() << "Replace PHI: ");
- DEBUG(PHI.dump());
- DEBUG(dbgs() << " with " << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
+ LLVM_DEBUG(dbgs() << "Replace PHI: ");
+ LLVM_DEBUG(PHI.dump());
+ LLVM_DEBUG(dbgs() << " with " << printReg(getPHIDestReg(PHI), TRI)
+ << " = PHI(");
bool HasExternalEdge = false;
unsigned NumInputs = getPHINumInputs(PHI);
@@ -1524,8 +1530,8 @@ void AMDGPUMachineCFGStructurizer::replacePHI(
getPHIDestReg(PHI));
MIB.addReg(CombinedSourceReg);
MIB.addMBB(LastMerge);
- DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
- << printMBBReference(*LastMerge));
+ LLVM_DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
+ << printMBBReference(*LastMerge));
for (unsigned i = 0; i < NumInputs; ++i) {
if (isPHIRegionIndex(PHIRegionIndices, i)) {
continue;
@@ -1534,10 +1540,10 @@ void AMDGPUMachineCFGStructurizer::replacePHI(
MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
MIB.addReg(SourceReg);
MIB.addMBB(SourcePred);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
- << printMBBReference(*SourcePred));
+ LLVM_DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*SourcePred));
}
- DEBUG(dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << ")\n");
} else {
replaceRegisterWith(getPHIDestReg(PHI), CombinedSourceReg);
}
@@ -1547,9 +1553,9 @@ void AMDGPUMachineCFGStructurizer::replacePHI(
void AMDGPUMachineCFGStructurizer::replaceEntryPHI(
MachineInstr &PHI, unsigned CombinedSourceReg, MachineBasicBlock *IfMBB,
SmallVector<unsigned, 2> &PHIRegionIndices) {
- DEBUG(dbgs() << "Replace entry PHI: ");
- DEBUG(PHI.dump());
- DEBUG(dbgs() << " with ");
+ LLVM_DEBUG(dbgs() << "Replace entry PHI: ");
+ LLVM_DEBUG(PHI.dump());
+ LLVM_DEBUG(dbgs() << " with ");
unsigned NumInputs = getPHINumInputs(PHI);
unsigned NumNonRegionInputs = NumInputs;
@@ -1562,18 +1568,19 @@ void AMDGPUMachineCFGStructurizer::replaceEntryPHI(
if (NumNonRegionInputs == 0) {
auto DestReg = getPHIDestReg(PHI);
replaceRegisterWith(DestReg, CombinedSourceReg);
- DEBUG(dbgs() << " register " << printReg(CombinedSourceReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << " register " << printReg(CombinedSourceReg, TRI)
+ << "\n");
PHI.eraseFromParent();
} else {
- DEBUG(dbgs() << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
+ LLVM_DEBUG(dbgs() << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
MachineBasicBlock *MBB = PHI.getParent();
MachineInstrBuilder MIB =
BuildMI(*MBB, PHI, PHI.getDebugLoc(), TII->get(TargetOpcode::PHI),
getPHIDestReg(PHI));
MIB.addReg(CombinedSourceReg);
MIB.addMBB(IfMBB);
- DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
- << printMBBReference(*IfMBB));
+ LLVM_DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", "
+ << printMBBReference(*IfMBB));
unsigned NumInputs = getPHINumInputs(PHI);
for (unsigned i = 0; i < NumInputs; ++i) {
if (isPHIRegionIndex(PHIRegionIndices, i)) {
@@ -1583,10 +1590,10 @@ void AMDGPUMachineCFGStructurizer::replaceEntryPHI(
MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
MIB.addReg(SourceReg);
MIB.addMBB(SourcePred);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
- << printMBBReference(*SourcePred));
+ LLVM_DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*SourcePred));
}
- DEBUG(dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << ")\n");
PHI.eraseFromParent();
}
}
@@ -1608,8 +1615,9 @@ void AMDGPUMachineCFGStructurizer::replaceLiveOutRegs(
}
}
- DEBUG(dbgs() << "Register " << printReg(Reg, TRI) << " is "
- << (IsDead ? "dead" : "alive") << " after PHI replace\n");
+ LLVM_DEBUG(dbgs() << "Register " << printReg(Reg, TRI) << " is "
+ << (IsDead ? "dead" : "alive")
+ << " after PHI replace\n");
if (IsDead) {
LRegion->removeLiveOut(Reg);
}
@@ -1683,8 +1691,8 @@ void AMDGPUMachineCFGStructurizer::rewriteRegionEntryPHIs(LinearizedRegion *Regi
void AMDGPUMachineCFGStructurizer::insertUnconditionalBranch(MachineBasicBlock *MBB,
MachineBasicBlock *Dest,
const DebugLoc &DL) {
- DEBUG(dbgs() << "Inserting unconditional branch: " << MBB->getNumber()
- << " -> " << Dest->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Inserting unconditional branch: " << MBB->getNumber()
+ << " -> " << Dest->getNumber() << "\n");
MachineBasicBlock::instr_iterator Terminator = MBB->getFirstInstrTerminator();
bool HasTerminator = Terminator != MBB->instr_end();
if (HasTerminator) {
@@ -1733,7 +1741,8 @@ AMDGPUMachineCFGStructurizer::createLinearizedExitBlock(RegionMRT *Region) {
MF->insert(ExitIter, LastMerge);
LastMerge->addSuccessor(Exit);
insertUnconditionalBranch(LastMerge, Exit);
- DEBUG(dbgs() << "Created exit block: " << LastMerge->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Created exit block: " << LastMerge->getNumber()
+ << "\n");
}
return LastMerge;
}
@@ -1749,11 +1758,12 @@ void AMDGPUMachineCFGStructurizer::insertMergePHI(MachineBasicBlock *IfBB,
if (MergeBB->succ_begin() == MergeBB->succ_end()) {
return;
}
- DEBUG(dbgs() << "Merge PHI (" << printMBBReference(*MergeBB)
- << "): " << printReg(DestRegister, TRI) << " = PHI("
- << printReg(IfSourceRegister, TRI) << ", "
- << printMBBReference(*IfBB) << printReg(CodeSourceRegister, TRI)
- << ", " << printMBBReference(*CodeBB) << ")\n");
+ LLVM_DEBUG(dbgs() << "Merge PHI (" << printMBBReference(*MergeBB)
+ << "): " << printReg(DestRegister, TRI) << " = PHI("
+ << printReg(IfSourceRegister, TRI) << ", "
+ << printMBBReference(*IfBB)
+ << printReg(CodeSourceRegister, TRI) << ", "
+ << printMBBReference(*CodeBB) << ")\n");
const DebugLoc &DL = MergeBB->findDebugLoc(MergeBB->begin());
MachineInstrBuilder MIB = BuildMI(*MergeBB, MergeBB->instr_begin(), DL,
TII->get(TargetOpcode::PHI), DestRegister);
@@ -1811,8 +1821,8 @@ static void removeExternalCFGEdges(MachineBasicBlock *StartMBB,
for (auto SI : Succs) {
std::pair<MachineBasicBlock *, MachineBasicBlock *> Edge = SI;
- DEBUG(dbgs() << "Removing edge: " << printMBBReference(*Edge.first)
- << " -> " << printMBBReference(*Edge.second) << "\n");
+ LLVM_DEBUG(dbgs() << "Removing edge: " << printMBBReference(*Edge.first)
+ << " -> " << printMBBReference(*Edge.second) << "\n");
Edge.first->removeSuccessor(Edge.second);
}
}
@@ -1845,13 +1855,13 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfBlock(
IfBB->addSuccessor(MergeBB);
IfBB->addSuccessor(CodeBBStart);
- DEBUG(dbgs() << "Created If block: " << IfBB->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Created If block: " << IfBB->getNumber() << "\n");
// Ensure that the MergeBB is a successor of the CodeEndBB.
if (!CodeBBEnd->isSuccessor(MergeBB))
CodeBBEnd->addSuccessor(MergeBB);
- DEBUG(dbgs() << "Moved " << printMBBReference(*CodeBBStart) << " through "
- << printMBBReference(*CodeBBEnd) << "\n");
+ LLVM_DEBUG(dbgs() << "Moved " << printMBBReference(*CodeBBStart)
+ << " through " << printMBBReference(*CodeBBEnd) << "\n");
// If we have a single predecessor we can find a reasonable debug location
MachineBasicBlock *SinglePred =
@@ -1936,16 +1946,18 @@ void AMDGPUMachineCFGStructurizer::rewriteCodeBBTerminator(MachineBasicBlock *Co
MachineInstr *AMDGPUMachineCFGStructurizer::getDefInstr(unsigned Reg) {
if (MRI->def_begin(Reg) == MRI->def_end()) {
- DEBUG(dbgs() << "Register " << printReg(Reg, MRI->getTargetRegisterInfo())
- << " has NO defs\n");
+ LLVM_DEBUG(dbgs() << "Register "
+ << printReg(Reg, MRI->getTargetRegisterInfo())
+ << " has NO defs\n");
} else if (!MRI->hasOneDef(Reg)) {
- DEBUG(dbgs() << "Register " << printReg(Reg, MRI->getTargetRegisterInfo())
- << " has multiple defs\n");
- DEBUG(dbgs() << "DEFS BEGIN:\n");
+ LLVM_DEBUG(dbgs() << "Register "
+ << printReg(Reg, MRI->getTargetRegisterInfo())
+ << " has multiple defs\n");
+ LLVM_DEBUG(dbgs() << "DEFS BEGIN:\n");
for (auto DI = MRI->def_begin(Reg), DE = MRI->def_end(); DI != DE; ++DI) {
- DEBUG(DI->getParent()->dump());
+ LLVM_DEBUG(DI->getParent()->dump());
}
- DEBUG(dbgs() << "DEFS END\n");
+ LLVM_DEBUG(dbgs() << "DEFS END\n");
}
assert(MRI->hasOneDef(Reg) && "Register has multiple definitions");
@@ -1987,7 +1999,7 @@ void AMDGPUMachineCFGStructurizer::insertChainedPHI(MachineBasicBlock *IfBB,
const TargetRegisterClass *RegClass = MRI->getRegClass(DestReg);
unsigned NextDestReg = MRI->createVirtualRegister(RegClass);
bool IsLastDef = PHIInfo.getNumSources(DestReg) == 1;
- DEBUG(dbgs() << "Insert Chained PHI\n");
+ LLVM_DEBUG(dbgs() << "Insert Chained PHI\n");
insertMergePHI(IfBB, InnerRegion->getExit(), MergeBB, DestReg, NextDestReg,
SourceReg, IsLastDef);
@@ -2023,16 +2035,16 @@ void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB,
}
for (auto LI : OldLiveOuts) {
- DEBUG(dbgs() << "LiveOut: " << printReg(LI, TRI));
+ LLVM_DEBUG(dbgs() << "LiveOut: " << printReg(LI, TRI));
if (!containsDef(CodeBB, InnerRegion, LI) ||
(!IsSingleBB && (getDefInstr(LI)->getParent() == LRegion->getExit()))) {
// If the register simly lives through the CodeBB, we don't have
// to rewrite anything since the register is not defined in this
// part of the code.
- DEBUG(dbgs() << "- through");
+ LLVM_DEBUG(dbgs() << "- through");
continue;
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
unsigned Reg = LI;
if (/*!PHIInfo.isSource(Reg) &&*/ Reg != InnerRegion->getBBSelectRegOut()) {
// If the register is live out, we do want to create a phi,
@@ -2049,12 +2061,12 @@ void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB,
unsigned IfSourceReg = MRI->createVirtualRegister(RegClass);
// Create initializer, this value is never used, but is needed
// to satisfy SSA.
- DEBUG(dbgs() << "Initializer for reg: " << printReg(Reg) << "\n");
+ LLVM_DEBUG(dbgs() << "Initializer for reg: " << printReg(Reg) << "\n");
TII->materializeImmediate(*IfBB, IfBB->getFirstTerminator(), DebugLoc(),
IfSourceReg, 0);
InnerRegion->replaceRegisterOutsideRegion(Reg, PHIDestReg, true, MRI);
- DEBUG(dbgs() << "Insert Non-Chained Live out PHI\n");
+ LLVM_DEBUG(dbgs() << "Insert Non-Chained Live out PHI\n");
insertMergePHI(IfBB, InnerRegion->getExit(), MergeBB, PHIDestReg,
IfSourceReg, Reg, true);
}
@@ -2064,22 +2076,22 @@ void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB,
// is a source block for a definition.
SmallVector<unsigned, 4> Sources;
if (PHIInfo.findSourcesFromMBB(CodeBB, Sources)) {
- DEBUG(dbgs() << "Inserting PHI Live Out from " << printMBBReference(*CodeBB)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Inserting PHI Live Out from "
+ << printMBBReference(*CodeBB) << "\n");
for (auto SI : Sources) {
unsigned DestReg;
PHIInfo.findDest(SI, CodeBB, DestReg);
insertChainedPHI(IfBB, CodeBB, MergeBB, InnerRegion, DestReg, SI);
}
- DEBUG(dbgs() << "Insertion done.\n");
+ LLVM_DEBUG(dbgs() << "Insertion done.\n");
}
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(PHIInfo.dump(MRI));
}
void AMDGPUMachineCFGStructurizer::prunePHIInfo(MachineBasicBlock *MBB) {
- DEBUG(dbgs() << "Before PHI Prune\n");
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(dbgs() << "Before PHI Prune\n");
+ LLVM_DEBUG(PHIInfo.dump(MRI));
SmallVector<std::tuple<unsigned, unsigned, MachineBasicBlock *>, 4>
ElimiatedSources;
for (auto DRI = PHIInfo.dests_begin(), DE = PHIInfo.dests_end(); DRI != DE;
@@ -2119,8 +2131,8 @@ void AMDGPUMachineCFGStructurizer::prunePHIInfo(MachineBasicBlock *MBB) {
PHIInfo.removeSource(std::get<0>(SourceInfo), std::get<1>(SourceInfo),
std::get<2>(SourceInfo));
}
- DEBUG(dbgs() << "After PHI Prune\n");
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(dbgs() << "After PHI Prune\n");
+ LLVM_DEBUG(PHIInfo.dump(MRI));
}
void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegion,
@@ -2128,8 +2140,8 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
MachineBasicBlock *Entry = CurrentRegion->getEntry();
MachineBasicBlock *Exit = CurrentRegion->getExit();
- DEBUG(dbgs() << "RegionExit: " << Exit->getNumber()
- << " Pred: " << (*(Entry->pred_begin()))->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "RegionExit: " << Exit->getNumber() << " Pred: "
+ << (*(Entry->pred_begin()))->getNumber() << "\n");
int NumSources = 0;
auto SE = PHIInfo.sources_end(DestReg);
@@ -2146,7 +2158,7 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
const DebugLoc &DL = Entry->findDebugLoc(Entry->begin());
MachineInstrBuilder MIB = BuildMI(*Entry, Entry->instr_begin(), DL,
TII->get(TargetOpcode::PHI), DestReg);
- DEBUG(dbgs() << "Entry PHI " << printReg(DestReg, TRI) << " = PHI(");
+ LLVM_DEBUG(dbgs() << "Entry PHI " << printReg(DestReg, TRI) << " = PHI(");
unsigned CurrentBackedgeReg = 0;
@@ -2170,19 +2182,19 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
BackedgePHI.addReg(getPHISourceReg(*PHIDefInstr, 1));
BackedgePHI.addMBB((*SRI).second);
CurrentBackedgeReg = NewBackedgeReg;
- DEBUG(dbgs() << "Inserting backedge PHI: "
- << printReg(NewBackedgeReg, TRI) << " = PHI("
- << printReg(CurrentBackedgeReg, TRI) << ", "
- << printMBBReference(*getPHIPred(*PHIDefInstr, 0))
- << ", "
- << printReg(getPHISourceReg(*PHIDefInstr, 1), TRI)
- << ", " << printMBBReference(*(*SRI).second));
+ LLVM_DEBUG(dbgs()
+ << "Inserting backedge PHI: "
+ << printReg(NewBackedgeReg, TRI) << " = PHI("
+ << printReg(CurrentBackedgeReg, TRI) << ", "
+ << printMBBReference(*getPHIPred(*PHIDefInstr, 0)) << ", "
+ << printReg(getPHISourceReg(*PHIDefInstr, 1), TRI) << ", "
+ << printMBBReference(*(*SRI).second));
}
} else {
MIB.addReg(SourceReg);
MIB.addMBB((*SRI).second);
- DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
- << printMBBReference(*(*SRI).second) << ", ");
+ LLVM_DEBUG(dbgs() << printReg(SourceReg, TRI) << ", "
+ << printMBBReference(*(*SRI).second) << ", ");
}
}
@@ -2190,16 +2202,16 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
if (CurrentBackedgeReg != 0) {
MIB.addReg(CurrentBackedgeReg);
MIB.addMBB(Exit);
- DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", "
- << printMBBReference(*Exit) << ")\n");
+ LLVM_DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", "
+ << printMBBReference(*Exit) << ")\n");
} else {
- DEBUG(dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << ")\n");
}
}
}
void AMDGPUMachineCFGStructurizer::createEntryPHIs(LinearizedRegion *CurrentRegion) {
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(PHIInfo.dump(MRI));
for (auto DRI = PHIInfo.dests_begin(), DE = PHIInfo.dests_end(); DRI != DE;
++DRI) {
@@ -2220,19 +2232,19 @@ void AMDGPUMachineCFGStructurizer::replaceRegisterWith(unsigned Register,
MachineOperand &O = *I;
++I;
if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
- DEBUG(dbgs() << "Trying to substitute physical register: "
- << printReg(NewRegister, MRI->getTargetRegisterInfo())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Trying to substitute physical register: "
+ << printReg(NewRegister, MRI->getTargetRegisterInfo())
+ << "\n");
llvm_unreachable("Cannot substitute physical registers");
// We don't handle physical registers, but if we need to
// in the future, this is how we do it:
// O.substPhysReg(NewRegister, *TRI);
} else {
- DEBUG(dbgs() << "Replacing register: "
- << printReg(Register, MRI->getTargetRegisterInfo())
- << " with "
- << printReg(NewRegister, MRI->getTargetRegisterInfo())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing register: "
+ << printReg(Register, MRI->getTargetRegisterInfo())
+ << " with "
+ << printReg(NewRegister, MRI->getTargetRegisterInfo())
+ << "\n");
O.setReg(NewRegister);
}
}
@@ -2240,20 +2252,20 @@ void AMDGPUMachineCFGStructurizer::replaceRegisterWith(unsigned Register,
getRegionMRT()->replaceLiveOutReg(Register, NewRegister);
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(PHIInfo.dump(MRI));
}
void AMDGPUMachineCFGStructurizer::resolvePHIInfos(MachineBasicBlock *FunctionEntry) {
- DEBUG(dbgs() << "Resolve PHI Infos\n");
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(dbgs() << "Resolve PHI Infos\n");
+ LLVM_DEBUG(PHIInfo.dump(MRI));
for (auto DRI = PHIInfo.dests_begin(), DE = PHIInfo.dests_end(); DRI != DE;
++DRI) {
unsigned DestReg = *DRI;
- DEBUG(dbgs() << "DestReg: " << printReg(DestReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "DestReg: " << printReg(DestReg, TRI) << "\n");
auto SRI = PHIInfo.sources_begin(DestReg);
unsigned SourceReg = (*SRI).first;
- DEBUG(dbgs() << "DestReg: " << printReg(DestReg, TRI)
- << " SourceReg: " << printReg(SourceReg, TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "DestReg: " << printReg(DestReg, TRI)
+ << " SourceReg: " << printReg(SourceReg, TRI) << "\n");
assert(PHIInfo.sources_end(DestReg) == ++SRI &&
"More than one phi source in entry node");
@@ -2327,9 +2339,9 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
MachineOperand RegOp =
MachineOperand::CreateReg(Reg, false, false, true);
ArrayRef<MachineOperand> Cond(RegOp);
- DEBUG(dbgs() << "RegionExitReg: ");
- DEBUG(Cond[0].print(dbgs(), TRI));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "RegionExitReg: ");
+ LLVM_DEBUG(Cond[0].print(dbgs(), TRI));
+ LLVM_DEBUG(dbgs() << "\n");
TII->insertBranch(*RegionExit, CurrentRegion->getEntry(), RegionExit,
Cond, DebugLoc());
RegionExit->addSuccessor(CurrentRegion->getEntry());
@@ -2339,12 +2351,12 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
LinearizedRegion InnerRegion(CodeBB, MRI, TRI, PHIInfo);
InnerRegion.setParent(CurrentRegion);
- DEBUG(dbgs() << "Insert BB Select PHI (BB)\n");
+ LLVM_DEBUG(dbgs() << "Insert BB Select PHI (BB)\n");
insertMergePHI(IfBB, CodeBB, MergeBB, BBSelectRegOut, BBSelectRegIn,
CodeBBSelectReg);
InnerRegion.addMBB(MergeBB);
- DEBUG(InnerRegion.print(dbgs(), TRI));
+ LLVM_DEBUG(InnerRegion.print(dbgs(), TRI));
rewriteLiveOutRegs(IfBB, CodeBB, MergeBB, &InnerRegion, CurrentRegion);
extractKilledPHIs(CodeBB);
if (IsRegionEntryBB) {
@@ -2385,16 +2397,16 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
CurrentRegion->getRegionMRT()->getEntry()->getNumber());
MachineOperand RegOp = MachineOperand::CreateReg(Reg, false, false, true);
ArrayRef<MachineOperand> Cond(RegOp);
- DEBUG(dbgs() << "RegionExitReg: ");
- DEBUG(Cond[0].print(dbgs(), TRI));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "RegionExitReg: ");
+ LLVM_DEBUG(Cond[0].print(dbgs(), TRI));
+ LLVM_DEBUG(dbgs() << "\n");
TII->insertBranch(*RegionExit, CurrentRegion->getEntry(), RegionExit,
Cond, DebugLoc());
RegionExit->addSuccessor(IfBB);
}
}
CurrentRegion->addMBBs(InnerRegion);
- DEBUG(dbgs() << "Insert BB Select PHI (region)\n");
+ LLVM_DEBUG(dbgs() << "Insert BB Select PHI (region)\n");
insertMergePHI(IfBB, CodeExitBB, MergeBB, BBSelectRegOut, BBSelectRegIn,
CodeBBSelectReg);
@@ -2440,15 +2452,16 @@ void AMDGPUMachineCFGStructurizer::splitLoopPHI(MachineInstr &PHI,
MachineInstrBuilder MIB =
BuildMI(*EntrySucc, EntrySucc->instr_begin(), PHI.getDebugLoc(),
TII->get(TargetOpcode::PHI), NewDestReg);
- DEBUG(dbgs() << "Split Entry PHI " << printReg(NewDestReg, TRI) << " = PHI(");
+ LLVM_DEBUG(dbgs() << "Split Entry PHI " << printReg(NewDestReg, TRI)
+ << " = PHI(");
MIB.addReg(PHISource);
MIB.addMBB(Entry);
- DEBUG(dbgs() << printReg(PHISource, TRI) << ", "
- << printMBBReference(*Entry));
+ LLVM_DEBUG(dbgs() << printReg(PHISource, TRI) << ", "
+ << printMBBReference(*Entry));
MIB.addReg(RegionSourceReg);
MIB.addMBB(RegionSourceMBB);
- DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", "
- << printMBBReference(*RegionSourceMBB) << ")\n");
+ LLVM_DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", "
+ << printMBBReference(*RegionSourceMBB) << ")\n");
}
void AMDGPUMachineCFGStructurizer::splitLoopPHIs(MachineBasicBlock *Entry,
@@ -2481,7 +2494,8 @@ AMDGPUMachineCFGStructurizer::splitExit(LinearizedRegion *LRegion) {
LRegion->addMBB(NewExit);
LRegion->setExit(NewExit);
- DEBUG(dbgs() << "Created new exit block: " << NewExit->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Created new exit block: " << NewExit->getNumber()
+ << "\n");
// Replace any PHI Predecessors in the successor with NewExit
for (auto &II : *Succ) {
@@ -2529,9 +2543,9 @@ AMDGPUMachineCFGStructurizer::splitEntry(LinearizedRegion *LRegion) {
MachineBasicBlock *EntrySucc = split(Entry->getFirstNonPHI());
MachineBasicBlock *Exit = LRegion->getExit();
- DEBUG(dbgs() << "Split " << printMBBReference(*Entry) << " to "
- << printMBBReference(*Entry) << " -> "
- << printMBBReference(*EntrySucc) << "\n");
+ LLVM_DEBUG(dbgs() << "Split " << printMBBReference(*Entry) << " to "
+ << printMBBReference(*Entry) << " -> "
+ << printMBBReference(*EntrySucc) << "\n");
LRegion->addMBB(EntrySucc);
// Make the backedge go to Entry Succ
@@ -2622,21 +2636,21 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
rewriteRegionExitPHIs(Region, LastMerge, LRegion);
removeOldExitPreds(Region);
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(PHIInfo.dump(MRI));
SetVector<MRT *> *Children = Region->getChildren();
- DEBUG(dbgs() << "===========If Region Start===============\n");
+ LLVM_DEBUG(dbgs() << "===========If Region Start===============\n");
if (LRegion->getHasLoop()) {
- DEBUG(dbgs() << "Has Backedge: Yes\n");
+ LLVM_DEBUG(dbgs() << "Has Backedge: Yes\n");
} else {
- DEBUG(dbgs() << "Has Backedge: No\n");
+ LLVM_DEBUG(dbgs() << "Has Backedge: No\n");
}
unsigned BBSelectRegIn;
unsigned BBSelectRegOut;
for (auto CI = Children->begin(), CE = Children->end(); CI != CE; ++CI) {
- DEBUG(dbgs() << "CurrentRegion: \n");
- DEBUG(LRegion->print(dbgs(), TRI));
+ LLVM_DEBUG(dbgs() << "CurrentRegion: \n");
+ LLVM_DEBUG(LRegion->print(dbgs(), TRI));
auto CNI = CI;
++CNI;
@@ -2650,9 +2664,9 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
// We found the block is the exit of an inner region, we need
// to put it in the current linearized region.
- DEBUG(dbgs() << "Linearizing region: ");
- DEBUG(InnerLRegion->print(dbgs(), TRI));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Linearizing region: ");
+ LLVM_DEBUG(InnerLRegion->print(dbgs(), TRI));
+ LLVM_DEBUG(dbgs() << "\n");
MachineBasicBlock *InnerEntry = InnerLRegion->getEntry();
if ((&(*(InnerEntry->getParent()->begin()))) == InnerEntry) {
@@ -2670,10 +2684,10 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
BBSelectRegOut = Child->getBBSelectRegOut();
BBSelectRegIn = Child->getBBSelectRegIn();
- DEBUG(dbgs() << "BBSelectRegIn: " << printReg(BBSelectRegIn, TRI)
- << "\n");
- DEBUG(dbgs() << "BBSelectRegOut: " << printReg(BBSelectRegOut, TRI)
- << "\n");
+ LLVM_DEBUG(dbgs() << "BBSelectRegIn: " << printReg(BBSelectRegIn, TRI)
+ << "\n");
+ LLVM_DEBUG(dbgs() << "BBSelectRegOut: " << printReg(BBSelectRegOut, TRI)
+ << "\n");
MachineBasicBlock *IfEnd = CurrentMerge;
CurrentMerge = createIfRegion(CurrentMerge, InnerLRegion, LRegion,
@@ -2682,7 +2696,7 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
TII->convertNonUniformIfRegion(CurrentMerge, IfEnd);
} else {
MachineBasicBlock *MBB = Child->getMBBMRT()->getMBB();
- DEBUG(dbgs() << "Linearizing block: " << MBB->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Linearizing block: " << MBB->getNumber() << "\n");
if (MBB == getSingleExitNode(*(MBB->getParent()))) {
// If this is the exit block then we need to skip to the next.
@@ -2694,10 +2708,10 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
BBSelectRegOut = Child->getBBSelectRegOut();
BBSelectRegIn = Child->getBBSelectRegIn();
- DEBUG(dbgs() << "BBSelectRegIn: " << printReg(BBSelectRegIn, TRI)
- << "\n");
- DEBUG(dbgs() << "BBSelectRegOut: " << printReg(BBSelectRegOut, TRI)
- << "\n");
+ LLVM_DEBUG(dbgs() << "BBSelectRegIn: " << printReg(BBSelectRegIn, TRI)
+ << "\n");
+ LLVM_DEBUG(dbgs() << "BBSelectRegOut: " << printReg(BBSelectRegOut, TRI)
+ << "\n");
MachineBasicBlock *IfEnd = CurrentMerge;
// This is a basic block that is not part of an inner region, we
@@ -2708,7 +2722,7 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
TII->convertNonUniformIfRegion(CurrentMerge, IfEnd);
}
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(PHIInfo.dump(MRI));
}
}
@@ -2729,7 +2743,7 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
NewInReg, Region->getEntry()->getNumber());
// Need to be careful about updating the registers inside the region.
LRegion->replaceRegisterInsideRegion(InReg, InnerSelectReg, false, MRI);
- DEBUG(dbgs() << "Loop BBSelect Merge PHI:\n");
+ LLVM_DEBUG(dbgs() << "Loop BBSelect Merge PHI:\n");
insertMergePHI(LRegion->getEntry(), LRegion->getExit(), NewSucc,
InnerSelectReg, NewInReg,
LRegion->getRegionMRT()->getInnerOutputRegister());
@@ -2741,11 +2755,11 @@ bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
TII->insertReturn(*LastMerge);
}
- DEBUG(Region->getEntry()->getParent()->dump());
- DEBUG(LRegion->print(dbgs(), TRI));
- DEBUG(PHIInfo.dump(MRI));
+ LLVM_DEBUG(Region->getEntry()->getParent()->dump());
+ LLVM_DEBUG(LRegion->print(dbgs(), TRI));
+ LLVM_DEBUG(PHIInfo.dump(MRI));
- DEBUG(dbgs() << "===========If Region End===============\n");
+ LLVM_DEBUG(dbgs() << "===========If Region End===============\n");
Region->setLinearizedRegion(LRegion);
return true;
@@ -2785,12 +2799,12 @@ bool AMDGPUMachineCFGStructurizer::structurizeRegions(RegionMRT *Region,
}
void AMDGPUMachineCFGStructurizer::initFallthroughMap(MachineFunction &MF) {
- DEBUG(dbgs() << "Fallthrough Map:\n");
+ LLVM_DEBUG(dbgs() << "Fallthrough Map:\n");
for (auto &MBBI : MF) {
MachineBasicBlock *MBB = MBBI.getFallThrough();
if (MBB != nullptr) {
- DEBUG(dbgs() << "Fallthrough: " << MBBI.getNumber() << " -> "
- << MBB->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Fallthrough: " << MBBI.getNumber() << " -> "
+ << MBB->getNumber() << "\n");
}
FallthroughMap[&MBBI] = MBB;
}
@@ -2801,8 +2815,8 @@ void AMDGPUMachineCFGStructurizer::createLinearizedRegion(RegionMRT *Region,
LinearizedRegion *LRegion = new LinearizedRegion();
if (SelectOut) {
LRegion->addLiveOut(SelectOut);
- DEBUG(dbgs() << "Add LiveOut (BBSelect): " << printReg(SelectOut, TRI)
- << "\n");
+ LLVM_DEBUG(dbgs() << "Add LiveOut (BBSelect): " << printReg(SelectOut, TRI)
+ << "\n");
}
LRegion->setRegionMRT(Region);
Region->setLinearizedRegion(LRegion);
@@ -2864,19 +2878,19 @@ bool AMDGPUMachineCFGStructurizer::runOnMachineFunction(MachineFunction &MF) {
initFallthroughMap(MF);
checkRegOnlyPHIInputs(MF);
- DEBUG(dbgs() << "----STRUCTURIZER START----\n");
- DEBUG(MF.dump());
+ LLVM_DEBUG(dbgs() << "----STRUCTURIZER START----\n");
+ LLVM_DEBUG(MF.dump());
Regions = &(getAnalysis<MachineRegionInfoPass>().getRegionInfo());
- DEBUG(Regions->dump());
+ LLVM_DEBUG(Regions->dump());
RegionMRT *RTree = MRT::buildMRT(MF, Regions, TII, MRI);
setRegionMRT(RTree);
initializeSelectRegisters(RTree, 0, MRI, TII);
- DEBUG(RTree->dump(TRI));
+ LLVM_DEBUG(RTree->dump(TRI));
bool result = structurizeRegions(RTree, true);
delete RTree;
- DEBUG(dbgs() << "----STRUCTURIZER END----\n");
+ LLVM_DEBUG(dbgs() << "----STRUCTURIZER END----\n");
initFallthroughMap(MF);
return result;
}
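The hunks above are typical of the whole patch: each call site keeps its streamed text, and only the macro name and the line wrapping change. For readers unfamiliar with the idiom, a minimal self-contained sketch of the resulting pattern follows; the DEBUG_TYPE string "example-pass" and the helper function are hypothetical and not part of this patch:

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // LLVM_DEBUG keys its run-time filtering off the DEBUG_TYPE defined in
    // the enclosing file.
    #define DEBUG_TYPE "example-pass"

    static void reportThreshold(unsigned Threshold) {
      // Compiled out in builds without asserts; at run time the output is
      // enabled with -debug, or narrowed to this file with
      // -debug-only=example-pass.
      LLVM_DEBUG(dbgs() << "Set threshold to " << Threshold << '\n');
    }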
diff --git a/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp b/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp
index 265104a86438..4ff6c6e01c6b 100644
--- a/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp
@@ -114,7 +114,7 @@ bool AMDGPUOpenCLEnqueuedBlockLowering::runOnModule(Module &M) {
M.getDataLayout());
F.setName(Name);
}
- DEBUG(dbgs() << "found enqueued kernel: " << F.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "found enqueued kernel: " << F.getName() << '\n');
auto RuntimeHandle = (F.getName() + ".runtime_handle").str();
auto T = Type::getInt8Ty(C)->getPointerTo(AMDGPUAS::GLOBAL_ADDRESS);
auto *GV = new GlobalVariable(
@@ -124,7 +124,7 @@ bool AMDGPUOpenCLEnqueuedBlockLowering::runOnModule(Module &M) {
/*InsertBefore=*/nullptr, GlobalValue::NotThreadLocal,
AMDGPUAS::GLOBAL_ADDRESS,
/*IsExternallyInitialized=*/false);
- DEBUG(dbgs() << "runtime handle created: " << *GV << '\n');
+ LLVM_DEBUG(dbgs() << "runtime handle created: " << *GV << '\n');
for (auto U : F.users()) {
auto *UU = &*U;
@@ -145,7 +145,7 @@ bool AMDGPUOpenCLEnqueuedBlockLowering::runOnModule(Module &M) {
if (F->getCallingConv() != CallingConv::AMDGPU_KERNEL)
continue;
F->addFnAttr("calls-enqueue-kernel");
- DEBUG(dbgs() << "mark enqueue_kernel caller:" << F->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "mark enqueue_kernel caller:" << F->getName() << '\n');
}
return Changed;
}
diff --git a/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 7d28c3c82599..9264cb11fb8a 100644
--- a/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -343,13 +343,13 @@ static bool canVectorizeInst(Instruction *Inst, User *User) {
static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
if (DisablePromoteAllocaToVector) {
- DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
+ LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
return false;
}
ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());
- DEBUG(dbgs() << "Alloca candidate for vectorization\n");
+ LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
// FIXME: There is no reason why we can't support larger arrays, we
// are just being conservative for now.
@@ -359,7 +359,7 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
AllocaTy->getNumElements() > 16 ||
AllocaTy->getNumElements() < 2 ||
!VectorType::isValidElementType(AllocaTy->getElementType())) {
- DEBUG(dbgs() << " Cannot convert type to vector\n");
+ LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
return false;
}
@@ -380,7 +380,8 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
// If we can't compute a vector index from this GEP, then we can't
// promote this alloca to vector.
if (!Index) {
- DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP << '\n');
+ LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
+ << '\n');
return false;
}
@@ -395,8 +396,8 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
VectorType *VectorTy = arrayTypeToVecType(AllocaTy);
- DEBUG(dbgs() << " Converting alloca to vector "
- << *AllocaTy << " -> " << *VectorTy << '\n');
+ LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
+ << *VectorTy << '\n');
for (Value *V : WorkList) {
Instruction *Inst = cast<Instruction>(V);
@@ -485,7 +486,8 @@ bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
// important part is both must have the same address space at
// the end.
if (OtherObj != BaseAlloca) {
- DEBUG(dbgs() << "Found a binary instruction with another alloca object\n");
+ LLVM_DEBUG(
+ dbgs() << "Found a binary instruction with another alloca object\n");
return false;
}
@@ -607,8 +609,8 @@ bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
if (PtrTy && PtrTy->getAddressSpace() == AS.LOCAL_ADDRESS) {
LocalMemLimit = 0;
- DEBUG(dbgs() << "Function has local memory argument. Promoting to "
- "local memory disabled.\n");
+ LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
+ "local memory disabled.\n");
return false;
}
}
@@ -677,13 +679,12 @@ bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
LocalMemLimit = MaxSizeWithWaveCount;
- DEBUG(
- dbgs() << F.getName() << " uses " << CurrentLocalMemUsage << " bytes of LDS\n"
- << " Rounding size to " << MaxSizeWithWaveCount
- << " with a maximum occupancy of " << MaxOccupancy << '\n'
- << " and " << (LocalMemLimit - CurrentLocalMemUsage)
- << " available for promotion\n"
- );
+ LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
+ << " bytes of LDS\n"
+ << " Rounding size to " << MaxSizeWithWaveCount
+ << " with a maximum occupancy of " << MaxOccupancy << '\n'
+ << " and " << (LocalMemLimit - CurrentLocalMemUsage)
+ << " available for promotion\n");
return true;
}
@@ -700,7 +701,7 @@ bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
// First try to replace the alloca with a vector
Type *AllocaTy = I.getAllocatedType();
- DEBUG(dbgs() << "Trying to promote " << I << '\n');
+ LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
if (tryPromoteAllocaToVector(&I, AS))
return true; // Promoted to vector.
@@ -716,7 +717,9 @@ bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
case CallingConv::SPIR_KERNEL:
break;
default:
- DEBUG(dbgs() << " promote alloca to LDS not supported with calling convention.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " promote alloca to LDS not supported with calling convention.\n");
return false;
}
@@ -745,8 +748,8 @@ bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
NewSize += AllocSize;
if (NewSize > LocalMemLimit) {
- DEBUG(dbgs() << " " << AllocSize
- << " bytes of local memory not available to promote\n");
+ LLVM_DEBUG(dbgs() << " " << AllocSize
+ << " bytes of local memory not available to promote\n");
return false;
}
@@ -755,11 +758,11 @@ bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
std::vector<Value*> WorkList;
if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
- DEBUG(dbgs() << " Do not know how to convert all uses\n");
+ LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
return false;
}
- DEBUG(dbgs() << "Promoting alloca to local memory\n");
+ LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
Function *F = I.getParent()->getParent();
diff --git a/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
index 83e56a9ab495..a861762a8c9e 100644
--- a/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -249,8 +249,8 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
SmallVector<Argument *, 4> OutArgs;
for (Argument &Arg : F.args()) {
if (isOutArgumentCandidate(Arg)) {
- DEBUG(dbgs() << "Found possible out argument " << Arg
- << " in function " << F.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Found possible out argument " << Arg
+ << " in function " << F.getName() << '\n');
OutArgs.push_back(&Arg);
}
}
@@ -310,7 +310,7 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
SI = dyn_cast<StoreInst>(Q.getInst());
if (SI) {
- DEBUG(dbgs() << "Found out argument store: " << *SI << '\n');
+ LLVM_DEBUG(dbgs() << "Found out argument store: " << *SI << '\n');
ReplaceableStores.emplace_back(RI, SI);
} else {
ThisReplaceable = false;
@@ -328,7 +328,8 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
if (llvm::find_if(ValVec,
[OutArg](const std::pair<Argument *, Value *> &Entry) {
return Entry.first == OutArg;}) != ValVec.end()) {
- DEBUG(dbgs() << "Saw multiple out arg stores" << *OutArg << '\n');
+ LLVM_DEBUG(dbgs()
+ << "Saw multiple out arg stores" << *OutArg << '\n');
// It is possible to see stores to the same argument multiple times,
// but we expect these would have been optimized out already.
ThisReplaceable = false;
@@ -358,7 +359,7 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
F.getFunctionType()->params(),
F.isVarArg());
- DEBUG(dbgs() << "Computed new return type: " << *NewRetTy << '\n');
+ LLVM_DEBUG(dbgs() << "Computed new return type: " << *NewRetTy << '\n');
Function *NewFunc = Function::Create(NewFuncTy, Function::PrivateLinkage,
F.getName() + ".body");
diff --git a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 3ea7a82ea7a0..ce17f027b522 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -124,8 +124,9 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
continue;
if (dependsOnLocalPhi(L, Br->getCondition())) {
UP.Threshold += UnrollThresholdIf;
- DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
- << " for loop:\n" << *L << " due to " << *Br << '\n');
+ LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
+ << " for loop:\n"
+ << *L << " due to " << *Br << '\n');
if (UP.Threshold >= MaxBoost)
return;
}
@@ -201,8 +202,9 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
// Don't use the maximum allowed value here as it will make some
// programs way too big.
UP.Threshold = Threshold;
- DEBUG(dbgs() << "Set unroll threshold " << Threshold << " for loop:\n"
- << *L << " due to " << *GEP << '\n');
+ LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
+ << " for loop:\n"
+ << *L << " due to " << *GEP << '\n');
if (UP.Threshold >= MaxBoost)
return;
}
diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
index 1793de63b598..cd1801a82446 100644
--- a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
@@ -78,23 +78,18 @@ namespace {
//
//===----------------------------------------------------------------------===//
-#define SHOWNEWINSTR(i) \
- DEBUG(dbgs() << "New instr: " << *i << "\n");
-
-#define SHOWNEWBLK(b, msg) \
-DEBUG( \
- dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
- dbgs() << "\n"; \
-);
-
-#define SHOWBLK_DETAIL(b, msg) \
-DEBUG( \
- if (b) { \
- dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
- b->print(dbgs()); \
- dbgs() << "\n"; \
- } \
-);
+#define SHOWNEWINSTR(i) LLVM_DEBUG(dbgs() << "New instr: " << *i << "\n");
+
+#define SHOWNEWBLK(b, msg) \
+ LLVM_DEBUG(dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+ dbgs() << "\n";);
+
+#define SHOWBLK_DETAIL(b, msg) \
+ LLVM_DEBUG(if (b) { \
+ dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+ b->print(dbgs()); \
+ dbgs() << "\n"; \
+ });
#define INVALIDSCCNUM -1
@@ -158,19 +153,19 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override {
TII = MF.getSubtarget<R600Subtarget>().getInstrInfo();
TRI = &TII->getRegisterInfo();
- DEBUG(MF.dump(););
+ LLVM_DEBUG(MF.dump(););
OrderedBlks.clear();
Visited.clear();
FuncRep = &MF;
MLI = &getAnalysis<MachineLoopInfo>();
- DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
+ LLVM_DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
MDT = &getAnalysis<MachineDominatorTree>();
- DEBUG(MDT->print(dbgs(), (const Module*)nullptr););
+ LLVM_DEBUG(MDT->print(dbgs(), (const Module *)nullptr););
PDT = &getAnalysis<MachinePostDominatorTree>();
- DEBUG(PDT->print(dbgs()););
+ LLVM_DEBUG(PDT->print(dbgs()););
prepare();
run();
- DEBUG(MF.dump(););
+ LLVM_DEBUG(MF.dump(););
return true;
}
@@ -650,9 +645,8 @@ bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
if (MI)
assert(IsReturn);
else if (IsReturn)
- DEBUG(
- dbgs() << "BB" << MBB->getNumber()
- <<" is return block without RETURN instr\n";);
+ LLVM_DEBUG(dbgs() << "BB" << MBB->getNumber()
+ << " is return block without RETURN instr\n";);
return IsReturn;
}
@@ -714,7 +708,7 @@ bool AMDGPUCFGStructurizer::prepare() {
//FIXME: if not reducible flow graph, make it so ???
- DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";);
+ LLVM_DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";);
orderBlocks(FuncRep);
@@ -757,14 +751,14 @@ bool AMDGPUCFGStructurizer::prepare() {
bool AMDGPUCFGStructurizer::run() {
//Assume reducible CFG...
- DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n");
+ LLVM_DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n");
#ifdef STRESSTEST
//Use the worse block ordering to test the algorithm.
ReverseVector(orderedBlks);
#endif
- DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks(););
+ LLVM_DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks(););
int NumIter = 0;
bool Finish = false;
MachineBasicBlock *MBB;
@@ -774,10 +768,8 @@ bool AMDGPUCFGStructurizer::run() {
do {
++NumIter;
- DEBUG(
- dbgs() << "numIter = " << NumIter
- << ", numRemaintedBlk = " << NumRemainedBlk << "\n";
- );
+ LLVM_DEBUG(dbgs() << "numIter = " << NumIter
+ << ", numRemaintedBlk = " << NumRemainedBlk << "\n";);
SmallVectorImpl<MachineBasicBlock *>::const_iterator It =
OrderedBlks.begin();
@@ -799,10 +791,8 @@ bool AMDGPUCFGStructurizer::run() {
SccBeginMBB = MBB;
SccNumIter = 0;
SccNumBlk = NumRemainedBlk; // Init to maximum possible number.
- DEBUG(
- dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB);
- dbgs() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB);
+ dbgs() << "\n";);
}
if (!isRetiredBlock(MBB))
@@ -817,20 +807,16 @@ bool AMDGPUCFGStructurizer::run() {
++SccNumIter;
int sccRemainedNumBlk = countActiveBlock(SccBeginIter, It);
if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= SccNumBlk) {
- DEBUG(
- dbgs() << "Can't reduce SCC " << getSCCNum(MBB)
- << ", sccNumIter = " << SccNumIter;
- dbgs() << "doesn't make any progress\n";
- );
+ LLVM_DEBUG(dbgs() << "Can't reduce SCC " << getSCCNum(MBB)
+ << ", sccNumIter = " << SccNumIter;
+ dbgs() << "doesn't make any progress\n";);
ContNextScc = true;
} else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < SccNumBlk) {
SccNumBlk = sccRemainedNumBlk;
It = SccBeginIter;
ContNextScc = false;
- DEBUG(
- dbgs() << "repeat processing SCC" << getSCCNum(MBB)
- << "sccNumIter = " << SccNumIter << '\n';
- );
+ LLVM_DEBUG(dbgs() << "repeat processing SCC" << getSCCNum(MBB)
+ << "sccNumIter = " << SccNumIter << '\n';);
} else {
// Finish the current scc.
ContNextScc = true;
@@ -848,9 +834,7 @@ bool AMDGPUCFGStructurizer::run() {
*GraphTraits<MachineFunction *>::nodes_begin(FuncRep);
if (EntryMBB->succ_size() == 0) {
Finish = true;
- DEBUG(
- dbgs() << "Reduce to one block\n";
- );
+ LLVM_DEBUG(dbgs() << "Reduce to one block\n";);
} else {
int NewnumRemainedBlk
= countActiveBlock(OrderedBlks.begin(), OrderedBlks.end());
@@ -860,9 +844,7 @@ bool AMDGPUCFGStructurizer::run() {
NumRemainedBlk = NewnumRemainedBlk;
} else {
MakeProgress = false;
- DEBUG(
- dbgs() << "No progress\n";
- );
+ LLVM_DEBUG(dbgs() << "No progress\n";);
}
}
} while (!Finish && MakeProgress);
@@ -875,9 +857,7 @@ bool AMDGPUCFGStructurizer::run() {
It != E; ++It) {
if ((*It).second && (*It).second->IsRetired) {
assert(((*It).first)->getNumber() != -1);
- DEBUG(
- dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n";);
(*It).first->eraseFromParent(); //Remove from the parent Function.
}
delete (*It).second;
@@ -886,7 +866,7 @@ bool AMDGPUCFGStructurizer::run() {
LLInfoMap.clear();
if (!Finish) {
- DEBUG(FuncRep->viewCFG());
+ LLVM_DEBUG(FuncRep->viewCFG());
report_fatal_error("IRREDUCIBLE_CFG");
}
@@ -920,17 +900,13 @@ int AMDGPUCFGStructurizer::patternMatch(MachineBasicBlock *MBB) {
int NumMatch = 0;
int CurMatch;
- DEBUG(
- dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n";);
while ((CurMatch = patternMatchGroup(MBB)) > 0)
NumMatch += CurMatch;
- DEBUG(
- dbgs() << "End patternMatch BB" << MBB->getNumber()
- << ", numMatch = " << NumMatch << "\n";
- );
+ LLVM_DEBUG(dbgs() << "End patternMatch BB" << MBB->getNumber()
+ << ", numMatch = " << NumMatch << "\n";);
return NumMatch;
}
@@ -1050,7 +1026,7 @@ int AMDGPUCFGStructurizer::loopendPatternMatch() {
for (MachineLoop *ExaminedLoop : NestedLoops) {
if (ExaminedLoop->getNumBlocks() == 0 || Visited[ExaminedLoop])
continue;
- DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump(););
+ LLVM_DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump(););
int NumBreak = mergeLoop(ExaminedLoop);
if (NumBreak == -1)
break;
@@ -1064,7 +1040,8 @@ int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
MBBVector ExitingMBBs;
LoopRep->getExitingBlocks(ExitingMBBs);
assert(!ExitingMBBs.empty() && "Infinite Loop not supported");
- DEBUG(dbgs() << "Loop has " << ExitingMBBs.size() << " exiting blocks\n";);
+ LLVM_DEBUG(dbgs() << "Loop has " << ExitingMBBs.size()
+ << " exiting blocks\n";);
// We assume a single ExitBlk
MBBVector ExitBlks;
LoopRep->getExitBlocks(ExitBlks);
@@ -1106,11 +1083,9 @@ bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak(
if (LoopRep&& LoopRep == MLI->getLoopFor(Src2MBB)) {
MachineBasicBlock *&TheEntry = LLInfoMap[LoopRep];
if (TheEntry) {
- DEBUG(
- dbgs() << "isLoopContBreakBlock yes src1 = BB"
- << Src1MBB->getNumber()
- << " src2 = BB" << Src2MBB->getNumber() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "isLoopContBreakBlock yes src1 = BB"
+ << Src1MBB->getNumber() << " src2 = BB"
+ << Src2MBB->getNumber() << "\n";);
return true;
}
}
@@ -1122,9 +1097,8 @@ int AMDGPUCFGStructurizer::handleJumpintoIf(MachineBasicBlock *HeadMBB,
MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
int Num = handleJumpintoIfImp(HeadMBB, TrueMBB, FalseMBB);
if (Num == 0) {
- DEBUG(
- dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
- );
+ LLVM_DEBUG(dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk"
+ << "\n";);
Num = handleJumpintoIfImp(HeadMBB, FalseMBB, TrueMBB);
}
return Num;
@@ -1138,22 +1112,16 @@ int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
//trueBlk could be the common post dominator
DownBlk = TrueMBB;
- DEBUG(
- dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber()
- << " true = BB" << TrueMBB->getNumber()
- << ", numSucc=" << TrueMBB->succ_size()
- << " false = BB" << FalseMBB->getNumber() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber()
+ << " true = BB" << TrueMBB->getNumber()
+ << ", numSucc=" << TrueMBB->succ_size() << " false = BB"
+ << FalseMBB->getNumber() << "\n";);
while (DownBlk) {
- DEBUG(
- dbgs() << "check down = BB" << DownBlk->getNumber();
- );
+ LLVM_DEBUG(dbgs() << "check down = BB" << DownBlk->getNumber(););
if (singlePathTo(FalseMBB, DownBlk) == SinglePath_InPath) {
- DEBUG(
- dbgs() << " working\n";
- );
+ LLVM_DEBUG(dbgs() << " working\n";);
Num += cloneOnSideEntryTo(HeadMBB, TrueMBB, DownBlk);
Num += cloneOnSideEntryTo(HeadMBB, FalseMBB, DownBlk);
@@ -1166,9 +1134,7 @@ int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
break;
}
- DEBUG(
- dbgs() << " not working\n";
- );
+ LLVM_DEBUG(dbgs() << " not working\n";);
DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : nullptr;
} // walk down the postDomTree
@@ -1247,10 +1213,9 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
if (!MigrateFalse && FalseMBB && FalseMBB->pred_size() > 1)
MigrateFalse = true;
- DEBUG(
- dbgs() << "before improveSimpleJumpintoIf: ";
- showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
- );
+ LLVM_DEBUG(
+ dbgs() << "before improveSimpleJumpintoIf: ";
+ showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0););
// org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
//
@@ -1385,10 +1350,9 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
report_fatal_error("Extra register needed to handle CFG");
}
}
- DEBUG(
- dbgs() << "result from improveSimpleJumpintoIf: ";
- showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
- );
+ LLVM_DEBUG(
+ dbgs() << "result from improveSimpleJumpintoIf: ";
+ showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0););
// update landBlk
*LandMBBPtr = LandBlk;
@@ -1398,10 +1362,8 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB,
MachineBasicBlock *SrcMBB) {
- DEBUG(
- dbgs() << "serialPattern BB" << DstMBB->getNumber()
- << " <= BB" << SrcMBB->getNumber() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "serialPattern BB" << DstMBB->getNumber() << " <= BB"
+ << SrcMBB->getNumber() << "\n";);
DstMBB->splice(DstMBB->end(), SrcMBB, SrcMBB->begin(), SrcMBB->end());
DstMBB->removeSuccessor(SrcMBB, true);
@@ -1416,26 +1378,15 @@ void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI,
MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB) {
assert (TrueMBB);
- DEBUG(
- dbgs() << "ifPattern BB" << MBB->getNumber();
- dbgs() << "{ ";
- if (TrueMBB) {
- dbgs() << "BB" << TrueMBB->getNumber();
- }
- dbgs() << " } else ";
- dbgs() << "{ ";
- if (FalseMBB) {
- dbgs() << "BB" << FalseMBB->getNumber();
- }
- dbgs() << " }\n ";
- dbgs() << "landBlock: ";
- if (!LandMBB) {
- dbgs() << "NULL";
- } else {
- dbgs() << "BB" << LandMBB->getNumber();
- }
- dbgs() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "ifPattern BB" << MBB->getNumber(); dbgs() << "{ ";
+ if (TrueMBB) { dbgs() << "BB" << TrueMBB->getNumber(); } dbgs()
+ << " } else ";
+ dbgs() << "{ "; if (FalseMBB) {
+ dbgs() << "BB" << FalseMBB->getNumber();
+ } dbgs() << " }\n ";
+ dbgs() << "landBlock: "; if (!LandMBB) { dbgs() << "NULL"; } else {
+ dbgs() << "BB" << LandMBB->getNumber();
+ } dbgs() << "\n";);
int OldOpcode = BranchMI->getOpcode();
DebugLoc BranchDL = BranchMI->getDebugLoc();
@@ -1481,8 +1432,8 @@ void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI,
void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk,
MachineBasicBlock *LandMBB) {
- DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber()
- << " land = BB" << LandMBB->getNumber() << "\n";);
+ LLVM_DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber()
+ << " land = BB" << LandMBB->getNumber() << "\n";);
insertInstrBefore(DstBlk, AMDGPU::WHILELOOP, DebugLoc());
insertInstrEnd(DstBlk, AMDGPU::ENDLOOP, DebugLoc());
@@ -1491,8 +1442,9 @@ void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk,
void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
MachineBasicBlock *LandMBB) {
- DEBUG(dbgs() << "loopbreakPattern exiting = BB" << ExitingMBB->getNumber()
- << " land = BB" << LandMBB->getNumber() << "\n";);
+ LLVM_DEBUG(dbgs() << "loopbreakPattern exiting = BB"
+ << ExitingMBB->getNumber() << " land = BB"
+ << LandMBB->getNumber() << "\n";);
MachineInstr *BranchMI = getLoopendBlockBranchInstr(ExitingMBB);
assert(BranchMI && isCondBranch(BranchMI));
DebugLoc DL = BranchMI->getDebugLoc();
@@ -1511,9 +1463,9 @@ void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
void AMDGPUCFGStructurizer::settleLoopcontBlock(MachineBasicBlock *ContingMBB,
MachineBasicBlock *ContMBB) {
- DEBUG(dbgs() << "settleLoopcontBlock conting = BB"
- << ContingMBB->getNumber()
- << ", cont = BB" << ContMBB->getNumber() << "\n";);
+ LLVM_DEBUG(dbgs() << "settleLoopcontBlock conting = BB"
+ << ContingMBB->getNumber() << ", cont = BB"
+ << ContMBB->getNumber() << "\n";);
MachineInstr *MI = getLoopendBlockBranchInstr(ContingMBB);
if (MI) {
@@ -1587,10 +1539,9 @@ AMDGPUCFGStructurizer::cloneBlockForPredecessor(MachineBasicBlock *MBB,
numClonedInstr += MBB->size();
- DEBUG(
- dbgs() << "Cloned block: " << "BB"
- << MBB->getNumber() << "size " << MBB->size() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "Cloned block: "
+ << "BB" << MBB->getNumber() << "size " << MBB->size()
+ << "\n";);
SHOWNEWBLK(CloneMBB, "result of Cloned block: ");
@@ -1603,26 +1554,22 @@ void AMDGPUCFGStructurizer::migrateInstruction(MachineBasicBlock *SrcMBB,
//look for the input branchinstr, not the AMDGPU branchinstr
MachineInstr *BranchMI = getNormalBlockBranchInstr(SrcMBB);
if (!BranchMI) {
- DEBUG(
- dbgs() << "migrateInstruction don't see branch instr\n";
- );
+ LLVM_DEBUG(dbgs() << "migrateInstruction don't see branch instr\n";);
SpliceEnd = SrcMBB->end();
} else {
- DEBUG(dbgs() << "migrateInstruction see branch instr: " << *BranchMI);
+ LLVM_DEBUG(dbgs() << "migrateInstruction see branch instr: " << *BranchMI);
SpliceEnd = BranchMI;
}
- DEBUG(
- dbgs() << "migrateInstruction before splice dstSize = " << DstMBB->size()
- << "srcSize = " << SrcMBB->size() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "migrateInstruction before splice dstSize = "
+ << DstMBB->size() << "srcSize = " << SrcMBB->size()
+ << "\n";);
//splice insert before insertPos
DstMBB->splice(I, SrcMBB, SrcMBB->begin(), SpliceEnd);
- DEBUG(
- dbgs() << "migrateInstruction after splice dstSize = " << DstMBB->size()
- << "srcSize = " << SrcMBB->size() << '\n';
- );
+ LLVM_DEBUG(dbgs() << "migrateInstruction after splice dstSize = "
+ << DstMBB->size() << "srcSize = " << SrcMBB->size()
+ << '\n';);
}
MachineBasicBlock *
@@ -1640,7 +1587,7 @@ AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) {
MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
FuncRep->push_back(DummyExitBlk); //insert to function
SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
- DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
+ LLVM_DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
LLVMContext &Ctx = LoopHeader->getParent()->getFunction().getContext();
Ctx.emitError("Extra register needed to handle CFG");
return nullptr;
@@ -1653,7 +1600,7 @@ void AMDGPUCFGStructurizer::removeUnconditionalBranch(MachineBasicBlock *MBB) {
// test_fc_do_while_or.c need to fix the upstream on this to remove the loop.
while ((BranchMI = getLoopendBlockBranchInstr(MBB))
&& isUncondBranch(BranchMI)) {
- DEBUG(dbgs() << "Removing uncond branch instr: " << *BranchMI);
+ LLVM_DEBUG(dbgs() << "Removing uncond branch instr: " << *BranchMI);
BranchMI->eraseFromParent();
}
}
@@ -1669,7 +1616,7 @@ void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
assert(BranchMI && isCondBranch(BranchMI));
- DEBUG(dbgs() << "Removing unneeded cond branch instr: " << *BranchMI);
+ LLVM_DEBUG(dbgs() << "Removing unneeded cond branch instr: " << *BranchMI);
BranchMI->eraseFromParent();
SHOWNEWBLK(MBB1, "Removing redundant successor");
MBB->removeSuccessor(MBB1, true);
@@ -1688,10 +1635,8 @@ void AMDGPUCFGStructurizer::addDummyExitBlock(
if (MI)
MI->eraseFromParent();
MBB->addSuccessor(DummyExitBlk);
- DEBUG(
- dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber()
- << " successors\n";
- );
+ LLVM_DEBUG(dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber()
+ << " successors\n";);
}
SHOWNEWBLK(DummyExitBlk, "DummyExitBlock: ");
}
@@ -1710,9 +1655,7 @@ void AMDGPUCFGStructurizer::recordSccnum(MachineBasicBlock *MBB,
}
void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) {
- DEBUG(
- dbgs() << "Retiring BB" << MBB->getNumber() << "\n";
- );
+ LLVM_DEBUG(dbgs() << "Retiring BB" << MBB->getNumber() << "\n";);
BlockInformation *&SrcBlkInfo = BlockInfoMap[MBB];
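As the SHOWNEWBLK/SHOWBLK_DETAIL macros and the many multi-statement calls in the structurizer hunks above show, the macro argument need not be a single expression: any statement sequence, including control flow, can be wrapped, provided it contains no unparenthesized commas. A small sketch under the same assumptions (includes and DEBUG_TYPE) as the earlier example; the function and its parameters are hypothetical:

    static void dumpBlocks(unsigned NumBlocks, bool Verbose) {
      // Several statements separated by semicolons form one macro argument.
      LLVM_DEBUG(dbgs() << "Ordered blocks:";
                 for (unsigned I = 0; I < NumBlocks; ++I) dbgs() << " BB" << I;
                 dbgs() << '\n';);

      // A braced block works as well and reads better for longer bodies.
      LLVM_DEBUG({
        if (Verbose)
          dbgs() << NumBlocks << " blocks total\n";
      });
    }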
diff --git a/lib/Target/AMDGPU/GCNILPSched.cpp b/lib/Target/AMDGPU/GCNILPSched.cpp
index ba8211b189cf..651091d44136 100644
--- a/lib/Target/AMDGPU/GCNILPSched.cpp
+++ b/lib/Target/AMDGPU/GCNILPSched.cpp
@@ -149,9 +149,9 @@ static int BUCompareLatency(const SUnit *left, const SUnit *right) {
int LDepth = left->getDepth();
int RDepth = right->getDepth();
if (LDepth != RDepth) {
- DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
- << ") depth " << LDepth << " vs SU (" << right->NodeNum
- << ") depth " << RDepth << "\n");
+ LLVM_DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
+ << ") depth " << LDepth << " vs SU (" << right->NodeNum
+ << ") depth " << RDepth << "\n");
return LDepth < RDepth ? 1 : -1;
}
if (left->Latency != right->Latency)
@@ -169,9 +169,9 @@ const SUnit *GCNILPScheduler::pickBest(const SUnit *left, const SUnit *right)
if (!DisableSchedCriticalPath) {
int spread = (int)left->getDepth() - (int)right->getDepth();
if (std::abs(spread) > MaxReorderWindow) {
- DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
- << left->getDepth() << " != SU(" << right->NodeNum << "): "
- << right->getDepth() << "\n");
+ LLVM_DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
+ << left->getDepth() << " != SU(" << right->NodeNum
+ << "): " << right->getDepth() << "\n");
return left->getDepth() < right->getDepth() ? right : left;
}
}
@@ -324,19 +324,18 @@ GCNILPScheduler::schedule(ArrayRef<const SUnit*> BotRoots,
if (AvailQueue.empty())
break;
- DEBUG(
- dbgs() << "\n=== Picking candidate\n"
- "Ready queue:";
- for (auto &C : AvailQueue)
- dbgs() << ' ' << C.SU->NodeNum;
- dbgs() << '\n';
- );
+ LLVM_DEBUG(dbgs() << "\n=== Picking candidate\n"
+ "Ready queue:";
+ for (auto &C
+ : AvailQueue) dbgs()
+ << ' ' << C.SU->NodeNum;
+ dbgs() << '\n';);
auto C = pickCandidate();
assert(C);
AvailQueue.remove(*C);
auto SU = C->SU;
- DEBUG(dbgs() << "Selected "; SU->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "Selected "; SU->dump(&DAG));
advanceToCycle(SU->getHeight());
diff --git a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index 182ce1e4f631..7f0d80f3d77e 100644
--- a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -200,8 +200,8 @@ public:
void schedule() {
assert(Sch.RegionBegin == Rgn.Begin && Sch.RegionEnd == Rgn.End);
- DEBUG(dbgs() << "\nScheduling ";
- printRegion(dbgs(), Rgn.Begin, Rgn.End, Sch.LIS, 2));
+ LLVM_DEBUG(dbgs() << "\nScheduling ";
+ printRegion(dbgs(), Rgn.Begin, Rgn.End, Sch.LIS, 2));
Sch.BaseClass::schedule();
// Unfortunately placeDebugValues incorrectly modifies RegionEnd, restore
@@ -311,14 +311,13 @@ void GCNIterativeScheduler::enterRegion(MachineBasicBlock *BB, // overridden
void GCNIterativeScheduler::schedule() { // overridden
// do nothing
- DEBUG(
- printLivenessInfo(dbgs(), RegionBegin, RegionEnd, LIS);
- if (!Regions.empty() && Regions.back()->Begin == RegionBegin) {
- dbgs() << "Max RP: ";
- Regions.back()->MaxPressure.print(dbgs(), &MF.getSubtarget<SISubtarget>());
- }
- dbgs() << '\n';
- );
+ LLVM_DEBUG(printLivenessInfo(dbgs(), RegionBegin, RegionEnd, LIS);
+ if (!Regions.empty() && Regions.back()->Begin == RegionBegin) {
+ dbgs() << "Max RP: ";
+ Regions.back()->MaxPressure.print(
+ dbgs(), &MF.getSubtarget<SISubtarget>());
+ } dbgs()
+ << '\n';);
}
void GCNIterativeScheduler::finalizeSchedule() { // overridden
@@ -453,22 +452,22 @@ unsigned GCNIterativeScheduler::tryMaximizeOccupancy(unsigned TargetOcc) {
// TODO: assert Regions are sorted descending by pressure
const auto &ST = MF.getSubtarget<SISubtarget>();
const auto Occ = Regions.front()->MaxPressure.getOccupancy(ST);
- DEBUG(dbgs() << "Trying to improve occupancy, target = " << TargetOcc
- << ", current = " << Occ << '\n');
+ LLVM_DEBUG(dbgs() << "Trying to improve occupancy, target = " << TargetOcc
+ << ", current = " << Occ << '\n');
auto NewOcc = TargetOcc;
for (auto R : Regions) {
if (R->MaxPressure.getOccupancy(ST) >= NewOcc)
break;
- DEBUG(printRegion(dbgs(), R->Begin, R->End, LIS, 3);
- printLivenessInfo(dbgs(), R->Begin, R->End, LIS));
+ LLVM_DEBUG(printRegion(dbgs(), R->Begin, R->End, LIS, 3);
+ printLivenessInfo(dbgs(), R->Begin, R->End, LIS));
BuildDAG DAG(*R, *this);
const auto MinSchedule = makeMinRegSchedule(DAG.getTopRoots(), *this);
const auto MaxRP = getSchedulePressure(*R, MinSchedule);
- DEBUG(dbgs() << "Occupancy improvement attempt:\n";
- printSchedRP(dbgs(), R->MaxPressure, MaxRP));
+ LLVM_DEBUG(dbgs() << "Occupancy improvement attempt:\n";
+ printSchedRP(dbgs(), R->MaxPressure, MaxRP));
NewOcc = std::min(NewOcc, MaxRP.getOccupancy(ST));
if (NewOcc <= Occ)
@@ -476,8 +475,8 @@ unsigned GCNIterativeScheduler::tryMaximizeOccupancy(unsigned TargetOcc) {
setBestSchedule(*R, MinSchedule, MaxRP);
}
- DEBUG(dbgs() << "New occupancy = " << NewOcc
- << ", prev occupancy = " << Occ << '\n');
+ LLVM_DEBUG(dbgs() << "New occupancy = " << NewOcc
+ << ", prev occupancy = " << Occ << '\n');
return std::max(NewOcc, Occ);
}
@@ -497,8 +496,9 @@ void GCNIterativeScheduler::scheduleLegacyMaxOccupancy(
const int NumPasses = Occ < TgtOcc ? 2 : 1;
TgtOcc = std::min(Occ, TgtOcc);
- DEBUG(dbgs() << "Scheduling using default scheduler, "
- "target occupancy = " << TgtOcc << '\n');
+ LLVM_DEBUG(dbgs() << "Scheduling using default scheduler, "
+ "target occupancy = "
+ << TgtOcc << '\n');
GCNMaxOccupancySchedStrategy LStrgy(Context);
for (int I = 0; I < NumPasses; ++I) {
@@ -510,16 +510,16 @@ void GCNIterativeScheduler::scheduleLegacyMaxOccupancy(
Ovr.schedule();
const auto RP = getRegionPressure(*R);
- DEBUG(printSchedRP(dbgs(), R->MaxPressure, RP));
+ LLVM_DEBUG(printSchedRP(dbgs(), R->MaxPressure, RP));
if (RP.getOccupancy(ST) < TgtOcc) {
- DEBUG(dbgs() << "Didn't fit into target occupancy O" << TgtOcc);
+ LLVM_DEBUG(dbgs() << "Didn't fit into target occupancy O" << TgtOcc);
if (R->BestSchedule.get() &&
R->BestSchedule->MaxPressure.getOccupancy(ST) >= TgtOcc) {
- DEBUG(dbgs() << ", scheduling minimal register\n");
+ LLVM_DEBUG(dbgs() << ", scheduling minimal register\n");
scheduleBest(*R);
} else {
- DEBUG(dbgs() << ", restoring\n");
+ LLVM_DEBUG(dbgs() << ", restoring\n");
Ovr.restoreOrder();
assert(R->MaxPressure.getOccupancy(ST) >= TgtOcc);
}
@@ -545,7 +545,7 @@ void GCNIterativeScheduler::scheduleMinReg(bool force) {
const auto MinSchedule = makeMinRegSchedule(DAG.getTopRoots(), *this);
const auto RP = getSchedulePressure(*R, MinSchedule);
- DEBUG(if (R->MaxPressure.less(ST, RP, TgtOcc)) {
+ LLVM_DEBUG(if (R->MaxPressure.less(ST, RP, TgtOcc)) {
dbgs() << "\nWarning: Pressure becomes worse after minreg!";
printSchedRP(dbgs(), R->MaxPressure, RP);
});
@@ -554,7 +554,7 @@ void GCNIterativeScheduler::scheduleMinReg(bool force) {
break;
scheduleRegion(*R, MinSchedule, RP);
- DEBUG(printSchedResult(dbgs(), R, RP));
+ LLVM_DEBUG(printSchedResult(dbgs(), R, RP));
MaxPressure = RP;
}
@@ -577,26 +577,27 @@ void GCNIterativeScheduler::scheduleILP(
Occ = tryMaximizeOccupancy(TgtOcc);
TgtOcc = std::min(Occ, TgtOcc);
- DEBUG(dbgs() << "Scheduling using default scheduler, "
- "target occupancy = " << TgtOcc << '\n');
+ LLVM_DEBUG(dbgs() << "Scheduling using default scheduler, "
+ "target occupancy = "
+ << TgtOcc << '\n');
for (auto R : Regions) {
BuildDAG DAG(*R, *this);
const auto ILPSchedule = makeGCNILPScheduler(DAG.getBottomRoots(), *this);
const auto RP = getSchedulePressure(*R, ILPSchedule);
- DEBUG(printSchedRP(dbgs(), R->MaxPressure, RP));
+ LLVM_DEBUG(printSchedRP(dbgs(), R->MaxPressure, RP));
if (RP.getOccupancy(ST) < TgtOcc) {
- DEBUG(dbgs() << "Didn't fit into target occupancy O" << TgtOcc);
+ LLVM_DEBUG(dbgs() << "Didn't fit into target occupancy O" << TgtOcc);
if (R->BestSchedule.get() &&
R->BestSchedule->MaxPressure.getOccupancy(ST) >= TgtOcc) {
- DEBUG(dbgs() << ", scheduling minimal register\n");
+ LLVM_DEBUG(dbgs() << ", scheduling minimal register\n");
scheduleBest(*R);
}
} else {
scheduleRegion(*R, ILPSchedule, RP);
- DEBUG(printSchedResult(dbgs(), R, RP));
+ LLVM_DEBUG(printSchedResult(dbgs(), R, RP));
}
}
}
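The expression form and the statement-sequence form used throughout these hunks both compile because the macro wraps its argument in a guarded statement. A paraphrased sketch of its shape, shown only for illustration (not the verbatim definition from llvm/Support/Debug.h, whose exact guard macros differ):

    // In asserts builds, roughly:
    #define LLVM_DEBUG(X)                                                  \
      do {                                                                 \
        if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(DEBUG_TYPE)) { \
          X;                                                               \
        }                                                                  \
      } while (false)

    // In builds without asserts the body is discarded entirely, so the
    // streamed expressions are never evaluated:
    // #define LLVM_DEBUG(X) do { } while (false)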
diff --git a/lib/Target/AMDGPU/GCNMinRegStrategy.cpp b/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
index 9904b5f0f4ba..192d534bb9cf 100644
--- a/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
+++ b/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
@@ -142,35 +142,38 @@ GCNMinRegScheduler::Candidate* GCNMinRegScheduler::pickCandidate() {
unsigned Num = RQ.size();
if (Num == 1) break;
- DEBUG(dbgs() << "\nSelecting max priority candidates among " << Num << '\n');
+ LLVM_DEBUG(dbgs() << "\nSelecting max priority candidates among " << Num
+ << '\n');
Num = findMax(Num, [=](const Candidate &C) { return C.Priority; });
if (Num == 1) break;
- DEBUG(dbgs() << "\nSelecting min non-ready producing candidate among "
- << Num << '\n');
+ LLVM_DEBUG(dbgs() << "\nSelecting min non-ready producing candidate among "
+ << Num << '\n');
Num = findMax(Num, [=](const Candidate &C) {
auto SU = C.SU;
int Res = getNotReadySuccessors(SU);
- DEBUG(dbgs() << "SU(" << SU->NodeNum << ") would left non-ready "
- << Res << " successors, metric = " << -Res << '\n');
+ LLVM_DEBUG(dbgs() << "SU(" << SU->NodeNum << ") would left non-ready "
+ << Res << " successors, metric = " << -Res << '\n');
return -Res;
});
if (Num == 1) break;
- DEBUG(dbgs() << "\nSelecting most producing candidate among "
- << Num << '\n');
+ LLVM_DEBUG(dbgs() << "\nSelecting most producing candidate among " << Num
+ << '\n');
Num = findMax(Num, [=](const Candidate &C) {
auto SU = C.SU;
auto Res = getReadySuccessors(SU);
- DEBUG(dbgs() << "SU(" << SU->NodeNum << ") would make ready "
- << Res << " successors, metric = " << Res << '\n');
+ LLVM_DEBUG(dbgs() << "SU(" << SU->NodeNum << ") would make ready " << Res
+ << " successors, metric = " << Res << '\n');
return Res;
});
if (Num == 1) break;
Num = Num ? Num : RQ.size();
- DEBUG(dbgs() << "\nCan't find best candidate, selecting in program order among "
- << Num << '\n');
+ LLVM_DEBUG(
+ dbgs()
+ << "\nCan't find best candidate, selecting in program order among "
+ << Num << '\n');
Num = findMax(Num, [=](const Candidate &C) { return -(int64_t)C.SU->NodeNum; });
assert(Num == 1);
} while (false);
@@ -202,17 +205,17 @@ void GCNMinRegScheduler::bumpPredsPriority(const SUnit *SchedSU, int Priority) {
Worklist.push_back(P.getSUnit());
}
}
- DEBUG(dbgs() << "Make the predecessors of SU(" << SchedSU->NodeNum
- << ")'s non-ready successors of " << Priority
- << " priority in ready queue: ");
+ LLVM_DEBUG(dbgs() << "Make the predecessors of SU(" << SchedSU->NodeNum
+ << ")'s non-ready successors of " << Priority
+ << " priority in ready queue: ");
const auto SetEnd = Set.end();
for (auto &C : RQ) {
if (Set.find(C.SU) != SetEnd) {
C.Priority = Priority;
- DEBUG(dbgs() << " SU(" << C.SU->NodeNum << ')');
+ LLVM_DEBUG(dbgs() << " SU(" << C.SU->NodeNum << ')');
}
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}
void GCNMinRegScheduler::releaseSuccessors(const SUnit* SU, int Priority) {
@@ -243,19 +246,19 @@ GCNMinRegScheduler::schedule(ArrayRef<const SUnit*> TopRoots,
releaseSuccessors(&DAG.EntrySU, StepNo);
while (!RQ.empty()) {
- DEBUG(
- dbgs() << "\n=== Picking candidate, Step = " << StepNo << "\n"
- "Ready queue:";
- for (auto &C : RQ)
- dbgs() << ' ' << C.SU->NodeNum << "(P" << C.Priority << ')';
- dbgs() << '\n';
- );
+ LLVM_DEBUG(dbgs() << "\n=== Picking candidate, Step = " << StepNo
+ << "\n"
+ "Ready queue:";
+ for (auto &C
+ : RQ) dbgs()
+ << ' ' << C.SU->NodeNum << "(P" << C.Priority << ')';
+ dbgs() << '\n';);
auto C = pickCandidate();
assert(C);
RQ.remove(*C);
auto SU = C->SU;
- DEBUG(dbgs() << "Selected "; SU->dump(&DAG));
+ LLVM_DEBUG(dbgs() << "Selected "; SU->dump(&DAG));
releaseSuccessors(SU, StepNo);
Schedule.push_back(SU);
diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 12305446fa4a..c0a6765df34e 100644
--- a/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -200,34 +200,30 @@ SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
// See if BotCand is still valid (because we previously scheduled from Top).
- DEBUG(dbgs() << "Picking from Bot:\n");
+ LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
if (!BotCand.isValid() || BotCand.SU->isScheduled ||
BotCand.Policy != BotPolicy) {
BotCand.reset(CandPolicy());
pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
assert(BotCand.Reason != NoCand && "failed to find the first candidate");
} else {
- DEBUG(traceCandidate(BotCand));
+ LLVM_DEBUG(traceCandidate(BotCand));
}
// Check if the top Q has a better candidate.
- DEBUG(dbgs() << "Picking from Top:\n");
+ LLVM_DEBUG(dbgs() << "Picking from Top:\n");
if (!TopCand.isValid() || TopCand.SU->isScheduled ||
TopCand.Policy != TopPolicy) {
TopCand.reset(CandPolicy());
pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
assert(TopCand.Reason != NoCand && "failed to find the first candidate");
} else {
- DEBUG(traceCandidate(TopCand));
+ LLVM_DEBUG(traceCandidate(TopCand));
}
// Pick best from BotCand and TopCand.
- DEBUG(
- dbgs() << "Top Cand: ";
- traceCandidate(TopCand);
- dbgs() << "Bot Cand: ";
- traceCandidate(BotCand);
- );
+ LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);
+ dbgs() << "Bot Cand: "; traceCandidate(BotCand););
SchedCandidate Cand;
if (TopCand.Reason == BotCand.Reason) {
Cand = BotCand;
@@ -256,10 +252,7 @@ SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
}
}
}
- DEBUG(
- dbgs() << "Picking: ";
- traceCandidate(Cand);
- );
+ LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand););
IsTopNode = Cand.AtTop;
return Cand.SU;
@@ -305,7 +298,8 @@ SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
if (SU->isBottomReady())
Bot.removeReady(SU);
- DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
+ LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
+ << *SU->getInstr());
return SU;
}
@@ -319,7 +313,7 @@ GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
MFI.getMaxWavesPerEU())),
MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) {
- DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
+ LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
}
void GCNScheduleDAGMILive::schedule() {
@@ -339,12 +333,12 @@ void GCNScheduleDAGMILive::schedule() {
if (LIS) {
PressureBefore = Pressure[RegionIdx];
- DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
- GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
- dbgs() << "Region live-in pressure: ";
- llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
- dbgs() << "Region register pressure: ";
- PressureBefore.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
+ GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
+ dbgs() << "Region live-in pressure: ";
+ llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
+ dbgs() << "Region register pressure: ";
+ PressureBefore.print(dbgs()));
}
ScheduleDAGMILive::schedule();
@@ -357,12 +351,13 @@ void GCNScheduleDAGMILive::schedule() {
GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
auto PressureAfter = getRealRegPressure();
- DEBUG(dbgs() << "Pressure after scheduling: "; PressureAfter.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
+ PressureAfter.print(dbgs()));
if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
PressureAfter.getVGPRNum() <= S.VGPRCriticalLimit) {
Pressure[RegionIdx] = PressureAfter;
- DEBUG(dbgs() << "Pressure in desired limits, done.\n");
+ LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
return;
}
unsigned WavesAfter = getMaxWaves(PressureAfter.getSGPRNum(),
@@ -371,16 +366,16 @@ void GCNScheduleDAGMILive::schedule() {
PressureBefore.getVGPRNum(), MF);
WavesAfter = std::min(WavesAfter, MFI.getMaxWavesPerEU());
WavesBefore = std::min(WavesBefore, MFI.getMaxWavesPerEU());
- DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore <<
- ", after " << WavesAfter << ".\n");
+ LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
+ << ", after " << WavesAfter << ".\n");
// We could not keep current target occupancy because of the just scheduled
// region. Record new occupancy for next scheduling cycle.
unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
if (NewOccupancy < MinOccupancy) {
MinOccupancy = NewOccupancy;
- DEBUG(dbgs() << "Occupancy lowered for the function to "
- << MinOccupancy << ".\n");
+ LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
+ << MinOccupancy << ".\n");
}
if (WavesAfter >= WavesBefore) {
@@ -388,7 +383,7 @@ void GCNScheduleDAGMILive::schedule() {
return;
}
- DEBUG(dbgs() << "Attempting to revert scheduling.\n");
+ LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
RegionEnd = RegionBegin;
for (MachineInstr *MI : Unsched) {
if (MI->isDebugInstr())
@@ -418,7 +413,7 @@ void GCNScheduleDAGMILive::schedule() {
}
RegionEnd = MI->getIterator();
++RegionEnd;
- DEBUG(dbgs() << "Scheduling " << *MI);
+ LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
}
RegionBegin = Unsched.front()->getIterator();
Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
@@ -493,7 +488,7 @@ void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
void GCNScheduleDAGMILive::finalizeSchedule() {
GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
- DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");
+ LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");
LiveIns.resize(Regions.size());
Pressure.resize(Regions.size());
@@ -512,9 +507,10 @@ void GCNScheduleDAGMILive::finalizeSchedule() {
if (!LIS || StartingOccupancy <= MinOccupancy)
break;
- DEBUG(dbgs()
- << "Retrying function scheduling with lowest recorded occupancy "
- << MinOccupancy << ".\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Retrying function scheduling with lowest recorded occupancy "
+ << MinOccupancy << ".\n");
S.setTargetOccupancy(MinOccupancy);
}
@@ -540,12 +536,13 @@ void GCNScheduleDAGMILive::finalizeSchedule() {
continue;
}
- DEBUG(dbgs() << "********** MI Scheduling **********\n");
- DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
- << MBB->getName() << "\n From: " << *begin() << " To: ";
- if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
- else dbgs() << "End";
- dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
+ LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
+ << MBB->getName() << "\n From: " << *begin()
+ << " To: ";
+ if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
+ else dbgs() << "End";
+ dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
schedule();
diff --git a/lib/Target/AMDGPU/R600ClauseMergePass.cpp b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
index 5e1ba6b506da..a6838875622a 100644
--- a/lib/Target/AMDGPU/R600ClauseMergePass.cpp
+++ b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
@@ -121,7 +121,7 @@ bool R600ClauseMergePass::mergeIfPossible(MachineInstr &RootCFAlu,
LaterInstCount = getCFAluSize(LatrCFAlu);
unsigned CumuledInsts = RootInstCount + LaterInstCount;
if (CumuledInsts >= TII->getMaxAlusPerClause()) {
- DEBUG(dbgs() << "Excess inst counts\n");
+ LLVM_DEBUG(dbgs() << "Excess inst counts\n");
return false;
}
if (RootCFAlu.getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
@@ -139,7 +139,7 @@ bool R600ClauseMergePass::mergeIfPossible(MachineInstr &RootCFAlu,
RootCFAlu.getOperand(KBank0Idx).getImm() ||
LatrCFAlu.getOperand(KBank0LineIdx).getImm() !=
RootCFAlu.getOperand(KBank0LineIdx).getImm())) {
- DEBUG(dbgs() << "Wrong KC0\n");
+ LLVM_DEBUG(dbgs() << "Wrong KC0\n");
return false;
}
// Is KCache Bank 1 compatible ?
@@ -155,7 +155,7 @@ bool R600ClauseMergePass::mergeIfPossible(MachineInstr &RootCFAlu,
RootCFAlu.getOperand(KBank1Idx).getImm() ||
LatrCFAlu.getOperand(KBank1LineIdx).getImm() !=
RootCFAlu.getOperand(KBank1LineIdx).getImm())) {
- DEBUG(dbgs() << "Wrong KC0\n");
+ LLVM_DEBUG(dbgs() << "Wrong KC0\n");
return false;
}
if (LatrCFAlu.getOperand(Mode0Idx).getImm()) {
diff --git a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index 0fbc254486d7..b4ec8dfb343f 100644
--- a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -531,7 +531,7 @@ public:
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E;) {
if (TII->usesTextureCache(*I) || TII->usesVertexCache(*I)) {
- DEBUG(dbgs() << CfCount << ":"; I->dump(););
+ LLVM_DEBUG(dbgs() << CfCount << ":"; I->dump(););
FetchClauses.push_back(MakeFetchClause(MBB, I));
CfCount++;
LastAlu.back() = nullptr;
@@ -549,7 +549,8 @@ public:
switch (MI->getOpcode()) {
case AMDGPU::CF_ALU_PUSH_BEFORE:
if (RequiresWorkAround) {
- DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
+ LLVM_DEBUG(dbgs()
+ << "Applying bug work-around for ALU_PUSH_BEFORE\n");
BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_PUSH_EG))
.addImm(CfCount + 1)
.addImm(1);
@@ -562,7 +563,7 @@ public:
case AMDGPU::CF_ALU:
I = MI;
AluClauses.push_back(MakeALUClause(MBB, I));
- DEBUG(dbgs() << CfCount << ":"; MI->dump(););
+ LLVM_DEBUG(dbgs() << CfCount << ":"; MI->dump(););
CfCount++;
break;
case AMDGPU::WHILELOOP: {
@@ -597,7 +598,7 @@ public:
.addImm(0)
.addImm(0);
IfThenElseStack.push_back(MIb);
- DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
+ LLVM_DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
MI->eraseFromParent();
CfCount++;
break;
@@ -610,7 +611,7 @@ public:
getHWInstrDesc(CF_ELSE))
.addImm(0)
.addImm(0);
- DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
+ LLVM_DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
IfThenElseStack.push_back(MIb);
MI->eraseFromParent();
CfCount++;
@@ -626,7 +627,7 @@ public:
.addImm(CfCount + 1)
.addImm(1);
(void)MIb;
- DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
+ LLVM_DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
CfCount++;
}
@@ -673,7 +674,7 @@ public:
}
default:
if (TII->isExport(MI->getOpcode())) {
- DEBUG(dbgs() << CfCount << ":"; MI->dump(););
+ LLVM_DEBUG(dbgs() << CfCount << ":"; MI->dump(););
CfCount++;
}
break;
diff --git a/lib/Target/AMDGPU/R600MachineScheduler.cpp b/lib/Target/AMDGPU/R600MachineScheduler.cpp
index f8d062ef52df..4bb4c037a447 100644
--- a/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -78,7 +78,7 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
AllowSwitchFromAlu = true;
} else {
unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
- DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" );
+ LLVM_DEBUG(dbgs() << NeededWF << " approx. Wavefronts Required\n");
// We assume the local GPR requirements to be "dominated" by the requirement
      // of the TEX clause (which consumes 128-bit regs); ALU insts before and
// after TEX are indeed likely to consume or generate values from/for the
@@ -124,26 +124,24 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
NextInstKind = IDOther;
}
- DEBUG(
- if (SU) {
- dbgs() << " ** Pick node **\n";
- SU->dump(DAG);
- } else {
- dbgs() << "NO NODE \n";
- for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
- const SUnit &S = DAG->SUnits[i];
- if (!S.isScheduled)
- S.dump(DAG);
- }
- }
- );
+ LLVM_DEBUG(if (SU) {
+ dbgs() << " ** Pick node **\n";
+ SU->dump(DAG);
+ } else {
+ dbgs() << "NO NODE \n";
+ for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
+ const SUnit &S = DAG->SUnits[i];
+ if (!S.isScheduled)
+ S.dump(DAG);
+ }
+ });
return SU;
}
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
if (NextInstKind != CurInstKind) {
- DEBUG(dbgs() << "Instruction Type Switch\n");
+ LLVM_DEBUG(dbgs() << "Instruction Type Switch\n");
if (NextInstKind != IDAlu)
OccupedSlotsMask |= 31;
CurEmitted = 0;
@@ -172,8 +170,7 @@ void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
++CurEmitted;
}
-
- DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");
+ LLVM_DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");
if (CurInstKind != IDFetch) {
MoveUnits(Pending[IDFetch], Available[IDFetch]);
@@ -190,11 +187,11 @@ isPhysicalRegCopy(MachineInstr *MI) {
}
void R600SchedStrategy::releaseTopNode(SUnit *SU) {
- DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
+ LLVM_DEBUG(dbgs() << "Top Releasing "; SU->dump(DAG););
}
void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
- DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
+ LLVM_DEBUG(dbgs() << "Bottom Releasing "; SU->dump(DAG););
if (isPhysicalRegCopy(SU->getInstr())) {
PhysicalRegCopy.push_back(SU);
return;
@@ -345,7 +342,7 @@ void R600SchedStrategy::LoadAlu() {
}
void R600SchedStrategy::PrepareNextSlot() {
- DEBUG(dbgs() << "New Slot\n");
+ LLVM_DEBUG(dbgs() << "New Slot\n");
assert (OccupedSlotsMask && "Slot wasn't filled");
OccupedSlotsMask = 0;
// if (HwGen == R600Subtarget::NORTHERN_ISLANDS)
diff --git a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 4a14d95f1cc4..cb46855f475c 100644
--- a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -228,20 +228,20 @@ MachineInstr *R600VectorRegMerger::RebuildVector(
UpdatedUndef.erase(ChanPos);
assert(!is_contained(UpdatedUndef, Chan) &&
"UpdatedUndef shouldn't contain Chan more than once!");
- DEBUG(dbgs() << " ->"; Tmp->dump(););
+ LLVM_DEBUG(dbgs() << " ->"; Tmp->dump(););
(void)Tmp;
SrcVec = DstReg;
}
MachineInstr *NewMI =
BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg).addReg(SrcVec);
- DEBUG(dbgs() << " ->"; NewMI->dump(););
+ LLVM_DEBUG(dbgs() << " ->"; NewMI->dump(););
- DEBUG(dbgs() << " Updating Swizzle:\n");
+ LLVM_DEBUG(dbgs() << " Updating Swizzle:\n");
for (MachineRegisterInfo::use_instr_iterator It = MRI->use_instr_begin(Reg),
E = MRI->use_instr_end(); It != E; ++It) {
- DEBUG(dbgs() << " ";(*It).dump(); dbgs() << " ->");
+ LLVM_DEBUG(dbgs() << " "; (*It).dump(); dbgs() << " ->");
SwizzleInput(*It, RemapChan);
- DEBUG((*It).dump());
+ LLVM_DEBUG((*It).dump());
}
RSI->Instr->eraseFromParent();
@@ -372,14 +372,14 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
if (!areAllUsesSwizzeable(Reg))
continue;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Trying to optimize ";
MI.dump();
});
RegSeqInfo CandidateRSI;
std::vector<std::pair<unsigned, unsigned>> RemapChan;
- DEBUG(dbgs() << "Using common slots...\n";);
+ LLVM_DEBUG(dbgs() << "Using common slots...\n";);
if (tryMergeUsingCommonSlot(RSI, CandidateRSI, RemapChan)) {
// Remove CandidateRSI mapping
RemoveMI(CandidateRSI.Instr);
@@ -387,7 +387,7 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
trackRSI(RSI);
continue;
}
- DEBUG(dbgs() << "Using free slots...\n";);
+ LLVM_DEBUG(dbgs() << "Using free slots...\n";);
RemapChan.clear();
if (tryMergeUsingFreeSlot(RSI, CandidateRSI, RemapChan)) {
RemoveMI(CandidateRSI.Instr);
diff --git a/lib/Target/AMDGPU/R600Packetizer.cpp b/lib/Target/AMDGPU/R600Packetizer.cpp
index 7340318d2d88..069e9dcb1235 100644
--- a/lib/Target/AMDGPU/R600Packetizer.cpp
+++ b/lib/Target/AMDGPU/R600Packetizer.cpp
@@ -236,7 +236,7 @@ public:
if (ConsideredInstUsesAlreadyWrittenVectorElement &&
!TII->isVectorOnly(MI) && VLIW5) {
isTransSlot = true;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Considering as Trans Inst :";
MI.dump();
});
@@ -249,7 +249,7 @@ public:
      // Are the constant limitations met?
CurrentPacketMIs.push_back(&MI);
if (!TII->fitsConstReadLimitations(CurrentPacketMIs)) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Couldn't pack :\n";
MI.dump();
dbgs() << "with the following packets :\n";
@@ -266,7 +266,7 @@ public:
    // Is there a BankSwizzle set that meets Read Port limitations?
if (!TII->fitsReadPortLimitations(CurrentPacketMIs,
PV, BS, isTransSlot)) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Couldn't pack :\n";
MI.dump();
dbgs() << "with the following packets :\n";
diff --git a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index 11fea5d6ee70..8ef4315e67f4 100644
--- a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -201,7 +201,7 @@ bool SIAnnotateControlFlow::isElse(PHINode *Phi) {
// Erase "Phi" if it is not used any more
void SIAnnotateControlFlow::eraseIfUnused(PHINode *Phi) {
if (RecursivelyDeleteDeadPHINode(Phi)) {
- DEBUG(dbgs() << "Erased unused condition phi\n");
+ LLVM_DEBUG(dbgs() << "Erased unused condition phi\n");
}
}
diff --git a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index e26bc99bd4b6..033d08a42b63 100644
--- a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -513,9 +513,9 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
if (MDT.dominates(MI1, MI2)) {
if (!intereferes(MI2, MI1)) {
- DEBUG(dbgs() << "Erasing from "
- << printMBBReference(*MI2->getParent()) << " "
- << *MI2);
+ LLVM_DEBUG(dbgs()
+ << "Erasing from "
+ << printMBBReference(*MI2->getParent()) << " " << *MI2);
MI2->eraseFromParent();
Defs.erase(I2++);
Changed = true;
@@ -523,9 +523,9 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
}
} else if (MDT.dominates(MI2, MI1)) {
if (!intereferes(MI1, MI2)) {
- DEBUG(dbgs() << "Erasing from "
- << printMBBReference(*MI1->getParent()) << " "
- << *MI1);
+ LLVM_DEBUG(dbgs()
+ << "Erasing from "
+ << printMBBReference(*MI1->getParent()) << " " << *MI1);
MI1->eraseFromParent();
Defs.erase(I1++);
Changed = true;
@@ -541,11 +541,12 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
if (!intereferes(MI1, I) && !intereferes(MI2, I)) {
- DEBUG(dbgs() << "Erasing from "
- << printMBBReference(*MI1->getParent()) << " " << *MI1
- << "and moving from "
- << printMBBReference(*MI2->getParent()) << " to "
- << printMBBReference(*I->getParent()) << " " << *MI2);
+ LLVM_DEBUG(dbgs()
+ << "Erasing from "
+ << printMBBReference(*MI1->getParent()) << " " << *MI1
+ << "and moving from "
+ << printMBBReference(*MI2->getParent()) << " to "
+ << printMBBReference(*I->getParent()) << " " << *MI2);
I->getParent()->splice(I, MI2->getParent(), MI2);
MI1->eraseFromParent();
Defs.erase(I1++);
@@ -633,7 +634,8 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
if (!predsHasDivergentTerminator(MBB0, TRI) &&
!predsHasDivergentTerminator(MBB1, TRI)) {
- DEBUG(dbgs() << "Not fixing PHI for uniform branch: " << MI << '\n');
+ LLVM_DEBUG(dbgs()
+ << "Not fixing PHI for uniform branch: " << MI << '\n');
break;
}
}
@@ -673,7 +675,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
SmallSet<unsigned, 8> Visited;
if (HasVGPROperand || !phiHasBreakDef(MI, MRI, Visited)) {
- DEBUG(dbgs() << "Fixing PHI: " << MI);
+ LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
TII->moveToVALU(MI);
}
break;
@@ -685,7 +687,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
continue;
}
- DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);
+ LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);
TII->moveToVALU(MI);
break;
@@ -696,7 +698,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
if (TRI->isSGPRClass(DstRC) &&
(TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
- DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
+ LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
TII->moveToVALU(MI);
}
break;
diff --git a/lib/Target/AMDGPU/SIFixVGPRCopies.cpp b/lib/Target/AMDGPU/SIFixVGPRCopies.cpp
index 7a3caf4db71c..dec880847080 100644
--- a/lib/Target/AMDGPU/SIFixVGPRCopies.cpp
+++ b/lib/Target/AMDGPU/SIFixVGPRCopies.cpp
@@ -58,7 +58,7 @@ bool SIFixVGPRCopies::runOnMachineFunction(MachineFunction &MF) {
if (TII->isVGPRCopy(MI) && !MI.readsRegister(AMDGPU::EXEC, TRI)) {
MI.addOperand(MF,
MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
- DEBUG(dbgs() << "Add exec use to " << MI);
+ LLVM_DEBUG(dbgs() << "Add exec use to " << MI);
Changed = true;
}
break;
diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp
index e4f121368a48..d41d151492d6 100644
--- a/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -637,14 +637,14 @@ static bool tryFoldInst(const SIInstrInfo *TII,
const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
if (Src1->isIdenticalTo(*Src0)) {
- DEBUG(dbgs() << "Folded " << *MI << " into ");
+ LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
if (Src2Idx != -1)
MI->RemoveOperand(Src2Idx);
MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
: getMovOpc(false)));
- DEBUG(dbgs() << *MI << '\n');
+ LLVM_DEBUG(dbgs() << *MI << '\n');
return true;
}
}
@@ -685,7 +685,7 @@ void SIFoldOperands::foldInstOperand(MachineInstr &MI,
// be folded due to multiple uses or operand constraints.
if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
- DEBUG(dbgs() << "Constant folded " << *UseMI <<'\n');
+ LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');
// Some constant folding cases change the same immediate's use to a new
// instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
@@ -752,8 +752,9 @@ void SIFoldOperands::foldInstOperand(MachineInstr &MI,
// copies.
MRI->clearKillFlags(Fold.OpToFold->getReg());
}
- DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
- static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
+ LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
+ << static_cast<int>(Fold.UseOpNo) << " of "
+ << *Fold.UseMI << '\n');
tryFoldInst(TII, Fold.UseMI);
} else if (Fold.isCommuted()) {
// Restoring instruction's original operand order if fold has failed.
@@ -833,7 +834,8 @@ bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
if (!DefClamp)
return false;
- DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');
+ LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
+ << '\n');
// Clamp is applied after omod, so it is OK if omod is set.
DefClamp->setImm(1);
@@ -956,7 +958,7 @@ bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
return false;
- DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
+ LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
DefOMod->setImm(OMod);
MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
diff --git a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index e6aaaf947518..0e1c2bc31729 100644
--- a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -350,9 +350,7 @@ public:
void setWaitcnt(MachineInstr *WaitcntIn) { LfWaitcnt = WaitcntIn; }
MachineInstr *getWaitcnt() const { return LfWaitcnt; }
- void print() {
- DEBUG(dbgs() << " iteration " << IterCnt << '\n';);
- }
+ void print() { LLVM_DEBUG(dbgs() << " iteration " << IterCnt << '\n';); }
private:
  // s_waitcnt added at the end of loop footer to stabilize wait scores
@@ -515,7 +513,7 @@ void BlockWaitcntBrackets::setExpScore(const MachineInstr *MI,
const MachineRegisterInfo *MRI,
unsigned OpNo, int32_t Val) {
RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo, false);
- DEBUG({
+ LLVM_DEBUG({
const MachineOperand &Opnd = MI->getOperand(OpNo);
assert(TRI->isVGPR(*MRI, Opnd.getReg()));
});
@@ -1206,8 +1204,9 @@ void SIInsertWaitcnts::generateWaitcntInstBefore(
ScoreBracket = BlockWaitcntBracketsMap[TBB].get();
}
ScoreBracket->setRevisitLoop(true);
- DEBUG(dbgs() << "set-revisit: Block"
- << ContainingLoop->getHeader()->getNumber() << '\n';);
+ LLVM_DEBUG(dbgs()
+ << "set-revisit: Block"
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
}
@@ -1242,26 +1241,29 @@ void SIInsertWaitcnts::generateWaitcntInstBefore(
if (insertSWaitInst) {
if (OldWaitcnt && OldWaitcnt->getOpcode() == AMDGPU::S_WAITCNT) {
if (ForceEmitZeroWaitcnts)
- DEBUG(dbgs() << "Force emit s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Force emit s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)\n");
if (IsForceEmitWaitcnt)
- DEBUG(dbgs() << "Force emit a s_waitcnt due to debug counter\n");
+ LLVM_DEBUG(dbgs()
+ << "Force emit a s_waitcnt due to debug counter\n");
OldWaitcnt->getOperand(0).setImm(Enc);
if (!OldWaitcnt->getParent())
MI.getParent()->insert(MI, OldWaitcnt);
- DEBUG(dbgs() << "updateWaitcntInBlock\n"
- << "Old Instr: " << MI << '\n'
- << "New Instr: " << *OldWaitcnt << '\n');
+ LLVM_DEBUG(dbgs() << "updateWaitcntInBlock\n"
+ << "Old Instr: " << MI << '\n'
+ << "New Instr: " << *OldWaitcnt << '\n');
} else {
auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
.addImm(Enc);
TrackedWaitcntSet.insert(SWaitInst);
- DEBUG(dbgs() << "insertWaitcntInBlock\n"
- << "Old Instr: " << MI << '\n'
- << "New Instr: " << *SWaitInst << '\n');
+ LLVM_DEBUG(dbgs() << "insertWaitcntInBlock\n"
+ << "Old Instr: " << MI << '\n'
+ << "New Instr: " << *SWaitInst << '\n');
}
}
@@ -1670,7 +1672,7 @@ void SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
BlockWaitcntBrackets *ScoreBrackets = BlockWaitcntBracketsMap[&Block].get();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "*** Block" << Block.getNumber() << " ***";
ScoreBrackets->dump();
});
@@ -1731,7 +1733,7 @@ void SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
ScoreBrackets->clearWaitcnt();
- DEBUG({
+ LLVM_DEBUG({
Inst.print(dbgs());
ScoreBrackets->dump();
});
@@ -1771,7 +1773,7 @@ void SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
if (ContainingLoop && isLoopBottom(ContainingLoop, &Block)) {
LoopWaitcntData *WaitcntData = LoopWaitcntDataMap[ContainingLoop].get();
WaitcntData->print();
- DEBUG(dbgs() << '\n';);
+ LLVM_DEBUG(dbgs() << '\n';);
// The iterative waitcnt insertion algorithm aims for optimal waitcnt
// placement and doesn't always guarantee convergence for a loop. Each
@@ -1811,7 +1813,7 @@ void SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
}
if (SWaitInst) {
- DEBUG({
+ LLVM_DEBUG({
SWaitInst->print(dbgs());
dbgs() << "\nAdjusted score board:";
ScoreBrackets->dump();
@@ -1896,8 +1898,8 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
if ((std::count(BlockWaitcntProcessedSet.begin(),
BlockWaitcntProcessedSet.end(), &MBB) < Count)) {
BlockWaitcntBracketsMap[&MBB]->setRevisitLoop(true);
- DEBUG(dbgs() << "set-revisit: Block"
- << ContainingLoop->getHeader()->getNumber() << '\n';);
+ LLVM_DEBUG(dbgs() << "set-revisit: Block"
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
}
@@ -1931,7 +1933,7 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
}
LoopWaitcntData *WaitcntData = LoopWaitcntDataMap[ContainingLoop].get();
WaitcntData->incIterCnt();
- DEBUG(dbgs() << "revisit: Block" << EntryBB->getNumber() << '\n';);
+ LLVM_DEBUG(dbgs() << "revisit: Block" << EntryBB->getNumber() << '\n';);
continue;
} else {
LoopWaitcntData *WaitcntData = LoopWaitcntDataMap[ContainingLoop].get();
diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index e5304818afb5..1ebc45dc9eec 100644
--- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -553,7 +553,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
CI.I->eraseFromParent();
CI.Paired->eraseFromParent();
- DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
+ LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
return Next;
}
@@ -631,7 +631,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
CI.I->eraseFromParent();
CI.Paired->eraseFromParent();
- DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
+ LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
return Next;
}
@@ -950,7 +950,7 @@ bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
assert(MRI->isSSA() && "Must be run on SSA");
- DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
+ LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
bool Modified = false;
diff --git a/lib/Target/AMDGPU/SIMachineScheduler.cpp b/lib/Target/AMDGPU/SIMachineScheduler.cpp
index 0fd4c6bfed99..86f81136bc50 100644
--- a/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -1207,7 +1207,7 @@ void SIScheduleBlockCreator::createBlocksForVariant(SISchedulerBlockCreatorVaria
NextReservedID = 1;
NextNonReservedID = DAGSize + 1;
- DEBUG(dbgs() << "Coloring the graph\n");
+ LLVM_DEBUG(dbgs() << "Coloring the graph\n");
if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesGrouped)
colorHighLatenciesGroups();
@@ -1264,13 +1264,11 @@ void SIScheduleBlockCreator::createBlocksForVariant(SISchedulerBlockCreatorVaria
SIScheduleBlock *Block = CurrentBlocks[i];
Block->finalizeUnits();
}
- DEBUG(
- dbgs() << "Blocks created:\n\n";
- for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
- SIScheduleBlock *Block = CurrentBlocks[i];
- Block->printDebug(true);
- }
- );
+ LLVM_DEBUG(dbgs() << "Blocks created:\n\n";
+ for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
+ SIScheduleBlock *Block = CurrentBlocks[i];
+ Block->printDebug(true);
+ });
}
// Two functions taken from Codegen/MachineScheduler.cpp
@@ -1290,7 +1288,7 @@ void SIScheduleBlockCreator::topologicalSort() {
unsigned DAGSize = CurrentBlocks.size();
std::vector<int> WorkList;
- DEBUG(dbgs() << "Topological Sort\n");
+ LLVM_DEBUG(dbgs() << "Topological Sort\n");
WorkList.reserve(DAGSize);
TopDownIndex2Block.resize(DAGSize);
@@ -1337,11 +1335,11 @@ void SIScheduleBlockCreator::topologicalSort() {
void SIScheduleBlockCreator::scheduleInsideBlocks() {
unsigned DAGSize = CurrentBlocks.size();
- DEBUG(dbgs() << "\nScheduling Blocks\n\n");
+ LLVM_DEBUG(dbgs() << "\nScheduling Blocks\n\n");
  // We compute a valid schedule such that a Block corresponds
  // to a range of instructions.
- DEBUG(dbgs() << "First phase: Fast scheduling for Reg Liveness\n");
+ LLVM_DEBUG(dbgs() << "First phase: Fast scheduling for Reg Liveness\n");
for (unsigned i = 0, e = DAGSize; i != e; ++i) {
SIScheduleBlock *Block = CurrentBlocks[i];
Block->fastSchedule();
@@ -1395,7 +1393,7 @@ void SIScheduleBlockCreator::scheduleInsideBlocks() {
Block->schedule((*SUs.begin())->getInstr(), (*SUs.rbegin())->getInstr());
}
- DEBUG(dbgs() << "Restoring MI Pos\n");
+ LLVM_DEBUG(dbgs() << "Restoring MI Pos\n");
// Restore old ordering (which prevents a LIS->handleMove bug).
for (unsigned i = PosOld.size(), e = 0; i != e; --i) {
MachineBasicBlock::iterator POld = PosOld[i-1];
@@ -1409,12 +1407,10 @@ void SIScheduleBlockCreator::scheduleInsideBlocks() {
}
}
- DEBUG(
- for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
- SIScheduleBlock *Block = CurrentBlocks[i];
- Block->printDebug(true);
- }
- );
+ LLVM_DEBUG(for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
+ SIScheduleBlock *Block = CurrentBlocks[i];
+ Block->printDebug(true);
+ });
}
void SIScheduleBlockCreator::fillStats() {
@@ -1565,13 +1561,10 @@ SIScheduleBlockScheduler::SIScheduleBlockScheduler(SIScheduleDAGMI *DAG,
blockScheduled(Block);
}
- DEBUG(
- dbgs() << "Block Order:";
- for (SIScheduleBlock* Block : BlocksScheduled) {
- dbgs() << ' ' << Block->getID();
- }
- dbgs() << '\n';
- );
+ LLVM_DEBUG(dbgs() << "Block Order:"; for (SIScheduleBlock *Block
+ : BlocksScheduled) {
+ dbgs() << ' ' << Block->getID();
+ } dbgs() << '\n';);
}
bool SIScheduleBlockScheduler::tryCandidateLatency(SIBlockSchedCandidate &Cand,
@@ -1634,18 +1627,17 @@ SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
maxVregUsage = VregCurrentUsage;
if (SregCurrentUsage > maxSregUsage)
maxSregUsage = SregCurrentUsage;
- DEBUG(
- dbgs() << "Picking New Blocks\n";
- dbgs() << "Available: ";
- for (SIScheduleBlock* Block : ReadyBlocks)
- dbgs() << Block->getID() << ' ';
- dbgs() << "\nCurrent Live:\n";
- for (unsigned Reg : LiveRegs)
- dbgs() << printVRegOrUnit(Reg, DAG->getTRI()) << ' ';
- dbgs() << '\n';
- dbgs() << "Current VGPRs: " << VregCurrentUsage << '\n';
- dbgs() << "Current SGPRs: " << SregCurrentUsage << '\n';
- );
+ LLVM_DEBUG(dbgs() << "Picking New Blocks\n"; dbgs() << "Available: ";
+ for (SIScheduleBlock *Block
+ : ReadyBlocks) dbgs()
+ << Block->getID() << ' ';
+ dbgs() << "\nCurrent Live:\n";
+ for (unsigned Reg
+ : LiveRegs) dbgs()
+ << printVRegOrUnit(Reg, DAG->getTRI()) << ' ';
+ dbgs() << '\n';
+ dbgs() << "Current VGPRs: " << VregCurrentUsage << '\n';
+ dbgs() << "Current SGPRs: " << SregCurrentUsage << '\n';);
Cand.Block = nullptr;
for (std::vector<SIScheduleBlock*>::iterator I = ReadyBlocks.begin(),
@@ -1677,20 +1669,18 @@ SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
if (TryCand.Reason != NoCand) {
Cand.setBest(TryCand);
Best = I;
- DEBUG(dbgs() << "Best Current Choice: " << Cand.Block->getID() << ' '
- << getReasonStr(Cand.Reason) << '\n');
+ LLVM_DEBUG(dbgs() << "Best Current Choice: " << Cand.Block->getID() << ' '
+ << getReasonStr(Cand.Reason) << '\n');
}
}
- DEBUG(
- dbgs() << "Picking: " << Cand.Block->getID() << '\n';
- dbgs() << "Is a block with high latency instruction: "
- << (Cand.IsHighLatency ? "yes\n" : "no\n");
- dbgs() << "Position of last high latency dependency: "
- << Cand.LastPosHighLatParentScheduled << '\n';
- dbgs() << "VGPRUsageDiff: " << Cand.VGPRUsageDiff << '\n';
- dbgs() << '\n';
- );
+ LLVM_DEBUG(dbgs() << "Picking: " << Cand.Block->getID() << '\n';
+ dbgs() << "Is a block with high latency instruction: "
+ << (Cand.IsHighLatency ? "yes\n" : "no\n");
+ dbgs() << "Position of last high latency dependency: "
+ << Cand.LastPosHighLatParentScheduled << '\n';
+ dbgs() << "VGPRUsageDiff: " << Cand.VGPRUsageDiff << '\n';
+ dbgs() << '\n';);
Block = Cand.Block;
ReadyBlocks.erase(Best);
@@ -1939,13 +1929,10 @@ void SIScheduleDAGMI::schedule()
{
SmallVector<SUnit*, 8> TopRoots, BotRoots;
SIScheduleBlockResult Best, Temp;
- DEBUG(dbgs() << "Preparing Scheduling\n");
+ LLVM_DEBUG(dbgs() << "Preparing Scheduling\n");
buildDAGWithRegPressure();
- DEBUG(
- for(SUnit& SU : SUnits)
- SU.dumpAll(this)
- );
+ LLVM_DEBUG(for (SUnit &SU : SUnits) SU.dumpAll(this));
topologicalSort();
findRootsAndBiasEdges(TopRoots, BotRoots);
@@ -2047,15 +2034,15 @@ void SIScheduleDAGMI::schedule()
scheduleMI(SU, true);
- DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
- << *SU->getInstr());
+ LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
+ << *SU->getInstr());
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
placeDebugValues();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "*** Final schedule for "
<< printMBBReference(*begin()->getParent()) << " ***\n";
dumpSchedule();
diff --git a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
index ddf45bbccb98..b68df539771d 100644
--- a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
+++ b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
@@ -243,11 +243,11 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
// Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
if (CopyToExecInst->getOperand(1).isKill() &&
isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
- DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);
+ LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);
PrepareExecInst->getOperand(0).setReg(AMDGPU::EXEC);
- DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');
+ LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');
CopyToExecInst->eraseFromParent();
}
@@ -257,7 +257,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
if (isLiveOut(MBB, CopyToExec)) {
// The copied register is live out and has a second use in another block.
- DEBUG(dbgs() << "Exec copy source register is live out\n");
+ LLVM_DEBUG(dbgs() << "Exec copy source register is live out\n");
continue;
}
@@ -269,7 +269,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
= std::next(CopyFromExecInst->getIterator()), JE = I->getIterator();
J != JE; ++J) {
if (SaveExecInst && J->readsRegister(AMDGPU::EXEC, TRI)) {
- DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
+ LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
// Make sure this is inserted after any VALU ops that may have been
// scheduled in between.
SaveExecInst = nullptr;
@@ -280,8 +280,8 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
if (J->modifiesRegister(CopyToExec, TRI)) {
if (SaveExecInst) {
- DEBUG(dbgs() << "Multiple instructions modify "
- << printReg(CopyToExec, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Multiple instructions modify "
+ << printReg(CopyToExec, TRI) << '\n');
SaveExecInst = nullptr;
break;
}
@@ -292,10 +292,11 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
if (ReadsCopyFromExec) {
SaveExecInst = &*J;
- DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
+ LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
continue;
} else {
- DEBUG(dbgs() << "Instruction does not read exec copy: " << *J << '\n');
+ LLVM_DEBUG(dbgs()
+ << "Instruction does not read exec copy: " << *J << '\n');
break;
}
} else if (ReadsCopyFromExec && !SaveExecInst) {
@@ -307,8 +308,8 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
// spill %sgpr0_sgpr1
// %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1
//
- DEBUG(dbgs() << "Found second use of save inst candidate: "
- << *J << '\n');
+ LLVM_DEBUG(dbgs() << "Found second use of save inst candidate: " << *J
+ << '\n');
break;
}
@@ -321,7 +322,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
if (!SaveExecInst)
continue;
- DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');
+ LLVM_DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');
MachineOperand &Src0 = SaveExecInst->getOperand(1);
MachineOperand &Src1 = SaveExecInst->getOperand(2);
diff --git a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
index b7bb80acb71b..c9e3e5696f37 100644
--- a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
+++ b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
@@ -143,7 +143,8 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
break;
- DEBUG(dbgs() << "Removing no effect instruction: " << *I << '\n');
+ LLVM_DEBUG(dbgs()
+ << "Removing no effect instruction: " << *I << '\n');
for (auto &Op : I->operands()) {
if (Op.isReg())
@@ -193,7 +194,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
!getOrExecSource(*NextLead, *TII, MRI))
continue;
- DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');
+ LLVM_DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');
auto SaveExec = getOrExecSource(*Lead, *TII, MRI);
unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII);
@@ -224,7 +225,7 @@ bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
break;
}
- DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
+ LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
}
if (SafeToReplace) {
diff --git a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 7a56b4d1bd63..6f9d75228728 100644
--- a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -846,7 +846,7 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
void SIPeepholeSDWA::matchSDWAOperands(MachineBasicBlock &MBB) {
for (MachineInstr &MI : MBB) {
if (auto Operand = matchSDWAOperand(MI)) {
- DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
+ LLVM_DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
SDWAOperands[&MI] = std::move(Operand);
++NumSDWAPatternsFound;
}
@@ -901,7 +901,7 @@ bool SIPeepholeSDWA::isConvertibleToSDWA(const MachineInstr &MI,
bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
const SDWAOperandsVector &SDWAOperands) {
- DEBUG(dbgs() << "Convert instruction:" << MI);
+ LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
// Convert to sdwa
int SDWAOpcode;
@@ -1050,7 +1050,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
// Apply all sdwa operand patterns.
bool Converted = false;
for (auto &Operand : SDWAOperands) {
- DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
+ LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
    // There should be no intersection between SDWA operands and potential MIs
// e.g.:
// v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
@@ -1071,7 +1071,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
return false;
}
- DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
+ LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
++NumSDWAInstructionsPeepholed;
MI.eraseFromParent();
diff --git a/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 33fd5a30791d..3c4c3baf7994 100644
--- a/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -495,7 +495,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
}
// We can shrink this instruction
- DEBUG(dbgs() << "Shrinking " << MI);
+ LLVM_DEBUG(dbgs() << "Shrinking " << MI);
MachineInstrBuilder Inst32 =
BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
@@ -539,9 +539,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
MI.eraseFromParent();
foldImmediates(*Inst32, TII, MRI);
- DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
-
-
+ LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
}
}
return false;
diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 89e5d56d1e0b..7de132e0ed11 100644
--- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -679,7 +679,8 @@ void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
if (!isEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact)
return;
- DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB) << ":\n");
+ LLVM_DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB)
+ << ":\n");
unsigned SavedWQMReg = 0;
unsigned SavedNonWWMReg = 0;
@@ -882,7 +883,7 @@ bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
}
}
- DEBUG(printInfo());
+ LLVM_DEBUG(printInfo());
lowerCopyInstrs();
diff --git a/lib/Target/ARC/ARCBranchFinalize.cpp b/lib/Target/ARC/ARCBranchFinalize.cpp
index 9341e7bdda41..3b410fa383b7 100644
--- a/lib/Target/ARC/ARCBranchFinalize.cpp
+++ b/lib/Target/ARC/ARCBranchFinalize.cpp
@@ -112,7 +112,7 @@ static unsigned getCmpForPseudo(MachineInstr *MI) {
}
void ARCBranchFinalize::replaceWithBRcc(MachineInstr *MI) const {
- DEBUG(dbgs() << "Replacing pseudo branch with BRcc\n");
+ LLVM_DEBUG(dbgs() << "Replacing pseudo branch with BRcc\n");
unsigned CC = getCCForBRcc(MI->getOperand(3).getImm());
if (CC != -1U) {
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
@@ -128,8 +128,8 @@ void ARCBranchFinalize::replaceWithBRcc(MachineInstr *MI) const {
}
void ARCBranchFinalize::replaceWithCmpBcc(MachineInstr *MI) const {
- DEBUG(dbgs() << "Branch: " << *MI << "\n");
- DEBUG(dbgs() << "Replacing pseudo branch with Cmp + Bcc\n");
+ LLVM_DEBUG(dbgs() << "Branch: " << *MI << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing pseudo branch with Cmp + Bcc\n");
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(getCmpForPseudo(MI)))
.addReg(MI->getOperand(1).getReg())
@@ -141,8 +141,8 @@ void ARCBranchFinalize::replaceWithCmpBcc(MachineInstr *MI) const {
}
bool ARCBranchFinalize::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "Running ARC Branch Finalize on "
- << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Running ARC Branch Finalize on " << MF.getName()
+ << "\n");
std::vector<MachineInstr *> Branches;
bool Changed = false;
unsigned MaxSize = 0;
@@ -156,7 +156,7 @@ bool ARCBranchFinalize::runOnMachineFunction(MachineFunction &MF) {
for (auto &MI : MBB) {
unsigned Size = TII->getInstSizeInBytes(MI);
if (Size > 8 || Size == 0) {
- DEBUG(dbgs() << "Unknown (or size 0) size for: " << MI << "\n");
+ LLVM_DEBUG(dbgs() << "Unknown (or size 0) size for: " << MI << "\n");
} else {
MaxSize += Size;
}
@@ -172,8 +172,8 @@ bool ARCBranchFinalize::runOnMachineFunction(MachineFunction &MF) {
isInt<9>(MaxSize) ? replaceWithBRcc(P.first) : replaceWithCmpBcc(P.first);
}
- DEBUG(dbgs() << "Estimated function size for " << MF.getName()
- << ": " << MaxSize << "\n");
+ LLVM_DEBUG(dbgs() << "Estimated function size for " << MF.getName() << ": "
+ << MaxSize << "\n");
return Changed;
}
diff --git a/lib/Target/ARC/ARCFrameLowering.cpp b/lib/Target/ARC/ARCFrameLowering.cpp
index 195a781950be..ca59cb2baaa7 100644
--- a/lib/Target/ARC/ARCFrameLowering.cpp
+++ b/lib/Target/ARC/ARCFrameLowering.cpp
@@ -59,8 +59,8 @@ static void generateStackAdjustment(MachineBasicBlock &MBB,
Positive = true;
}
- DEBUG(dbgs() << "Internal: adjust stack by: " << Amount << "," << AbsAmount
- << "\n");
+ LLVM_DEBUG(dbgs() << "Internal: adjust stack by: " << Amount << ","
+ << AbsAmount << "\n");
assert((AbsAmount % 4 == 0) && "Stack adjustments must be 4-byte aligned.");
if (isUInt<6>(AbsAmount))
@@ -88,8 +88,7 @@ determineLastCalleeSave(const std::vector<CalleeSavedInfo> &CSI) {
void ARCFrameLowering::determineCalleeSaves(MachineFunction &MF,
BitVector &SavedRegs,
RegScavenger *RS) const {
- DEBUG(dbgs() << "Determine Callee Saves: " << MF.getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "Determine Callee Saves: " << MF.getName() << "\n");
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
SavedRegs.set(ARC::BLINK);
}
@@ -115,7 +114,7 @@ void ARCFrameLowering::adjustStackToMatchRecords(
/// registers onto the stack, when enough callee saved registers are required.
void ARCFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- DEBUG(dbgs() << "Emit Prologue: " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Emit Prologue: " << MF.getName() << "\n");
auto *AFI = MF.getInfo<ARCFunctionInfo>();
MachineModuleInfo &MMI = MF.getMMI();
MCContext &Context = MMI.getContext();
@@ -133,7 +132,7 @@ void ARCFrameLowering::emitPrologue(MachineFunction &MF,
unsigned AlreadyAdjusted = 0;
if (MF.getFunction().isVarArg()) {
// Add in the varargs area here first.
- DEBUG(dbgs() << "Varargs\n");
+ LLVM_DEBUG(dbgs() << "Varargs\n");
unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex());
BuildMI(MBB, MBBI, dl, TII->get(ARC::SUB_rru6))
.addReg(ARC::SP)
@@ -141,7 +140,7 @@ void ARCFrameLowering::emitPrologue(MachineFunction &MF,
.addImm(VarArgsBytes);
}
if (hasFP(MF)) {
- DEBUG(dbgs() << "Saving FP\n");
+ LLVM_DEBUG(dbgs() << "Saving FP\n");
BuildMI(MBB, MBBI, dl, TII->get(ARC::ST_AW_rs9))
.addReg(ARC::SP, RegState::Define)
.addReg(ARC::FP)
@@ -150,7 +149,7 @@ void ARCFrameLowering::emitPrologue(MachineFunction &MF,
AlreadyAdjusted += 4;
}
if (UseSaveRestoreFunclet && Last > ARC::R14) {
- DEBUG(dbgs() << "Creating store funclet.\n");
+ LLVM_DEBUG(dbgs() << "Creating store funclet.\n");
// BL to __save_r13_to_<TRI->getRegAsmName()>
StackSlotsUsedByFunclet = Last - ARC::R12;
BuildMI(MBB, MBBI, dl, TII->get(ARC::PUSH_S_BLINK));
@@ -166,20 +165,20 @@ void ARCFrameLowering::emitPrologue(MachineFunction &MF,
}
// If we haven't saved BLINK, but we need to...do that now.
if (MFI.hasCalls() && !SavedBlink) {
- DEBUG(dbgs() << "Creating save blink.\n");
+ LLVM_DEBUG(dbgs() << "Creating save blink.\n");
BuildMI(MBB, MBBI, dl, TII->get(ARC::PUSH_S_BLINK));
AlreadyAdjusted += 4;
}
if (AFI->MaxCallStackReq > 0)
MFI.setStackSize(MFI.getStackSize() + AFI->MaxCallStackReq);
// We have already saved some of the stack...
- DEBUG(dbgs() << "Adjusting stack by: "
- << (MFI.getStackSize() - AlreadyAdjusted) << "\n");
+ LLVM_DEBUG(dbgs() << "Adjusting stack by: "
+ << (MFI.getStackSize() - AlreadyAdjusted) << "\n");
generateStackAdjustment(MBB, MBBI, *ST.getInstrInfo(), dl,
-(MFI.getStackSize() - AlreadyAdjusted), ARC::SP);
if (hasFP(MF)) {
- DEBUG(dbgs() << "Setting FP from SP.\n");
+ LLVM_DEBUG(dbgs() << "Setting FP from SP.\n");
BuildMI(MBB, MBBI, dl,
TII->get(isUInt<6>(MFI.getStackSize()) ? ARC::ADD_rru6
: ARC::ADD_rrlimm),
@@ -235,7 +234,7 @@ void ARCFrameLowering::emitPrologue(MachineFunction &MF,
/// registers onto the stack, when enough callee saved registers are required.
void ARCFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- DEBUG(dbgs() << "Emit Epilogue: " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Emit Epilogue: " << MF.getName() << "\n");
auto *AFI = MF.getInfo<ARCFunctionInfo>();
const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
@@ -304,7 +303,7 @@ void ARCFrameLowering::emitEpilogue(MachineFunction &MF,
// Relieve the varargs area if necessary.
if (MF.getFunction().isVarArg()) {
// Add in the varargs area here first.
- DEBUG(dbgs() << "Varargs\n");
+ LLVM_DEBUG(dbgs() << "Varargs\n");
unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex());
BuildMI(MBB, MBBI, MBB.findDebugLoc(MBBI), TII->get(ARC::ADD_rru6))
.addReg(ARC::SP)
@@ -334,16 +333,16 @@ bool ARCFrameLowering::assignCalleeSavedSpillSlots(
if (hasFP(MF)) {
    // Create a fixed slot for FP.
int StackObj = MFI.CreateFixedSpillStackObject(4, CurOffset, true);
- DEBUG(dbgs() << "Creating fixed object (" << StackObj << ") for FP at "
- << CurOffset << "\n");
+ LLVM_DEBUG(dbgs() << "Creating fixed object (" << StackObj << ") for FP at "
+ << CurOffset << "\n");
(void)StackObj;
CurOffset -= 4;
}
if (MFI.hasCalls() || (UseSaveRestoreFunclet && Last > ARC::R14)) {
// Create a fixed slot for BLINK.
int StackObj = MFI.CreateFixedSpillStackObject(4, CurOffset, true);
- DEBUG(dbgs() << "Creating fixed object (" << StackObj << ") for BLINK at "
- << CurOffset << "\n");
+ LLVM_DEBUG(dbgs() << "Creating fixed object (" << StackObj
+ << ") for BLINK at " << CurOffset << "\n");
(void)StackObj;
CurOffset -= 4;
}
@@ -366,12 +365,12 @@ bool ARCFrameLowering::assignCalleeSavedSpillSlots(
continue;
if (I.getFrameIdx() == 0) {
I.setFrameIdx(MFI.CreateFixedSpillStackObject(4, CurOffset, true));
- DEBUG(dbgs() << "Creating fixed object (" << I.getFrameIdx()
- << ") for other register at " << CurOffset << "\n");
+ LLVM_DEBUG(dbgs() << "Creating fixed object (" << I.getFrameIdx()
+ << ") for other register at " << CurOffset << "\n");
} else {
MFI.setObjectOffset(I.getFrameIdx(), CurOffset);
- DEBUG(dbgs() << "Updating fixed object (" << I.getFrameIdx()
- << ") for other register at " << CurOffset << "\n");
+ LLVM_DEBUG(dbgs() << "Updating fixed object (" << I.getFrameIdx()
+ << ") for other register at " << CurOffset << "\n");
}
CurOffset -= 4;
}
@@ -382,8 +381,8 @@ bool ARCFrameLowering::spillCalleeSavedRegisters(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
- DEBUG(dbgs() << "Spill callee saved registers: "
- << MBB.getParent()->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Spill callee saved registers: "
+ << MBB.getParent()->getName() << "\n");
// There are routines for saving at least 3 registers (r13 to r15, etc.)
unsigned Last = determineLastCalleeSave(CSI);
if (UseSaveRestoreFunclet && Last > ARC::R14) {
@@ -399,8 +398,8 @@ bool ARCFrameLowering::spillCalleeSavedRegisters(
bool ARCFrameLowering::restoreCalleeSavedRegisters(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI) const {
- DEBUG(dbgs() << "Restore callee saved registers: "
- << MBB.getParent()->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Restore callee saved registers: "
+ << MBB.getParent()->getName() << "\n");
// There are routines for saving at least 3 registers (r13 to r15, etc.)
unsigned Last = determineLastCalleeSave(CSI);
if (UseSaveRestoreFunclet && Last > ARC::R14) {
@@ -414,16 +413,17 @@ bool ARCFrameLowering::restoreCalleeSavedRegisters(
void ARCFrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
- DEBUG(dbgs() << "Process function before frame finalized: "
- << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Process function before frame finalized: "
+ << MF.getName() << "\n");
MachineFrameInfo &MFI = MF.getFrameInfo();
- DEBUG(dbgs() << "Current stack size: " << MFI.getStackSize() << "\n");
+ LLVM_DEBUG(dbgs() << "Current stack size: " << MFI.getStackSize() << "\n");
const TargetRegisterClass *RC = &ARC::GPR32RegClass;
if (MFI.hasStackObjects()) {
int RegScavFI = MFI.CreateStackObject(
RegInfo->getSpillSize(*RC), RegInfo->getSpillAlignment(*RC), false);
RS->addScavengingFrameIndex(RegScavFI);
- DEBUG(dbgs() << "Created scavenging index RegScavFI=" << RegScavFI << "\n");
+ LLVM_DEBUG(dbgs() << "Created scavenging index RegScavFI=" << RegScavFI
+ << "\n");
}
}
@@ -440,7 +440,7 @@ static void emitRegUpdate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator ARCFrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getName() << "\n");
const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo();
MachineInstr &Old = *I;
DebugLoc dl = Old.getDebugLoc();
diff --git a/lib/Target/ARC/ARCISelLowering.cpp b/lib/Target/ARC/ARCISelLowering.cpp
index 5991838a15c4..0ec953f341ab 100644
--- a/lib/Target/ARC/ARCISelLowering.cpp
+++ b/lib/Target/ARC/ARCISelLowering.cpp
@@ -486,8 +486,8 @@ SDValue ARCTargetLowering::LowerCallArguments(
EVT RegVT = VA.getLocVT();
switch (RegVT.getSimpleVT().SimpleTy) {
default: {
- DEBUG(errs() << "LowerFormalArguments Unhandled argument type: "
- << (unsigned)RegVT.getSimpleVT().SimpleTy << "\n");
+ LLVM_DEBUG(errs() << "LowerFormalArguments Unhandled argument type: "
+ << (unsigned)RegVT.getSimpleVT().SimpleTy << "\n");
llvm_unreachable("Unhandled LowerFormalArguments type.");
}
case MVT::i32:
diff --git a/lib/Target/ARC/ARCInstrInfo.cpp b/lib/Target/ARC/ARCInstrInfo.cpp
index f8d14d243f40..a8084f16893b 100644
--- a/lib/Target/ARC/ARCInstrInfo.cpp
+++ b/lib/Target/ARC/ARCInstrInfo.cpp
@@ -298,8 +298,8 @@ void ARCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
"Only support 4-byte stores to stack now.");
assert(ARC::GPR32RegClass.hasSubClassEq(RC) &&
"Only support GPR32 stores to stack now.");
- DEBUG(dbgs() << "Created store reg=" << printReg(SrcReg, TRI)
- << " to FrameIndex=" << FrameIndex << "\n");
+ LLVM_DEBUG(dbgs() << "Created store reg=" << printReg(SrcReg, TRI)
+ << " to FrameIndex=" << FrameIndex << "\n");
BuildMI(MBB, I, dl, get(ARC::ST_rs9))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FrameIndex)
@@ -325,8 +325,8 @@ void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
"Only support 4-byte loads from stack now.");
assert(ARC::GPR32RegClass.hasSubClassEq(RC) &&
"Only support GPR32 stores to stack now.");
- DEBUG(dbgs() << "Created load reg=" << printReg(DestReg, TRI)
- << " from FrameIndex=" << FrameIndex << "\n");
+ LLVM_DEBUG(dbgs() << "Created load reg=" << printReg(DestReg, TRI)
+ << " from FrameIndex=" << FrameIndex << "\n");
BuildMI(MBB, I, dl, get(ARC::LD_rs9))
.addReg(DestReg, RegState::Define)
.addFrameIndex(FrameIndex)
diff --git a/lib/Target/ARC/ARCRegisterInfo.cpp b/lib/Target/ARC/ARCRegisterInfo.cpp
index cb9f89d3499b..38ea3c93a2d4 100644
--- a/lib/Target/ARC/ARCRegisterInfo.cpp
+++ b/lib/Target/ARC/ARCRegisterInfo.cpp
@@ -66,9 +66,9 @@ static void ReplaceFrameIndex(MachineBasicBlock::iterator II,
MBB.getParent()->getSubtarget().getRegisterInfo();
BaseReg = RS->scavengeRegister(&ARC::GPR32RegClass, II, SPAdj);
assert(BaseReg && "Register scavenging failed.");
- DEBUG(dbgs() << "Scavenged register " << printReg(BaseReg, TRI)
- << " for FrameReg=" << printReg(FrameReg, TRI)
- << "+Offset=" << Offset << "\n");
+ LLVM_DEBUG(dbgs() << "Scavenged register " << printReg(BaseReg, TRI)
+ << " for FrameReg=" << printReg(FrameReg, TRI)
+ << "+Offset=" << Offset << "\n");
(void)TRI;
RS->setRegUsed(BaseReg);
}
@@ -88,7 +88,7 @@ static void ReplaceFrameIndex(MachineBasicBlock::iterator II,
assert((Offset % 2 == 0) && "LDH needs 2 byte alignment.");
case ARC::LDB_rs9:
case ARC::LDB_X_rs9:
- DEBUG(dbgs() << "Building LDFI\n");
+ LLVM_DEBUG(dbgs() << "Building LDFI\n");
BuildMI(MBB, II, dl, TII.get(MI.getOpcode()), Reg)
.addReg(BaseReg, KillState)
.addImm(Offset)
@@ -99,7 +99,7 @@ static void ReplaceFrameIndex(MachineBasicBlock::iterator II,
case ARC::STH_rs9:
assert((Offset % 2 == 0) && "STH needs 2 byte alignment.");
case ARC::STB_rs9:
- DEBUG(dbgs() << "Building STFI\n");
+ LLVM_DEBUG(dbgs() << "Building STFI\n");
BuildMI(MBB, II, dl, TII.get(MI.getOpcode()))
.addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
.addReg(BaseReg, KillState)
@@ -107,7 +107,7 @@ static void ReplaceFrameIndex(MachineBasicBlock::iterator II,
.addMemOperand(*MI.memoperands_begin());
break;
case ARC::GETFI:
- DEBUG(dbgs() << "Building GETFI\n");
+ LLVM_DEBUG(dbgs() << "Building GETFI\n");
BuildMI(MBB, II, dl,
TII.get(isUInt<6>(Offset) ? ARC::ADD_rru6 : ARC::ADD_rrlimm))
.addReg(Reg, RegState::Define)
@@ -175,14 +175,14 @@ void ARCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int StackSize = MF.getFrameInfo().getStackSize();
int LocalFrameSize = MF.getFrameInfo().getLocalFrameSize();
- DEBUG(dbgs() << "\nFunction : " << MF.getName() << "\n");
- DEBUG(dbgs() << "<--------->\n");
- DEBUG(dbgs() << MI << "\n");
- DEBUG(dbgs() << "FrameIndex : " << FrameIndex << "\n");
- DEBUG(dbgs() << "ObjSize : " << ObjSize << "\n");
- DEBUG(dbgs() << "FrameOffset : " << Offset << "\n");
- DEBUG(dbgs() << "StackSize : " << StackSize << "\n");
- DEBUG(dbgs() << "LocalFrameSize : " << LocalFrameSize << "\n");
+ LLVM_DEBUG(dbgs() << "\nFunction : " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "<--------->\n");
+ LLVM_DEBUG(dbgs() << MI << "\n");
+ LLVM_DEBUG(dbgs() << "FrameIndex : " << FrameIndex << "\n");
+ LLVM_DEBUG(dbgs() << "ObjSize : " << ObjSize << "\n");
+ LLVM_DEBUG(dbgs() << "FrameOffset : " << Offset << "\n");
+ LLVM_DEBUG(dbgs() << "StackSize : " << StackSize << "\n");
+ LLVM_DEBUG(dbgs() << "LocalFrameSize : " << LocalFrameSize << "\n");
(void)LocalFrameSize;
// Special handling of DBG_VALUE instructions.
@@ -200,8 +200,8 @@ void ARCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// ldb needs no alignment,
// ldh needs 2 byte alignment
// ld needs 4 byte alignment
- DEBUG(dbgs() << "Offset : " << Offset << "\n"
- << "<--------->\n");
+ LLVM_DEBUG(dbgs() << "Offset : " << Offset << "\n"
+ << "<--------->\n");
unsigned Reg = MI.getOperand(0).getReg();
assert(ARC::GPR32RegClass.contains(Reg) && "Unexpected register operand");
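
For reference, the ARC hunks above all convert call sites to the stream form of the renamed macro. A minimal sketch of how such a site is written, with made-up names (the function and the DEBUG_TYPE string below are hypothetical; LLVM_DEBUG, dbgs(), and the -debug / -debug-only=<type> options are the ones declared in llvm/Support/Debug.h):

#include "llvm/Support/Debug.h"          // declares LLVM_DEBUG and dbgs()
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "arc-sketch"          // hypothetical tag for -debug-only=arc-sketch

using namespace llvm;

static void noteFrameIndex(int FrameIndex) {
  // Prints only in builds with assertions enabled, and only when -debug or
  // -debug-only=arc-sketch is passed; otherwise the argument compiles away.
  LLVM_DEBUG(dbgs() << "Created access to FrameIndex=" << FrameIndex << "\n");
}

Because the whole argument vanishes in release builds, nothing with required side effects should be streamed inside the macro.
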
diff --git a/lib/Target/ARC/Disassembler/ARCDisassembler.cpp b/lib/Target/ARC/Disassembler/ARCDisassembler.cpp
index 3280d5ee6cfd..3fc5a033dd5d 100644
--- a/lib/Target/ARC/Disassembler/ARCDisassembler.cpp
+++ b/lib/Target/ARC/Disassembler/ARCDisassembler.cpp
@@ -122,7 +122,7 @@ static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder) {
if (RegNo >= 32) {
- DEBUG(dbgs() << "Not a GPR32 register.");
+ LLVM_DEBUG(dbgs() << "Not a GPR32 register.");
return MCDisassembler::Fail;
}
@@ -222,7 +222,7 @@ static DecodeStatus DecodeStLImmInstruction(MCInst &Inst, uint64_t Insn,
unsigned SrcC, DstB, LImm;
DstB = decodeBField(Insn);
if (DstB != 62) {
- DEBUG(dbgs() << "Decoding StLImm found non-limm register.");
+ LLVM_DEBUG(dbgs() << "Decoding StLImm found non-limm register.");
return MCDisassembler::Fail;
}
SrcC = decodeCField(Insn);
@@ -237,10 +237,10 @@ static DecodeStatus DecodeLdLImmInstruction(MCInst &Inst, uint64_t Insn,
uint64_t Address,
const void *Decoder) {
unsigned DstA, SrcB, LImm;
- DEBUG(dbgs() << "Decoding LdLImm:\n");
+ LLVM_DEBUG(dbgs() << "Decoding LdLImm:\n");
SrcB = decodeBField(Insn);
if (SrcB != 62) {
- DEBUG(dbgs() << "Decoding LdLImm found non-limm register.");
+ LLVM_DEBUG(dbgs() << "Decoding LdLImm found non-limm register.");
return MCDisassembler::Fail;
}
DstA = decodeAField(Insn);
@@ -255,13 +255,13 @@ static DecodeStatus DecodeLdRLImmInstruction(MCInst &Inst, uint64_t Insn,
uint64_t Address,
const void *Decoder) {
unsigned DstA, SrcB;
- DEBUG(dbgs() << "Decoding LdRLimm\n");
+ LLVM_DEBUG(dbgs() << "Decoding LdRLimm\n");
DstA = decodeAField(Insn);
DecodeGPR32RegisterClass(Inst, DstA, Address, Decoder);
SrcB = decodeBField(Insn);
DecodeGPR32RegisterClass(Inst, SrcB, Address, Decoder);
if (decodeCField(Insn) != 62) {
- DEBUG(dbgs() << "Decoding LdRLimm found non-limm register.");
+ LLVM_DEBUG(dbgs() << "Decoding LdRLimm found non-limm register.");
return MCDisassembler::Fail;
}
Inst.addOperand(MCOperand::createImm((uint32_t)(Insn >> 32)));
@@ -271,7 +271,7 @@ static DecodeStatus DecodeLdRLImmInstruction(MCInst &Inst, uint64_t Insn,
static DecodeStatus DecodeMoveHRegInstruction(MCInst &Inst, uint64_t Insn,
uint64_t Address,
const void *Decoder) {
- DEBUG(dbgs() << "Decoding MOV_S h-register\n");
+ LLVM_DEBUG(dbgs() << "Decoding MOV_S h-register\n");
using Field = decltype(Insn);
Field h = fieldFromInstruction(Insn, 5, 3) |
(fieldFromInstruction(Insn, 0, 2) << 3);
@@ -322,10 +322,10 @@ DecodeStatus ARCDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
Result =
decodeInstruction(DecoderTable64, Instr, Insn64, Address, this, STI);
if (Success == Result) {
- DEBUG(dbgs() << "Successfully decoded 64-bit instruction.");
+ LLVM_DEBUG(dbgs() << "Successfully decoded 64-bit instruction.");
return Result;
}
- DEBUG(dbgs() << "Not a 64-bit instruction, falling back to 32-bit.");
+ LLVM_DEBUG(dbgs() << "Not a 64-bit instruction, falling back to 32-bit.");
}
uint32_t Insn32;
if (!readInstruction32(Bytes, Address, Size, Insn32)) {
@@ -342,10 +342,12 @@ DecodeStatus ARCDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
Result =
decodeInstruction(DecoderTable48, Instr, Insn48, Address, this, STI);
if (Success == Result) {
- DEBUG(dbgs() << "Successfully decoded 16-bit instruction with limm.");
+ LLVM_DEBUG(
+ dbgs() << "Successfully decoded 16-bit instruction with limm.");
return Result;
}
- DEBUG(dbgs() << "Not a 16-bit instruction with limm, try without it.");
+ LLVM_DEBUG(
+ dbgs() << "Not a 16-bit instruction with limm, try without it.");
}
uint32_t Insn16;
diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp
index 16d5f74d19e3..b7f7cb20315d 100644
--- a/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/lib/Target/ARM/A15SDOptimizer.cpp
@@ -180,7 +180,7 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
SmallVector<MachineInstr *, 8> Front;
DeadInstr.insert(MI);
- DEBUG(dbgs() << "Deleting base instruction " << *MI << "\n");
+ LLVM_DEBUG(dbgs() << "Deleting base instruction " << *MI << "\n");
Front.push_back(MI);
while (Front.size() != 0) {
@@ -232,7 +232,7 @@ void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
if (!IsDead) continue;
- DEBUG(dbgs() << "Deleting instruction " << *Def << "\n");
+ LLVM_DEBUG(dbgs() << "Deleting instruction " << *Def << "\n");
DeadInstr.insert(Def);
}
}
@@ -264,7 +264,7 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) {
// Is it a subreg copy of ssub_0?
if (EC && EC->isCopy() &&
EC->getOperand(1).getSubReg() == ARM::ssub_0) {
- DEBUG(dbgs() << "Found a subreg copy: " << *SPRMI);
+ LLVM_DEBUG(dbgs() << "Found a subreg copy: " << *SPRMI);
// Find the thing we're subreg copying out of - is it of the same
// regclass as DPRMI? (i.e. a DPR or QPR).
@@ -272,8 +272,8 @@ unsigned A15SDOptimizer::optimizeSDPattern(MachineInstr *MI) {
const TargetRegisterClass *TRC =
MRI->getRegClass(MI->getOperand(1).getReg());
if (TRC->hasSuperClassEq(MRI->getRegClass(FullReg))) {
- DEBUG(dbgs() << "Subreg copy is compatible - returning ");
- DEBUG(dbgs() << printReg(FullReg) << "\n");
+ LLVM_DEBUG(dbgs() << "Subreg copy is compatible - returning ");
+ LLVM_DEBUG(dbgs() << printReg(FullReg) << "\n");
eraseInstrWithNoUses(MI);
return FullReg;
}
@@ -387,7 +387,7 @@ void A15SDOptimizer::elideCopiesAndPHIs(MachineInstr *MI,
continue;
Front.push_back(NewMI);
} else {
- DEBUG(dbgs() << "Found partial copy" << *MI <<"\n");
+ LLVM_DEBUG(dbgs() << "Found partial copy" << *MI << "\n");
Outs.push_back(MI);
}
}
@@ -642,9 +642,8 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
// to find.
MRI->constrainRegClass(NewReg, MRI->getRegClass((*I)->getReg()));
- DEBUG(dbgs() << "Replacing operand "
- << **I << " with "
- << printReg(NewReg) << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing operand " << **I << " with "
+ << printReg(NewReg) << "\n");
(*I)->substVirtReg(NewReg, 0, *TRI);
}
}
@@ -668,7 +667,7 @@ bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
MRI = &Fn.getRegInfo();
bool Modified = false;
- DEBUG(dbgs() << "Running on function " << Fn.getName()<< "\n");
+ LLVM_DEBUG(dbgs() << "Running on function " << Fn.getName() << "\n");
DeadInstr.clear();
Replacements.clear();
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index efe96f8d1b09..505588f1722e 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1469,7 +1469,7 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
return false;
// All clear, widen the COPY.
- DEBUG(dbgs() << "widening: " << MI);
+ LLVM_DEBUG(dbgs() << "widening: " << MI);
MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
// Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
@@ -1498,7 +1498,7 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
MI.addRegisterKilled(SrcRegS, TRI, true);
}
- DEBUG(dbgs() << "replaced by: " << MI);
+ LLVM_DEBUG(dbgs() << "replaced by: " << MI);
return true;
}
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 4b9a4376adf8..43e8b7d66c62 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -838,10 +838,10 @@ bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
auto AFI = MF->getInfo<ARMFunctionInfo>();
auto It = AFI->getCoalescedWeight(MBB);
- DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
- << It->second << "\n");
- DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
- << NewRCWeight.RegWeight << "\n");
+ LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
+ << It->second << "\n");
+ LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
+ << NewRCWeight.RegWeight << "\n");
  // This number is the largest round number that meets the criteria:
// (1) addresses PR18825
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index 1e3655aac89a..de08eb8c6985 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -302,7 +302,7 @@ void ARMConstantIslands::verify() {
return BBInfo[LHS.getNumber()].postOffset() <
BBInfo[RHS.getNumber()].postOffset();
}));
- DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
+ LLVM_DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
CPUser &U = CPUsers[i];
unsigned UserOffset = getUserOffset(U);
@@ -310,12 +310,12 @@ void ARMConstantIslands::verify() {
// adjustment.
if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
/* DoDump = */ true)) {
- DEBUG(dbgs() << "OK\n");
+ LLVM_DEBUG(dbgs() << "OK\n");
continue;
}
- DEBUG(dbgs() << "Out of range.\n");
+ LLVM_DEBUG(dbgs() << "Out of range.\n");
dumpBBs();
- DEBUG(MF->dump());
+ LLVM_DEBUG(MF->dump());
llvm_unreachable("Constant pool entry out of range!");
}
#endif
@@ -324,7 +324,7 @@ void ARMConstantIslands::verify() {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// print block size and offset information - debugging
LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
- DEBUG({
+ LLVM_DEBUG({
for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
const BasicBlockInfo &BBI = BBInfo[J];
dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
@@ -341,9 +341,9 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
MCP = mf.getConstantPool();
- DEBUG(dbgs() << "***** ARMConstantIslands: "
- << MCP->getConstants().size() << " CP entries, aligned to "
- << MCP->getConstantPoolAlignment() << " bytes *****\n");
+ LLVM_DEBUG(dbgs() << "***** ARMConstantIslands: "
+ << MCP->getConstants().size() << " CP entries, aligned to "
+ << MCP->getConstantPoolAlignment() << " bytes *****\n");
STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
TII = STI->getInstrInfo();
@@ -394,7 +394,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// constant pool users.
initializeFunctionInfo(CPEMIs);
CPEMIs.clear();
- DEBUG(dumpBBs());
+ LLVM_DEBUG(dumpBBs());
// Functions with jump tables need an alignment of 4 because they use the ADR
// instruction, which aligns the PC to 4 bytes before adding an offset.
@@ -408,7 +408,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// is no change.
unsigned NoCPIters = 0, NoBRIters = 0;
while (true) {
- DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
+ LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
bool CPChange = false;
for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
// For most inputs, it converges in no more than 5 iterations.
@@ -417,19 +417,19 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
if (CPChange && ++NoCPIters > CPMaxIteration)
report_fatal_error("Constant Island pass failed to converge!");
- DEBUG(dumpBBs());
+ LLVM_DEBUG(dumpBBs());
// Clear NewWaterList now. If we split a block for branches, it should
// appear as "new water" for the next iteration of constant pool placement.
NewWaterList.clear();
- DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
+ LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
bool BRChange = false;
for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
BRChange |= fixupImmediateBr(ImmBranches[i]);
if (BRChange && ++NoBRIters > 30)
report_fatal_error("Branch Fix Up pass failed to converge!");
- DEBUG(dumpBBs());
+ LLVM_DEBUG(dumpBBs());
if (!CPChange && !BRChange)
break;
@@ -465,7 +465,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
}
}
- DEBUG(dbgs() << '\n'; dumpBBs());
+ LLVM_DEBUG(dbgs() << '\n'; dumpBBs());
BBInfo.clear();
WaterList.clear();
@@ -534,10 +534,10 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs)
// Add a new CPEntry, but no corresponding CPUser yet.
CPEntries.emplace_back(1, CPEntry(CPEMI, i));
++NumCPEs;
- DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
- << Size << ", align = " << Align <<'\n');
+ LLVM_DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
+ << Size << ", align = " << Align << '\n');
}
- DEBUG(BB->dump());
+ LLVM_DEBUG(BB->dump());
}
/// Do initial placement of the jump tables. Because Thumb2's TBB and TBH
@@ -1071,7 +1071,7 @@ bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
unsigned CPEOffset = getOffsetOf(CPEMI);
if (DoDump) {
- DEBUG({
+ LLVM_DEBUG({
unsigned Block = MI->getParent()->getNumber();
const BasicBlockInfo &BBI = BBInfo[Block];
dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
@@ -1164,7 +1164,7 @@ int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
// Check to see if the CPE is already in-range.
if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
true)) {
- DEBUG(dbgs() << "In range\n");
+ LLVM_DEBUG(dbgs() << "In range\n");
return 1;
}
@@ -1180,8 +1180,8 @@ int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
continue;
if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
U.NegOk)) {
- DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
- << CPEs[i].CPI << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[i].CPI << "\n");
// Point the CPUser node to the replacement
U.CPEMI = CPEs[i].CPEMI;
// Change the CPI in the instruction operand to refer to the clone.
@@ -1266,8 +1266,8 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// This is the least amount of required padding seen so far.
BestGrowth = Growth;
WaterIter = IP;
- DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
- << " Growth=" << Growth << '\n');
+ LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
+ << " Growth=" << Growth << '\n');
if (CloserWater && WaterBB == U.MI->getParent())
return true;
@@ -1310,8 +1310,8 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
- DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
- << format(", expected CPE offset %#x\n", CPEOffset));
+ LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
+ << format(", expected CPE offset %#x\n", CPEOffset));
NewMBB = &*++UserMBB->getIterator();
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
@@ -1354,18 +1354,17 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
- DEBUG(dbgs() << format("Split in middle of big block before %#x",
- BaseInsertOffset));
+ LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
+ BaseInsertOffset));
// The 4 in the following is for the unconditional branch we'll be inserting
// (allows for long branch on Thumb1). Alignment of the island is handled
// inside isOffsetInRange.
BaseInsertOffset -= 4;
- DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
- << " la=" << LogAlign
- << " kb=" << KnownBits
- << " up=" << UPad << '\n');
+ LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
+ << " la=" << LogAlign << " kb=" << KnownBits
+ << " up=" << UPad << '\n');
// This could point off the end of the block if we've already got constant
// pool entries following this block; only the last one is in the water list.
@@ -1378,7 +1377,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
BaseInsertOffset =
std::max(UserBBI.postOffset() - UPad - 8,
UserOffset + TII->getInstSizeInBytes(*UserMI) + 1);
- DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
+ LLVM_DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
}
unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
CPEMI->getOperand(2).getImm();
@@ -1422,8 +1421,8 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
}
// We really must not split an IT block.
- DEBUG(unsigned PredReg;
- assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));
+ LLVM_DEBUG(unsigned PredReg; assert(
+ !isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));
NewMBB = splitBlockBeforeInstr(&*MI);
}
@@ -1457,7 +1456,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
MachineBasicBlock *NewMBB;
water_iterator IP;
if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
- DEBUG(dbgs() << "Found water in range\n");
+ LLVM_DEBUG(dbgs() << "Found water in range\n");
MachineBasicBlock *WaterBB = *IP;
// If the original WaterList entry was "new water" on this iteration,
@@ -1470,7 +1469,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
NewMBB = &*++WaterBB->getIterator();
} else {
// No water found.
- DEBUG(dbgs() << "No water found\n");
+ LLVM_DEBUG(dbgs() << "No water found\n");
createNewWater(CPUserIndex, UserOffset, NewMBB);
// splitBlockBeforeInstr adds to WaterList, which is important when it is
@@ -1533,8 +1532,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
break;
}
- DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
- << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
+ LLVM_DEBUG(
+ dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
+ << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
return true;
}
@@ -1589,11 +1589,11 @@ bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
unsigned BrOffset = getOffsetOf(MI) + PCAdj;
unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
- DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
- << " from " << printMBBReference(*MI->getParent())
- << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
- << " to " << DestOffset << " offset "
- << int(DestOffset - BrOffset) << "\t" << *MI);
+ LLVM_DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
+ << " from " << printMBBReference(*MI->getParent())
+ << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
+ << " to " << DestOffset << " offset "
+ << int(DestOffset - BrOffset) << "\t" << *MI);
if (BrOffset <= DestOffset) {
// Branch before the Dest.
@@ -1640,7 +1640,7 @@ ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
HasFarJump = true;
++NumUBrFixed;
- DEBUG(dbgs() << " Changed B to long jump " << *MI);
+ LLVM_DEBUG(dbgs() << " Changed B to long jump " << *MI);
return true;
}
@@ -1684,8 +1684,9 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
// b L1
MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
- DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
- << *BMI);
+ LLVM_DEBUG(
+ dbgs() << " Invert Bcc condition and swap its destination with "
+ << *BMI);
BMI->getOperand(0).setMBB(DestBB);
MI->getOperand(0).setMBB(NewDest);
MI->getOperand(1).setImm(CC);
@@ -1711,9 +1712,9 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
}
MachineBasicBlock *NextBB = &*++MBB->getIterator();
- DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
- << " also invert condition and change dest. to "
- << printMBBReference(*NextBB) << "\n");
+ LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
+ << " also invert condition and change dest. to "
+ << printMBBReference(*NextBB) << "\n");
// Insert a new conditional branch and a new unconditional branch.
// Also update the ImmBranch as well as adding a new entry for the new branch.
@@ -1806,7 +1807,7 @@ bool ARMConstantIslands::optimizeThumb2Instructions() {
// FIXME: Check if offset is multiple of scale if scale is not 4.
if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
- DEBUG(dbgs() << "Shrink: " << *U.MI);
+ LLVM_DEBUG(dbgs() << "Shrink: " << *U.MI);
U.MI->setDesc(TII->get(NewOpc));
MachineBasicBlock *MBB = U.MI->getParent();
BBInfo[MBB->getNumber()].Size -= 2;
@@ -1850,7 +1851,7 @@ bool ARMConstantIslands::optimizeThumb2Branches() {
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
- DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
+ LLVM_DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
Br.MI->setDesc(TII->get(NewOpc));
MachineBasicBlock *MBB = Br.MI->getParent();
BBInfo[MBB->getNumber()].Size -= 2;
@@ -1894,7 +1895,7 @@ bool ARMConstantIslands::optimizeThumb2Branches() {
CmpMI->getOperand(1).getImm() == 0 &&
isARMLowRegister(Reg)) {
MachineBasicBlock *MBB = Br.MI->getParent();
- DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
+ LLVM_DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
MachineInstr *NewBR =
BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
.addReg(Reg).addMBB(DestBB,Br.MI->getOperand(0).getTargetFlags());
@@ -2063,7 +2064,7 @@ static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,
}
}
- DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
+ LLVM_DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
RemovableAdd->eraseFromParent();
DeadSize += 4;
}
@@ -2209,7 +2210,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
DeadSize += 4;
}
- DEBUG(dbgs() << "Shrink JT: " << *MI);
+ LLVM_DEBUG(dbgs() << "Shrink JT: " << *MI);
MachineInstr *CPEMI = User.CPEMI;
unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
if (!isThumb2)
@@ -2223,7 +2224,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
.addReg(IdxReg, getKillRegState(IdxRegKill))
.addJumpTableIndex(JTI, JTOP.getTargetFlags())
.addImm(CPEMI->getOperand(0).getImm());
- DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
CPEMI->setDesc(TII->get(JTOpc));
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index 720070fe650e..1b2c845c7b55 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1812,34 +1812,36 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
for (unsigned Reg : {ARM::R0, ARM::R1, ARM::R2, ARM::R3}) {
if (!MF.getRegInfo().isLiveIn(Reg)) {
--EntryRegDeficit;
- DEBUG(dbgs() << printReg(Reg, TRI)
- << " is unused argument register, EntryRegDeficit = "
- << EntryRegDeficit << "\n");
+ LLVM_DEBUG(dbgs()
+ << printReg(Reg, TRI)
+ << " is unused argument register, EntryRegDeficit = "
+ << EntryRegDeficit << "\n");
}
}
// Unused return registers can be clobbered in the epilogue for free.
int ExitRegDeficit = AFI->getReturnRegsCount() - 4;
- DEBUG(dbgs() << AFI->getReturnRegsCount()
- << " return regs used, ExitRegDeficit = " << ExitRegDeficit
- << "\n");
+ LLVM_DEBUG(dbgs() << AFI->getReturnRegsCount()
+ << " return regs used, ExitRegDeficit = "
+ << ExitRegDeficit << "\n");
int RegDeficit = std::max(EntryRegDeficit, ExitRegDeficit);
- DEBUG(dbgs() << "RegDeficit = " << RegDeficit << "\n");
+ LLVM_DEBUG(dbgs() << "RegDeficit = " << RegDeficit << "\n");
// r4-r6 can be used in the prologue if they are pushed by the first push
// instruction.
for (unsigned Reg : {ARM::R4, ARM::R5, ARM::R6}) {
if (SavedRegs.test(Reg)) {
--RegDeficit;
- DEBUG(dbgs() << printReg(Reg, TRI)
- << " is saved low register, RegDeficit = " << RegDeficit
- << "\n");
+ LLVM_DEBUG(dbgs() << printReg(Reg, TRI)
+ << " is saved low register, RegDeficit = "
+ << RegDeficit << "\n");
} else {
AvailableRegs.push_back(Reg);
- DEBUG(dbgs()
- << printReg(Reg, TRI)
- << " is non-saved low register, adding to AvailableRegs\n");
+ LLVM_DEBUG(
+ dbgs()
+ << printReg(Reg, TRI)
+ << " is non-saved low register, adding to AvailableRegs\n");
}
}
@@ -1847,12 +1849,13 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
if (!HasFP) {
if (SavedRegs.test(ARM::R7)) {
--RegDeficit;
- DEBUG(dbgs() << "%r7 is saved low register, RegDeficit = "
- << RegDeficit << "\n");
+ LLVM_DEBUG(dbgs() << "%r7 is saved low register, RegDeficit = "
+ << RegDeficit << "\n");
} else {
AvailableRegs.push_back(ARM::R7);
- DEBUG(dbgs()
- << "%r7 is non-saved low register, adding to AvailableRegs\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "%r7 is non-saved low register, adding to AvailableRegs\n");
}
}
@@ -1860,9 +1863,9 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
for (unsigned Reg : {ARM::R8, ARM::R9, ARM::R10, ARM::R11}) {
if (SavedRegs.test(Reg)) {
++RegDeficit;
- DEBUG(dbgs() << printReg(Reg, TRI)
- << " is saved high register, RegDeficit = " << RegDeficit
- << "\n");
+ LLVM_DEBUG(dbgs() << printReg(Reg, TRI)
+ << " is saved high register, RegDeficit = "
+ << RegDeficit << "\n");
}
}
@@ -1874,11 +1877,11 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
MF.getFrameInfo().isReturnAddressTaken())) {
if (SavedRegs.test(ARM::LR)) {
--RegDeficit;
- DEBUG(dbgs() << "%lr is saved register, RegDeficit = " << RegDeficit
- << "\n");
+ LLVM_DEBUG(dbgs() << "%lr is saved register, RegDeficit = "
+ << RegDeficit << "\n");
} else {
AvailableRegs.push_back(ARM::LR);
- DEBUG(dbgs() << "%lr is not saved, adding to AvailableRegs\n");
+ LLVM_DEBUG(dbgs() << "%lr is not saved, adding to AvailableRegs\n");
}
}
@@ -1887,11 +1890,11 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
// instructions. This might not reduce RegDeficit all the way to zero,
// because we can only guarantee that r4-r6 are available, but r8-r11 may
// need saving.
- DEBUG(dbgs() << "Final RegDeficit = " << RegDeficit << "\n");
+ LLVM_DEBUG(dbgs() << "Final RegDeficit = " << RegDeficit << "\n");
for (; RegDeficit > 0 && !AvailableRegs.empty(); --RegDeficit) {
unsigned Reg = AvailableRegs.pop_back_val();
- DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
- << " to make up reg deficit\n");
+ LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
+ << " to make up reg deficit\n");
SavedRegs.set(Reg);
NumGPRSpills++;
CS1Spilled = true;
@@ -1902,7 +1905,8 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
if (Reg == ARM::LR)
LRSpilled = true;
}
- DEBUG(dbgs() << "After adding spills, RegDeficit = " << RegDeficit << "\n");
+ LLVM_DEBUG(dbgs() << "After adding spills, RegDeficit = " << RegDeficit
+ << "\n");
}
// If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
@@ -1923,7 +1927,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
// If stack and double are 8-byte aligned and we are spilling an odd number
// of GPRs, spill one extra callee save GPR so we won't have to pad between
// the integer and double callee save areas.
- DEBUG(dbgs() << "NumGPRSpills = " << NumGPRSpills << "\n");
+ LLVM_DEBUG(dbgs() << "NumGPRSpills = " << NumGPRSpills << "\n");
unsigned TargetAlign = getStackAlignment();
if (TargetAlign >= 8 && (NumGPRSpills & 1)) {
if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
@@ -1935,8 +1939,8 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
(STI.isTargetWindows() && Reg == ARM::R11) ||
isARMLowRegister(Reg) || Reg == ARM::LR) {
SavedRegs.set(Reg);
- DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
- << " to make up alignment\n");
+ LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
+ << " to make up alignment\n");
if (!MRI.isReserved(Reg) && !MRI.isPhysRegUsed(Reg))
ExtraCSSpill = true;
break;
@@ -1945,8 +1949,8 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
} else if (!UnspilledCS2GPRs.empty() && !AFI->isThumb1OnlyFunction()) {
unsigned Reg = UnspilledCS2GPRs.front();
SavedRegs.set(Reg);
- DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
- << " to make up alignment\n");
+ LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
+ << " to make up alignment\n");
if (!MRI.isReserved(Reg) && !MRI.isPhysRegUsed(Reg))
ExtraCSSpill = true;
}
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index fe10705d97fe..cdd2890c8b78 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -6841,10 +6841,9 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
}
// Final sanity check before we try to actually produce a shuffle.
- DEBUG(
- for (auto Src : Sources)
- assert(Src.ShuffleVec.getValueType() == ShuffleVT);
- );
+ LLVM_DEBUG(for (auto Src
+ : Sources)
+ assert(Src.ShuffleVec.getValueType() == ShuffleVT););
// The stars all align, our next step is to produce the mask for the shuffle.
SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
@@ -8091,7 +8090,7 @@ static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
}
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
- DEBUG(dbgs() << "Lowering node: "; Op.dump());
+ LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
switch (Op.getOpcode()) {
default: llvm_unreachable("Don't know how to custom lower this!");
case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
@@ -10539,9 +10538,9 @@ static SDValue PerformSHLSimplify(SDNode *N,
// Shift left to compensate for the lshr of C1Int.
SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));
- DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump(); SHL.dump();
- N->dump());
- DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
+ LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
+ SHL.dump(); N->dump());
+ LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
DAG.ReplaceAllUsesWith(SDValue(N, 0), Res);
return SDValue(N, 0);
@@ -14816,7 +14815,7 @@ bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
HABaseType Base = HA_UNKNOWN;
uint64_t Members = 0;
bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
- DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
+ LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
return IsHA || IsIntArray;
diff --git a/lib/Target/ARM/ARMInstructionSelector.cpp b/lib/Target/ARM/ARMInstructionSelector.cpp
index 773b0dead18e..c37bd87bfe47 100644
--- a/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -154,8 +154,8 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
// we hit another of its uses or its defs.
// Copies do not have constraints.
if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
return true;
@@ -399,12 +399,12 @@ bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
unsigned ExpectedSize,
unsigned ExpectedRegBankID) const {
if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
- DEBUG(dbgs() << "Unexpected size for register");
+ LLVM_DEBUG(dbgs() << "Unexpected size for register");
return false;
}
if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
- DEBUG(dbgs() << "Unexpected register bank for register");
+ LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
return false;
}
@@ -496,13 +496,13 @@ bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
MachineRegisterInfo &MRI) const {
if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
- DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
+ LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
return false;
}
auto GV = MIB->getOperand(1).getGlobal();
if (GV->isThreadLocal()) {
- DEBUG(dbgs() << "TLS variables not supported yet\n");
+ LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
return false;
}
@@ -607,7 +607,7 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
else
MIB->setDesc(TII.get(ARM::LDRLIT_ga_abs));
} else {
- DEBUG(dbgs() << "Object format not supported yet\n");
+ LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
return false;
}
@@ -691,7 +691,7 @@ bool ARMInstructionSelector::select(MachineInstr &I,
LLT DstTy = MRI.getType(I.getOperand(0).getReg());
// FIXME: Smaller destination sizes coming soon!
if (DstTy.getSizeInBits() != 32) {
- DEBUG(dbgs() << "Unsupported destination size for extension");
+ LLVM_DEBUG(dbgs() << "Unsupported destination size for extension");
return false;
}
@@ -733,7 +733,7 @@ bool ARMInstructionSelector::select(MachineInstr &I,
break;
}
default:
- DEBUG(dbgs() << "Unsupported source size for extension");
+ LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
return false;
}
break;
@@ -774,12 +774,13 @@ bool ARMInstructionSelector::select(MachineInstr &I,
}
if (SrcRegBank.getID() != DstRegBank.getID()) {
- DEBUG(dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
+ LLVM_DEBUG(
+ dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
return false;
}
if (SrcRegBank.getID() != ARM::GPRRegBankID) {
- DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
+ LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
return false;
}
@@ -789,21 +790,21 @@ bool ARMInstructionSelector::select(MachineInstr &I,
case G_CONSTANT: {
if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
// Non-pointer constants should be handled by TableGen.
- DEBUG(dbgs() << "Unsupported constant type\n");
+ LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
return false;
}
auto &Val = I.getOperand(1);
if (Val.isCImm()) {
if (!Val.getCImm()->isZero()) {
- DEBUG(dbgs() << "Unsupported pointer constant value\n");
+ LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
return false;
}
Val.ChangeToImmediate(0);
} else {
assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
if (Val.getImm() != 0) {
- DEBUG(dbgs() << "Unsupported pointer constant value\n");
+ LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
return false;
}
}
@@ -821,13 +822,15 @@ bool ARMInstructionSelector::select(MachineInstr &I,
const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
if (SrcRegBank.getID() != DstRegBank.getID()) {
- DEBUG(dbgs()
- << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
return false;
}
if (SrcRegBank.getID() != ARM::GPRRegBankID) {
- DEBUG(dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
+ LLVM_DEBUG(
+ dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
return false;
}
@@ -848,11 +851,11 @@ bool ARMInstructionSelector::select(MachineInstr &I,
unsigned Size = MRI.getType(OpReg).getSizeInBits();
if (Size == 64 && STI.isFPOnlySP()) {
- DEBUG(dbgs() << "Subtarget only supports single precision");
+ LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
return false;
}
if (Size != 32 && Size != 64) {
- DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
+ LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
return false;
}
@@ -883,7 +886,7 @@ bool ARMInstructionSelector::select(MachineInstr &I,
case G_LOAD: {
const auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
- DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
return false;
}
@@ -920,7 +923,7 @@ bool ARMInstructionSelector::select(MachineInstr &I,
}
case G_BRCOND: {
if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
- DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
+ LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
return false;
}
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 174ed3e5dd20..901138dbdfd5 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -2291,7 +2291,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
- DEBUG(dbgs() << "Formed " << *MIB << "\n");
+ LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumLDRDFormed;
} else {
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
@@ -2305,7 +2305,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
- DEBUG(dbgs() << "Formed " << *MIB << "\n");
+ LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumSTRDFormed;
}
MBB->erase(Op0);
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 232337412dbb..c387b75f122c 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -583,9 +583,9 @@ void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
SmallVector<BasicBlock*, 4> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
- DEBUG(dbgs() << "Loop has:\n"
- << "Blocks: " << L->getNumBlocks() << "\n"
- << "Exit blocks: " << ExitingBlocks.size() << "\n");
+ LLVM_DEBUG(dbgs() << "Loop has:\n"
+ << "Blocks: " << L->getNumBlocks() << "\n"
+ << "Exit blocks: " << ExitingBlocks.size() << "\n");
// Only allow another exit other than the latch. This acts as an early exit
// as it mirrors the profitability calculation of the runtime unroller.
@@ -616,7 +616,7 @@ void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
}
}
- DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
UP.Partial = true;
UP.Runtime = true;
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index ce6ca46437e4..f91397aca226 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -10283,10 +10283,11 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
Message.Message = "too many operands for instruction";
} else {
Message.Message = "invalid operand for instruction";
- DEBUG(dbgs() << "Missing diagnostic string for operand class " <<
- getMatchClassName((MatchClassKind)I.getOperandClass())
- << I.getOperandClass() << ", error " << I.getOperandError()
- << ", opcode " << MII.getName(I.getOpcode()) << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Missing diagnostic string for operand class "
+ << getMatchClassName((MatchClassKind)I.getOperandClass())
+ << I.getOperandClass() << ", error " << I.getOperandError()
+ << ", opcode " << MII.getName(I.getOpcode()) << "\n");
}
NearMissesOut.emplace_back(Message);
break;
diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp
index 153e7b1e2197..637e4a44c428 100644
--- a/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/lib/Target/ARM/MLxExpansionPass.cpp
@@ -309,17 +309,17 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
}
MIB.addImm(Pred).addReg(PredReg);
- DEBUG({
- dbgs() << "Expanding: " << *MI;
- dbgs() << " to:\n";
- MachineBasicBlock::iterator MII = MI;
- MII = std::prev(MII);
- MachineInstr &MI2 = *MII;
- MII = std::prev(MII);
- MachineInstr &MI1 = *MII;
- dbgs() << " " << MI1;
- dbgs() << " " << MI2;
- });
+ LLVM_DEBUG({
+ dbgs() << "Expanding: " << *MI;
+ dbgs() << " to:\n";
+ MachineBasicBlock::iterator MII = MI;
+ MII = std::prev(MII);
+ MachineInstr &MI2 = *MII;
+ MII = std::prev(MII);
+ MachineInstr &MI1 = *MII;
+ dbgs() << " " << MI1;
+ dbgs() << " " << MI2;
+ });
MI->eraseFromParent();
++NumExpand;
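
The MLxExpansion hunk just above uses the braced-block form of the macro, which guards several statements at once. A minimal sketch of that pattern with hypothetical names, assuming the same llvm/Support/Debug.h definitions used throughout this patch:

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "mlx-sketch"          // hypothetical tag for -debug-only=mlx-sketch

using namespace llvm;

static void dumpExpansion(int Before, int After) {
  // A braced block lets locals, loops, and multiple dumps share one guard;
  // the entire block disappears when assertions are disabled.
  LLVM_DEBUG({
    dbgs() << "Expanding: " << Before << '\n';
    dbgs() << "       to: " << After << '\n';
  });
}
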
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index 115ba65ff868..abf54ba7e87c 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -610,7 +610,8 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
- DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
+ LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
+ << " to 16-bit: " << *MIB);
MBB.erase_instr(MI);
++NumLdSts;
@@ -657,7 +658,8 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
- DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " <<*MIB);
+ LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
+ << " to 16-bit: " << *MIB);
MBB.erase_instr(MI);
++NumNarrows;
@@ -826,7 +828,8 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
- DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
+ LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
+ << " to 16-bit: " << *MIB);
MBB.erase_instr(MI);
++Num2Addrs;
@@ -933,7 +936,8 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
- DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
+ LLVM_DEBUG(errs() << "Converted 32-bit: " << *MI
+ << " to 16-bit: " << *MIB);
MBB.erase_instr(MI);
++NumNarrows;
diff --git a/lib/Target/AVR/AVRISelDAGToDAG.cpp b/lib/Target/AVR/AVRISelDAGToDAG.cpp
index 91d4e9e6598c..b0b23effc6c6 100644
--- a/lib/Target/AVR/AVRISelDAGToDAG.cpp
+++ b/lib/Target/AVR/AVRISelDAGToDAG.cpp
@@ -521,7 +521,7 @@ bool AVRDAGToDAGISel::selectMultiplication(llvm::SDNode *N) {
void AVRDAGToDAGISel::Select(SDNode *N) {
// If we have a custom node, we already have selected!
if (N->isMachineOpcode()) {
- DEBUG(errs() << "== "; N->dump(CurDAG); errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; N->dump(CurDAG); errs() << "\n");
N->setNodeId(-1);
return;
}
diff --git a/lib/Target/AVR/AsmParser/AVRAsmParser.cpp b/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
index b527ad3e0b14..d57cc098497f 100644
--- a/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
+++ b/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
@@ -482,7 +482,7 @@ bool AVRAsmParser::tryParseRelocExpression(OperandVector &Operands) {
}
bool AVRAsmParser::parseOperand(OperandVector &Operands) {
- DEBUG(dbgs() << "parseOperand\n");
+ LLVM_DEBUG(dbgs() << "parseOperand\n");
switch (getLexer().getKind()) {
default:
@@ -527,7 +527,7 @@ bool AVRAsmParser::parseOperand(OperandVector &Operands) {
OperandMatchResultTy
AVRAsmParser::parseMemriOperand(OperandVector &Operands) {
- DEBUG(dbgs() << "parseMemriOperand()\n");
+ LLVM_DEBUG(dbgs() << "parseMemriOperand()\n");
SMLoc E, S;
MCExpr const *Expression;
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
index fdf6423514a1..8b9bc08e144f 100644
--- a/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -190,7 +190,7 @@ void BPFDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
return;
}
@@ -277,7 +277,7 @@ void BPFDAGToDAGISel::PreprocessLoad(SDNode *Node,
if (OP1N->getOpcode() <= ISD::BUILTIN_OP_END || OP1N->getNumOperands() == 0)
return;
- DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n');
const GlobalAddressSDNode *GADN =
dyn_cast<GlobalAddressSDNode>(OP1N->getOperand(0).getNode());
@@ -287,7 +287,7 @@ void BPFDAGToDAGISel::PreprocessLoad(SDNode *Node,
getConstantFieldValue(GADN, CDN->getZExtValue(), size, new_val.c);
} else if (LDAddrNode->getOpcode() > ISD::BUILTIN_OP_END &&
LDAddrNode->getNumOperands() > 0) {
- DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n');
SDValue OP1 = LDAddrNode->getOperand(0);
if (const GlobalAddressSDNode *GADN =
@@ -310,8 +310,8 @@ void BPFDAGToDAGISel::PreprocessLoad(SDNode *Node,
val = new_val.d;
}
- DEBUG(dbgs() << "Replacing load of size " << size << " with constant " << val
- << '\n');
+ LLVM_DEBUG(dbgs() << "Replacing load of size " << size << " with constant "
+ << val << '\n');
SDValue NVal = CurDAG->getConstant(val, DL, MVT::i64);
// After replacement, the current node is dead, we need to
@@ -427,8 +427,8 @@ bool BPFDAGToDAGISel::fillGenericConstant(const DataLayout &DL,
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
uint64_t val = CI->getZExtValue();
- DEBUG(dbgs() << "Byte array at offset " << Offset << " with value " << val
- << '\n');
+ LLVM_DEBUG(dbgs() << "Byte array at offset " << Offset << " with value "
+ << val << '\n');
if (Size > 8 || (Size & (Size - 1)))
return false;
@@ -517,8 +517,9 @@ void BPFDAGToDAGISel::PreprocessCopyToReg(SDNode *Node) {
break;
}
- DEBUG(dbgs() << "Find Load Value to VReg "
- << TargetRegisterInfo::virtReg2Index(RegN->getReg()) << '\n');
+ LLVM_DEBUG(dbgs() << "Find Load Value to VReg "
+ << TargetRegisterInfo::virtReg2Index(RegN->getReg())
+ << '\n');
load_to_vreg_[RegN->getReg()] = mem_load_op;
}
@@ -544,8 +545,8 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
(IntNo == Intrinsic::bpf_load_word && MaskV == 0xFFFFFFFF)))
return;
- DEBUG(dbgs() << "Remove the redundant AND operation in: "; Node->dump();
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Remove the redundant AND operation in: ";
+ Node->dump(); dbgs() << '\n');
I--;
CurDAG->ReplaceAllUsesWith(SDValue(Node, 0), BaseV);
@@ -579,7 +580,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg()))
return;
unsigned AndOpReg = RegN->getReg();
- DEBUG(dbgs() << "Examine " << printReg(AndOpReg) << '\n');
+ LLVM_DEBUG(dbgs() << "Examine " << printReg(AndOpReg) << '\n');
  // Examine the PHI insns in the MachineBasicBlock to find out the
// definitions of this virtual register. At this stage (DAG2DAG
@@ -610,7 +611,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
// Trace each incoming definition, e.g., (%0, %bb.1) and (%1, %bb.3)
// The AND operation can be removed if both %0 in %bb.1 and %1 in
// %bb.3 are defined with a load matching the MaskN.
- DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n');
unsigned PrevReg = -1;
for (unsigned i = 0; i < MII->getNumOperands(); ++i) {
const MachineOperand &MOP = MII->getOperand(i);
@@ -626,8 +627,8 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
}
}
- DEBUG(dbgs() << "Remove the redundant AND operation in: "; Node->dump();
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Remove the redundant AND operation in: "; Node->dump();
+ dbgs() << '\n');
I--;
CurDAG->ReplaceAllUsesWith(SDValue(Node, 0), BaseV);
diff --git a/lib/Target/BPF/BPFMIPeephole.cpp b/lib/Target/BPF/BPFMIPeephole.cpp
index 09cc6b31ad83..9e984d0facfb 100644
--- a/lib/Target/BPF/BPFMIPeephole.cpp
+++ b/lib/Target/BPF/BPFMIPeephole.cpp
@@ -72,15 +72,15 @@ void BPFMIPeephole::initialize(MachineFunction &MFParm) {
MF = &MFParm;
MRI = &MF->getRegInfo();
TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
- DEBUG(dbgs() << "*** BPF MachineSSA peephole pass ***\n\n");
+ LLVM_DEBUG(dbgs() << "*** BPF MachineSSA peephole pass ***\n\n");
}
bool BPFMIPeephole::isMovFrom32Def(MachineInstr *MovMI)
{
MachineInstr *DefInsn = MRI->getVRegDef(MovMI->getOperand(1).getReg());
- DEBUG(dbgs() << " Def of Mov Src:");
- DEBUG(DefInsn->dump());
+ LLVM_DEBUG(dbgs() << " Def of Mov Src:");
+ LLVM_DEBUG(DefInsn->dump());
if (!DefInsn)
return false;
@@ -111,7 +111,7 @@ bool BPFMIPeephole::isMovFrom32Def(MachineInstr *MovMI)
return false;
}
- DEBUG(dbgs() << " One ZExt elim sequence identified.\n");
+ LLVM_DEBUG(dbgs() << " One ZExt elim sequence identified.\n");
return true;
}
@@ -139,8 +139,8 @@ bool BPFMIPeephole::eliminateZExtSeq(void) {
unsigned ShfReg = MI.getOperand(1).getReg();
MachineInstr *SllMI = MRI->getVRegDef(ShfReg);
- DEBUG(dbgs() << "Starting SRL found:");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Starting SRL found:");
+ LLVM_DEBUG(MI.dump());
if (!SllMI ||
SllMI->isPHI() ||
@@ -148,8 +148,8 @@ bool BPFMIPeephole::eliminateZExtSeq(void) {
SllMI->getOperand(2).getImm() != 32)
continue;
- DEBUG(dbgs() << " SLL found:");
- DEBUG(SllMI->dump());
+ LLVM_DEBUG(dbgs() << " SLL found:");
+ LLVM_DEBUG(SllMI->dump());
MachineInstr *MovMI = MRI->getVRegDef(SllMI->getOperand(1).getReg());
if (!MovMI ||
@@ -157,12 +157,13 @@ bool BPFMIPeephole::eliminateZExtSeq(void) {
MovMI->getOpcode() != BPF::MOV_32_64)
continue;
- DEBUG(dbgs() << " Type cast Mov found:");
- DEBUG(MovMI->dump());
+ LLVM_DEBUG(dbgs() << " Type cast Mov found:");
+ LLVM_DEBUG(MovMI->dump());
unsigned SubReg = MovMI->getOperand(1).getReg();
if (!isMovFrom32Def(MovMI)) {
- DEBUG(dbgs() << " One ZExt elim sequence failed qualifying elim.\n");
+ LLVM_DEBUG(dbgs()
+ << " One ZExt elim sequence failed qualifying elim.\n");
continue;
}
@@ -228,7 +229,7 @@ public:
void BPFMIPreEmitPeephole::initialize(MachineFunction &MFParm) {
MF = &MFParm;
TRI = MF->getSubtarget<BPFSubtarget>().getRegisterInfo();
- DEBUG(dbgs() << "*** BPF PreEmit peephole pass ***\n\n");
+ LLVM_DEBUG(dbgs() << "*** BPF PreEmit peephole pass ***\n\n");
}
bool BPFMIPreEmitPeephole::eliminateRedundantMov(void) {
@@ -239,8 +240,8 @@ bool BPFMIPreEmitPeephole::eliminateRedundantMov(void) {
for (MachineInstr &MI : MBB) {
// If the previous instruction was marked for elimination, remove it now.
if (ToErase) {
- DEBUG(dbgs() << " Redundant Mov Eliminated:");
- DEBUG(ToErase->dump());
+ LLVM_DEBUG(dbgs() << " Redundant Mov Eliminated:");
+ LLVM_DEBUG(ToErase->dump());
ToErase->eraseFromParent();
ToErase = nullptr;
}
diff --git a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index 1d15a272d7db..577513bd3c93 100644
--- a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -462,9 +462,9 @@ void HexagonOperand::print(raw_ostream &OS) const {
}
bool HexagonAsmParser::finishBundle(SMLoc IDLoc, MCStreamer &Out) {
- DEBUG(dbgs() << "Bundle:");
- DEBUG(MCB.dump_pretty(dbgs()));
- DEBUG(dbgs() << "--\n");
+ LLVM_DEBUG(dbgs() << "Bundle:");
+ LLVM_DEBUG(MCB.dump_pretty(dbgs()));
+ LLVM_DEBUG(dbgs() << "--\n");
MCB.setLoc(IDLoc);
// Check the bundle for errors.
@@ -557,9 +557,9 @@ bool HexagonAsmParser::matchOneInstruction(MCInst &MCI, SMLoc IDLoc,
canonicalizeImmediates(MCI);
result = processInstruction(MCI, InstOperands, IDLoc);
- DEBUG(dbgs() << "Insn:");
- DEBUG(MCI.dump_pretty(dbgs()));
- DEBUG(dbgs() << "\n\n");
+ LLVM_DEBUG(dbgs() << "Insn:");
+ LLVM_DEBUG(MCI.dump_pretty(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n\n");
MCI.setLoc(IDLoc);
}
@@ -1296,9 +1296,9 @@ unsigned HexagonAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
return Match_Success;
}
- DEBUG(dbgs() << "Unmatched Operand:");
- DEBUG(Op->dump());
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Unmatched Operand:");
+ LLVM_DEBUG(Op->dump());
+ LLVM_DEBUG(dbgs() << "\n");
return Match_InvalidOperand;
}
diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 849bba987c3c..4791b067aa8d 100644
--- a/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -2450,7 +2450,7 @@ bool BitSimplification::simplifyExtractLow(MachineInstr *MI,
if (Len == RW)
return false;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << __func__ << " on reg: " << printReg(RD.Reg, &HRI, RD.Sub)
<< ", MI: " << *MI;
dbgs() << "Cell: " << RC << '\n';
@@ -2644,7 +2644,7 @@ bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
const HexagonEvaluator HE(HRI, MRI, HII, MF);
BitTracker BT(HE, MF);
- DEBUG(BT.trace(true));
+ LLVM_DEBUG(BT.trace(true));
BT.run();
MachineBasicBlock &Entry = MF.front();
@@ -2975,7 +2975,8 @@ void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
}
bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
- DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB) << "\n");
+ LLVM_DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB)
+ << "\n");
std::vector<PhiInfo> Phis;
for (auto &I : *C.LB) {
if (!I.isPHI())
@@ -2999,7 +3000,7 @@ bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
Phis.push_back(PhiInfo(I, *C.LB));
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Phis: {";
for (auto &I : Phis) {
dbgs() << ' ' << printReg(I.DefR, HRI) << "=phi("
@@ -3120,7 +3121,7 @@ bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
Groups.push_back(G);
}
- DEBUG({
+ LLVM_DEBUG({
for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
InstrGroup &G = Groups[i];
dbgs() << "Group[" << i << "] inp: "
@@ -3188,7 +3189,7 @@ bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
BitTracker BT(HE, MF);
- DEBUG(BT.trace(true));
+ LLVM_DEBUG(BT.trace(true));
BT.run();
BTP = &BT;
diff --git a/lib/Target/Hexagon/HexagonBlockRanges.cpp b/lib/Target/Hexagon/HexagonBlockRanges.cpp
index 2fb30dfbc1c9..48a4505458ae 100644
--- a/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -422,10 +422,10 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeLiveMap(
InstrIndexMap &IndexMap) {
RegToRangeMap LiveMap;
- DEBUG(dbgs() << __func__ << ": index map\n" << IndexMap << '\n');
+ LLVM_DEBUG(dbgs() << __func__ << ": index map\n" << IndexMap << '\n');
computeInitialLiveRanges(IndexMap, LiveMap);
- DEBUG(dbgs() << __func__ << ": live map\n"
- << PrintRangeMap(LiveMap, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << __func__ << ": live map\n"
+ << PrintRangeMap(LiveMap, TRI) << '\n');
return LiveMap;
}
@@ -486,8 +486,8 @@ HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeDeadMap(
if (TargetRegisterInfo::isVirtualRegister(P.first.Reg))
addDeadRanges(P.first);
- DEBUG(dbgs() << __func__ << ": dead map\n"
- << PrintRangeMap(DeadMap, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << __func__ << ": dead map\n"
+ << PrintRangeMap(DeadMap, TRI) << '\n');
return DeadMap;
}
diff --git a/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
index 3e6420c671f9..2fa7888dd02b 100644
--- a/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
+++ b/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
@@ -90,7 +90,7 @@ FunctionPass *llvm::createHexagonBranchRelaxation() {
}
bool HexagonBranchRelaxation::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "****** Hexagon Branch Relaxation ******\n");
+ LLVM_DEBUG(dbgs() << "****** Hexagon Branch Relaxation ******\n");
auto &HST = MF.getSubtarget<HexagonSubtarget>();
HII = HST.getInstrInfo();
@@ -200,14 +200,14 @@ bool HexagonBranchRelaxation::reGenerateBranch(MachineFunction &MF,
for (auto &MI : B) {
if (!MI.isBranch() || !isJumpOutOfRange(MI, BlockToInstOffset))
continue;
- DEBUG(dbgs() << "Long distance jump. isExtendable("
- << HII->isExtendable(MI) << ") isConstExtended("
- << HII->isConstExtended(MI) << ") " << MI);
+ LLVM_DEBUG(dbgs() << "Long distance jump. isExtendable("
+ << HII->isExtendable(MI) << ") isConstExtended("
+ << HII->isConstExtended(MI) << ") " << MI);
// Since we have not merged HW loops relaxation into
// this code (yet), soften our approach for the moment.
if (!HII->isExtendable(MI) && !HII->isExtended(MI)) {
- DEBUG(dbgs() << "\tUnderimplemented relax branch instruction.\n");
+ LLVM_DEBUG(dbgs() << "\tUnderimplemented relax branch instruction.\n");
} else {
// Find which operand is expandable.
int ExtOpNum = HII->getCExtOpNum(MI);
diff --git a/lib/Target/Hexagon/HexagonCommonGEP.cpp b/lib/Target/Hexagon/HexagonCommonGEP.cpp
index 53803b85db80..073f3e56aaf7 100644
--- a/lib/Target/Hexagon/HexagonCommonGEP.cpp
+++ b/lib/Target/Hexagon/HexagonCommonGEP.cpp
@@ -342,7 +342,7 @@ bool HexagonCommonGEP::isHandledGepForm(GetElementPtrInst *GepI) {
void HexagonCommonGEP::processGepInst(GetElementPtrInst *GepI,
ValueToNodeMap &NM) {
- DEBUG(dbgs() << "Visiting GEP: " << *GepI << '\n');
+ LLVM_DEBUG(dbgs() << "Visiting GEP: " << *GepI << '\n');
GepNode *N = new (*Mem) GepNode;
Value *PtrOp = GepI->getPointerOperand();
uint32_t InBounds = GepI->isInBounds() ? GepNode::InBounds : 0;
@@ -426,7 +426,7 @@ void HexagonCommonGEP::collect() {
}
}
- DEBUG(dbgs() << "Gep nodes after initial collection:\n" << Nodes);
+ LLVM_DEBUG(dbgs() << "Gep nodes after initial collection:\n" << Nodes);
}
static void invert_find_roots(const NodeVect &Nodes, NodeChildrenMap &NCM,
@@ -575,7 +575,7 @@ void HexagonCommonGEP::common() {
}
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Gep node equality:\n";
for (NodePairSet::iterator I = Eq.begin(), E = Eq.end(); I != E; ++I)
dbgs() << "{ " << I->first << ", " << I->second << " }\n";
@@ -642,7 +642,7 @@ void HexagonCommonGEP::common() {
N->Parent = Rep;
}
- DEBUG(dbgs() << "Gep nodes after commoning:\n" << Nodes);
+ LLVM_DEBUG(dbgs() << "Gep nodes after commoning:\n" << Nodes);
// Finally, erase the nodes that are no longer used.
NodeSet Erase;
@@ -662,35 +662,35 @@ void HexagonCommonGEP::common() {
NodeVect::iterator NewE = remove_if(Nodes, in_set(Erase));
Nodes.resize(std::distance(Nodes.begin(), NewE));
- DEBUG(dbgs() << "Gep nodes after post-commoning cleanup:\n" << Nodes);
+ LLVM_DEBUG(dbgs() << "Gep nodes after post-commoning cleanup:\n" << Nodes);
}
template <typename T>
static BasicBlock *nearest_common_dominator(DominatorTree *DT, T &Blocks) {
- DEBUG({
- dbgs() << "NCD of {";
- for (typename T::iterator I = Blocks.begin(), E = Blocks.end();
- I != E; ++I) {
- if (!*I)
- continue;
- BasicBlock *B = cast<BasicBlock>(*I);
- dbgs() << ' ' << B->getName();
- }
- dbgs() << " }\n";
- });
+ LLVM_DEBUG({
+ dbgs() << "NCD of {";
+ for (typename T::iterator I = Blocks.begin(), E = Blocks.end(); I != E;
+ ++I) {
+ if (!*I)
+ continue;
+ BasicBlock *B = cast<BasicBlock>(*I);
+ dbgs() << ' ' << B->getName();
+ }
+ dbgs() << " }\n";
+ });
- // Allow null basic blocks in Blocks. In such cases, return nullptr.
- typename T::iterator I = Blocks.begin(), E = Blocks.end();
- if (I == E || !*I)
+ // Allow null basic blocks in Blocks. In such cases, return nullptr.
+ typename T::iterator I = Blocks.begin(), E = Blocks.end();
+ if (I == E || !*I)
+ return nullptr;
+ BasicBlock *Dom = cast<BasicBlock>(*I);
+ while (++I != E) {
+ BasicBlock *B = cast_or_null<BasicBlock>(*I);
+ Dom = B ? DT->findNearestCommonDominator(Dom, B) : nullptr;
+ if (!Dom)
return nullptr;
- BasicBlock *Dom = cast<BasicBlock>(*I);
- while (++I != E) {
- BasicBlock *B = cast_or_null<BasicBlock>(*I);
- Dom = B ? DT->findNearestCommonDominator(Dom, B) : nullptr;
- if (!Dom)
- return nullptr;
}
- DEBUG(dbgs() << "computed:" << Dom->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "computed:" << Dom->getName() << '\n');
return Dom;
}
@@ -753,7 +753,7 @@ static bool is_empty(const BasicBlock *B) {
BasicBlock *HexagonCommonGEP::recalculatePlacement(GepNode *Node,
NodeChildrenMap &NCM, NodeToValueMap &Loc) {
- DEBUG(dbgs() << "Loc for node:" << Node << '\n');
+ LLVM_DEBUG(dbgs() << "Loc for node:" << Node << '\n');
// Recalculate the placement for Node, assuming that the locations of
// its children in Loc are valid.
// Return nullptr if there is no valid placement for Node (for example, it
@@ -820,7 +820,7 @@ BasicBlock *HexagonCommonGEP::recalculatePlacement(GepNode *Node,
BasicBlock *HexagonCommonGEP::recalculatePlacementRec(GepNode *Node,
NodeChildrenMap &NCM, NodeToValueMap &Loc) {
- DEBUG(dbgs() << "LocRec begin for node:" << Node << '\n');
+ LLVM_DEBUG(dbgs() << "LocRec begin for node:" << Node << '\n');
// Recalculate the placement of Node, after recursively recalculating the
// placements of all its children.
NodeChildrenMap::iterator CF = NCM.find(Node);
@@ -830,7 +830,7 @@ BasicBlock *HexagonCommonGEP::recalculatePlacementRec(GepNode *Node,
recalculatePlacementRec(*I, NCM, Loc);
}
BasicBlock *LB = recalculatePlacement(Node, NCM, Loc);
- DEBUG(dbgs() << "LocRec end for node:" << Node << '\n');
+ LLVM_DEBUG(dbgs() << "LocRec end for node:" << Node << '\n');
return LB;
}
@@ -952,8 +952,8 @@ namespace {
void HexagonCommonGEP::separateChainForNode(GepNode *Node, Use *U,
NodeToValueMap &Loc) {
User *R = U->getUser();
- DEBUG(dbgs() << "Separating chain for node (" << Node << ") user: "
- << *R << '\n');
+ LLVM_DEBUG(dbgs() << "Separating chain for node (" << Node << ") user: " << *R
+ << '\n');
BasicBlock *PB = cast<Instruction>(R)->getParent();
GepNode *N = Node;
@@ -996,7 +996,7 @@ void HexagonCommonGEP::separateChainForNode(GepNode *Node, Use *U,
// Should at least have U in NewUs.
NewNode->Flags |= GepNode::Used;
- DEBUG(dbgs() << "new node: " << NewNode << " " << *NewNode << '\n');
+ LLVM_DEBUG(dbgs() << "new node: " << NewNode << " " << *NewNode << '\n');
assert(!NewUs.empty());
Uses[NewNode] = NewUs;
}
@@ -1007,7 +1007,7 @@ void HexagonCommonGEP::separateConstantChains(GepNode *Node,
NodeSet Ns;
nodes_for_root(Node, NCM, Ns);
- DEBUG(dbgs() << "Separating constant chains for node: " << Node << '\n');
+ LLVM_DEBUG(dbgs() << "Separating constant chains for node: " << Node << '\n');
// Collect all used nodes together with the uses from loads and stores,
// where the GEP node could be folded into the load/store instruction.
NodeToUsesMap FNs; // Foldable nodes.
@@ -1044,7 +1044,7 @@ void HexagonCommonGEP::separateConstantChains(GepNode *Node,
FNs.insert(std::make_pair(N, LSs));
}
- DEBUG(dbgs() << "Nodes with foldable users:\n" << FNs);
+ LLVM_DEBUG(dbgs() << "Nodes with foldable users:\n" << FNs);
for (NodeToUsesMap::iterator I = FNs.begin(), E = FNs.end(); I != E; ++I) {
GepNode *N = I->first;
@@ -1066,32 +1066,33 @@ void HexagonCommonGEP::computeNodePlacement(NodeToValueMap &Loc) {
for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I)
recalculatePlacementRec(*I, NCM, Loc);
- DEBUG(dbgs() << "Initial node placement:\n" << LocationAsBlock(Loc));
+ LLVM_DEBUG(dbgs() << "Initial node placement:\n" << LocationAsBlock(Loc));
if (OptEnableInv) {
for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I)
adjustForInvariance(*I, NCM, Loc);
- DEBUG(dbgs() << "Node placement after adjustment for invariance:\n"
- << LocationAsBlock(Loc));
+ LLVM_DEBUG(dbgs() << "Node placement after adjustment for invariance:\n"
+ << LocationAsBlock(Loc));
}
if (OptEnableConst) {
for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I)
separateConstantChains(*I, NCM, Loc);
}
- DEBUG(dbgs() << "Node use information:\n" << Uses);
+ LLVM_DEBUG(dbgs() << "Node use information:\n" << Uses);
// At the moment, there is no further refinement of the initial placement.
// Such a refinement could include splitting the nodes if they are placed
// too far from some of its users.
- DEBUG(dbgs() << "Final node placement:\n" << LocationAsBlock(Loc));
+ LLVM_DEBUG(dbgs() << "Final node placement:\n" << LocationAsBlock(Loc));
}
Value *HexagonCommonGEP::fabricateGEP(NodeVect &NA, BasicBlock::iterator At,
BasicBlock *LocB) {
- DEBUG(dbgs() << "Fabricating GEP in " << LocB->getName()
- << " for nodes:\n" << NA);
+ LLVM_DEBUG(dbgs() << "Fabricating GEP in " << LocB->getName()
+ << " for nodes:\n"
+ << NA);
unsigned Num = NA.size();
GepNode *RN = NA[0];
assert((RN->Flags & GepNode::Root) && "Creating GEP for non-root");
@@ -1128,7 +1129,7 @@ Value *HexagonCommonGEP::fabricateGEP(NodeVect &NA, BasicBlock::iterator At,
Type *ElTy = cast<PointerType>(InpTy->getScalarType())->getElementType();
NewInst = GetElementPtrInst::Create(ElTy, Input, A, "cgep", &*At);
NewInst->setIsInBounds(RN->Flags & GepNode::InBounds);
- DEBUG(dbgs() << "new GEP: " << *NewInst << '\n');
+ LLVM_DEBUG(dbgs() << "new GEP: " << *NewInst << '\n');
Input = NewInst;
} while (nax <= Num);
@@ -1161,7 +1162,7 @@ void HexagonCommonGEP::getAllUsersForNode(GepNode *Node, ValueVect &Values,
}
void HexagonCommonGEP::materialize(NodeToValueMap &Loc) {
- DEBUG(dbgs() << "Nodes before materialization:\n" << Nodes << '\n');
+ LLVM_DEBUG(dbgs() << "Nodes before materialization:\n" << Nodes << '\n');
NodeChildrenMap NCM;
NodeVect Roots;
// Compute the inversion again, since computing placement could alter
diff --git a/lib/Target/Hexagon/HexagonConstExtenders.cpp b/lib/Target/Hexagon/HexagonConstExtenders.cpp
index a2adc43bc481..8b024fad4f86 100644
--- a/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -1258,7 +1258,7 @@ void HCE::assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
if (!ED.IsDef)
continue;
ExtValue EV(ED);
- DEBUG(dbgs() << " =" << I << ". " << EV << " " << ED << '\n');
+ LLVM_DEBUG(dbgs() << " =" << I << ". " << EV << " " << ED << '\n');
assert(ED.Rd.Reg != 0);
Ranges[I-Begin] = getOffsetRange(ED.Rd).shift(EV.Offset);
// A2_tfrsi is a special case: it will be replaced with A2_addi, which
@@ -1278,7 +1278,7 @@ void HCE::assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
if (ED.IsDef)
continue;
ExtValue EV(ED);
- DEBUG(dbgs() << " " << I << ". " << EV << " " << ED << '\n');
+ LLVM_DEBUG(dbgs() << " " << I << ". " << EV << " " << ED << '\n');
OffsetRange Dev = getOffsetRange(ED);
Ranges[I-Begin].intersect(Dev.shift(EV.Offset));
}
@@ -1290,7 +1290,7 @@ void HCE::assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
for (unsigned I = Begin; I != End; ++I)
RangeMap[Ranges[I-Begin]].insert(I);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Ranges\n";
for (unsigned I = Begin; I != End; ++I)
dbgs() << " " << I << ". " << Ranges[I-Begin] << '\n';
@@ -1384,7 +1384,7 @@ void HCE::assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
}
}
- DEBUG(dbgs() << "IMap (before fixup) = " << PrintIMap(IMap, *HRI));
+ LLVM_DEBUG(dbgs() << "IMap (before fixup) = " << PrintIMap(IMap, *HRI));
// There is some ambiguity in what initializer should be used, if the
// descriptor's subexpression is non-trivial: it can be the entire
@@ -1454,7 +1454,7 @@ void HCE::assignInits(const ExtRoot &ER, unsigned Begin, unsigned End,
}
}
- DEBUG(dbgs() << "IMap (after fixup) = " << PrintIMap(IMap, *HRI));
+ LLVM_DEBUG(dbgs() << "IMap (after fixup) = " << PrintIMap(IMap, *HRI));
}
void HCE::calculatePlacement(const ExtenderInit &ExtI, const IndexList &Refs,
@@ -1557,9 +1557,9 @@ HCE::Register HCE::insertInitializer(Loc DefL, const ExtenderInit &ExtI) {
assert(InitI);
(void)InitI;
- DEBUG(dbgs() << "Inserted def in bb#" << MBB.getNumber()
- << " for initializer: " << PrintInit(ExtI, *HRI)
- << "\n " << *InitI);
+ LLVM_DEBUG(dbgs() << "Inserted def in bb#" << MBB.getNumber()
+ << " for initializer: " << PrintInit(ExtI, *HRI) << "\n "
+ << *InitI);
return { DefR, 0 };
}
@@ -1812,8 +1812,8 @@ bool HCE::replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI) {
ExtValue EV(ED);
int32_t Diff = EV.Offset - DefV.Offset;
const MachineInstr &MI = *ED.UseMI;
- DEBUG(dbgs() << __func__ << " Idx:" << Idx << " ExtR:"
- << PrintRegister(ExtR, *HRI) << " Diff:" << Diff << '\n');
+ LLVM_DEBUG(dbgs() << __func__ << " Idx:" << Idx << " ExtR:"
+ << PrintRegister(ExtR, *HRI) << " Diff:" << Diff << '\n');
// These two addressing modes must be converted into indexed forms
// regardless of what the initializer looks like.
@@ -1919,7 +1919,7 @@ const MachineOperand &HCE::getStoredValueOp(const MachineInstr &MI) const {
bool HCE::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
- DEBUG(MF.print(dbgs() << "Before " << getPassName() << '\n', nullptr));
+ LLVM_DEBUG(MF.print(dbgs() << "Before " << getPassName() << '\n', nullptr));
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
@@ -1934,7 +1934,7 @@ bool HCE::runOnMachineFunction(MachineFunction &MF) {
});
bool Changed = false;
- DEBUG(dbgs() << "Collected " << Extenders.size() << " extenders\n");
+ LLVM_DEBUG(dbgs() << "Collected " << Extenders.size() << " extenders\n");
for (unsigned I = 0, E = Extenders.size(); I != E; ) {
unsigned B = I;
const ExtRoot &T = Extenders[B].getOp();
@@ -1946,7 +1946,7 @@ bool HCE::runOnMachineFunction(MachineFunction &MF) {
Changed |= replaceExtenders(IMap);
}
- DEBUG({
+ LLVM_DEBUG({
if (Changed)
MF.print(dbgs() << "After " << getPassName() << '\n', nullptr);
else
diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp
index 730cfafae429..8f22a71dc1f3 100644
--- a/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -617,7 +617,7 @@ void MachineConstPropagator::CellMap::print(raw_ostream &os,
void MachineConstPropagator::visitPHI(const MachineInstr &PN) {
const MachineBasicBlock *MB = PN.getParent();
unsigned MBN = MB->getNumber();
- DEBUG(dbgs() << "Visiting FI(" << printMBBReference(*MB) << "): " << PN);
+ LLVM_DEBUG(dbgs() << "Visiting FI(" << printMBBReference(*MB) << "): " << PN);
const MachineOperand &MD = PN.getOperand(0);
Register DefR(MD);
@@ -642,8 +642,8 @@ Bottomize:
const MachineBasicBlock *PB = PN.getOperand(i+1).getMBB();
unsigned PBN = PB->getNumber();
if (!EdgeExec.count(CFGEdge(PBN, MBN))) {
- DEBUG(dbgs() << " edge " << printMBBReference(*PB) << "->"
- << printMBBReference(*MB) << " not executable\n");
+ LLVM_DEBUG(dbgs() << " edge " << printMBBReference(*PB) << "->"
+ << printMBBReference(*MB) << " not executable\n");
continue;
}
const MachineOperand &SO = PN.getOperand(i);
@@ -658,8 +658,9 @@ Bottomize:
LatticeCell SrcC;
bool Eval = MCE.evaluate(UseR, Cells.get(UseR.Reg), SrcC);
- DEBUG(dbgs() << " edge from " << printMBBReference(*PB) << ": "
- << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg) << SrcC << '\n');
+ LLVM_DEBUG(dbgs() << " edge from " << printMBBReference(*PB) << ": "
+ << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg) << SrcC
+ << '\n');
Changed |= Eval ? DefC.meet(SrcC)
: DefC.setBottom();
Cells.update(DefR.Reg, DefC);
@@ -671,11 +672,11 @@ Bottomize:
}
void MachineConstPropagator::visitNonBranch(const MachineInstr &MI) {
- DEBUG(dbgs() << "Visiting MI(" << printMBBReference(*MI.getParent())
- << "): " << MI);
+ LLVM_DEBUG(dbgs() << "Visiting MI(" << printMBBReference(*MI.getParent())
+ << "): " << MI);
CellMap Outputs;
bool Eval = MCE.evaluate(MI, Cells, Outputs);
- DEBUG({
+ LLVM_DEBUG({
if (Eval) {
dbgs() << " outputs:";
for (auto &I : Outputs)
@@ -728,8 +729,8 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) {
while (It != End) {
const MachineInstr &MI = *It;
InstrExec.insert(&MI);
- DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "("
- << printMBBReference(B) << "): " << MI);
+ LLVM_DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "("
+ << printMBBReference(B) << "): " << MI);
// Do not evaluate subsequent branches if the evaluation of any of the
// previous branches failed. Keep iterating over the branches only
// to mark them as executable.
@@ -763,23 +764,23 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) {
// last one set "FallsThru", then add an edge to the layout successor
// to the targets.
Targets.clear();
- DEBUG(dbgs() << " failed to evaluate a branch...adding all CFG "
- "successors\n");
+ LLVM_DEBUG(dbgs() << " failed to evaluate a branch...adding all CFG "
+ "successors\n");
for (const MachineBasicBlock *SB : B.successors())
Targets.insert(SB);
}
for (const MachineBasicBlock *TB : Targets) {
unsigned TBN = TB->getNumber();
- DEBUG(dbgs() << " pushing edge " << printMBBReference(B) << " -> "
- << printMBBReference(*TB) << "\n");
+ LLVM_DEBUG(dbgs() << " pushing edge " << printMBBReference(B) << " -> "
+ << printMBBReference(*TB) << "\n");
FlowQ.push(CFGEdge(MBN, TBN));
}
}
void MachineConstPropagator::visitUsesOf(unsigned Reg) {
- DEBUG(dbgs() << "Visiting uses of " << printReg(Reg, &MCE.TRI)
- << Cells.get(Reg) << '\n');
+ LLVM_DEBUG(dbgs() << "Visiting uses of " << printReg(Reg, &MCE.TRI)
+ << Cells.get(Reg) << '\n');
for (MachineInstr &MI : MRI->use_nodbg_instructions(Reg)) {
// Do not process non-executable instructions. They can become executable
// later (via a flow-edge in the work queue). In such case, the instruc-
@@ -870,10 +871,10 @@ void MachineConstPropagator::propagate(MachineFunction &MF) {
CFGEdge Edge = FlowQ.front();
FlowQ.pop();
- DEBUG(dbgs() << "Picked edge "
- << printMBBReference(*MF.getBlockNumbered(Edge.first)) << "->"
- << printMBBReference(*MF.getBlockNumbered(Edge.second))
- << '\n');
+ LLVM_DEBUG(
+ dbgs() << "Picked edge "
+ << printMBBReference(*MF.getBlockNumbered(Edge.first)) << "->"
+ << printMBBReference(*MF.getBlockNumbered(Edge.second)) << '\n');
if (Edge.first != EntryNum)
if (EdgeExec.count(Edge))
continue;
@@ -927,7 +928,7 @@ void MachineConstPropagator::propagate(MachineFunction &MF) {
}
} // while (FlowQ)
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Cells after propagation:\n";
Cells.print(dbgs(), MCE.TRI);
dbgs() << "Dead CFG edges:\n";
@@ -1042,7 +1043,7 @@ bool MachineConstPropagator::rewrite(MachineFunction &MF) {
// This is the constant propagation algorithm as described by Wegman-Zadeck.
// Most of the terminology comes from there.
bool MachineConstPropagator::run(MachineFunction &MF) {
- DEBUG(MF.print(dbgs() << "Starting MachineConstPropagator\n", nullptr));
+ LLVM_DEBUG(MF.print(dbgs() << "Starting MachineConstPropagator\n", nullptr));
MRI = &MF.getRegInfo();
@@ -1054,7 +1055,7 @@ bool MachineConstPropagator::run(MachineFunction &MF) {
propagate(MF);
bool Changed = rewrite(MF);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "End of MachineConstPropagator (Changed=" << Changed << ")\n";
if (Changed)
MF.print(dbgs(), nullptr);
@@ -2778,7 +2779,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI,
AllDefs = false;
// Some diagnostics.
- // DEBUG({...}) gets confused with all this code as an argument.
+ // LLVM_DEBUG({...}) gets confused with all this code as an argument.
#ifndef NDEBUG
bool Debugging = DebugFlag && isCurrentDebugType(DEBUG_TYPE);
if (Debugging) {
@@ -2923,7 +2924,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI,
ChangedNum++;
}
- DEBUG({
+ LLVM_DEBUG({
if (!NewInstrs.empty()) {
MachineFunction &MF = *MI.getParent()->getParent();
dbgs() << "In function: " << MF.getName() << "\n";
@@ -3090,7 +3091,7 @@ bool HexagonConstEvaluator::rewriteHexConstUses(MachineInstr &MI,
MO.setIsKill(false);
}
- DEBUG({
+ LLVM_DEBUG({
if (NewMI) {
dbgs() << "Rewrite: for " << MI;
if (NewMI != &MI)
@@ -3130,7 +3131,7 @@ bool HexagonConstEvaluator::rewriteHexBranch(MachineInstr &BrI,
if (BrI.getOpcode() == Hexagon::J2_jump)
return false;
- DEBUG(dbgs() << "Rewrite(" << printMBBReference(B) << "):" << BrI);
+ LLVM_DEBUG(dbgs() << "Rewrite(" << printMBBReference(B) << "):" << BrI);
bool Rewritten = false;
if (NumTargets > 0) {
assert(!FallsThru && "This should have been checked before");
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index f3d24739da43..fccde96d8a32 100644
--- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -649,7 +649,7 @@ void HexagonCopyToCombine::emitConst64(MachineBasicBlock::iterator &InsertPt,
unsigned DoubleDestReg,
MachineOperand &HiOperand,
MachineOperand &LoOperand) {
- DEBUG(dbgs() << "Found a CONST64\n");
+ LLVM_DEBUG(dbgs() << "Found a CONST64\n");
DebugLoc DL = InsertPt->getDebugLoc();
MachineBasicBlock *BB = InsertPt->getParent();
diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index bbd06ff747ef..557e6384be6a 100644
--- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -238,7 +238,8 @@ bool HexagonEarlyIfConversion::isPreheader(const MachineBasicBlock *B) const {
bool HexagonEarlyIfConversion::matchFlowPattern(MachineBasicBlock *B,
MachineLoop *L, FlowPattern &FP) {
- DEBUG(dbgs() << "Checking flow pattern at " << printMBBReference(*B) << "\n");
+ LLVM_DEBUG(dbgs() << "Checking flow pattern at " << printMBBReference(*B)
+ << "\n");
// Interested only in conditional branches, no .new, no new-value, etc.
// Check the terminators directly, it's easier than handling all responses
@@ -325,13 +326,13 @@ bool HexagonEarlyIfConversion::matchFlowPattern(MachineBasicBlock *B,
}
// Don't try to predicate loop preheaders.
if ((TB && isPreheader(TB)) || (FB && isPreheader(FB))) {
- DEBUG(dbgs() << "One of blocks " << PrintMB(TB) << ", " << PrintMB(FB)
- << " is a loop preheader. Skipping.\n");
+ LLVM_DEBUG(dbgs() << "One of blocks " << PrintMB(TB) << ", " << PrintMB(FB)
+ << " is a loop preheader. Skipping.\n");
return false;
}
FP = FlowPattern(B, PredR, TB, FB, JB);
- DEBUG(dbgs() << "Detected " << PrintFP(FP, *TRI) << "\n");
+ LLVM_DEBUG(dbgs() << "Detected " << PrintFP(FP, *TRI) << "\n");
return true;
}
@@ -551,8 +552,9 @@ bool HexagonEarlyIfConversion::isProfitable(const FlowPattern &FP) const {
};
unsigned Spare = 0;
unsigned TotalIn = TotalCount(FP.TrueB, Spare) + TotalCount(FP.FalseB, Spare);
- DEBUG(dbgs() << "Total number of instructions to be predicated/speculated: "
- << TotalIn << ", spare room: " << Spare << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Total number of instructions to be predicated/speculated: "
+ << TotalIn << ", spare room: " << Spare << "\n");
if (TotalIn >= SizeLimit+Spare)
return false;
@@ -579,12 +581,13 @@ bool HexagonEarlyIfConversion::isProfitable(const FlowPattern &FP) const {
PredDefs += countPredicateDefs(SB);
}
}
- DEBUG(dbgs() << "Total number of extra muxes from converted phis: "
- << TotalPh << "\n");
+ LLVM_DEBUG(dbgs() << "Total number of extra muxes from converted phis: "
+ << TotalPh << "\n");
if (TotalIn+TotalPh >= SizeLimit+Spare)
return false;
- DEBUG(dbgs() << "Total number of predicate registers: " << PredDefs << "\n");
+ LLVM_DEBUG(dbgs() << "Total number of predicate registers: " << PredDefs
+ << "\n");
if (PredDefs > 4)
return false;
@@ -625,11 +628,11 @@ bool HexagonEarlyIfConversion::visitBlock(MachineBasicBlock *B,
return Changed;
if (!isValid(FP)) {
- DEBUG(dbgs() << "Conversion is not valid\n");
+ LLVM_DEBUG(dbgs() << "Conversion is not valid\n");
return Changed;
}
if (!isProfitable(FP)) {
- DEBUG(dbgs() << "Conversion is not profitable\n");
+ LLVM_DEBUG(dbgs() << "Conversion is not profitable\n");
return Changed;
}
@@ -640,8 +643,9 @@ bool HexagonEarlyIfConversion::visitBlock(MachineBasicBlock *B,
bool HexagonEarlyIfConversion::visitLoop(MachineLoop *L) {
MachineBasicBlock *HB = L ? L->getHeader() : nullptr;
- DEBUG((L ? dbgs() << "Visiting loop H:" << PrintMB(HB)
- : dbgs() << "Visiting function") << "\n");
+ LLVM_DEBUG((L ? dbgs() << "Visiting loop H:" << PrintMB(HB)
+ : dbgs() << "Visiting function")
+ << "\n");
bool Changed = false;
if (L) {
for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I)
@@ -758,7 +762,7 @@ void HexagonEarlyIfConversion::predicateInstr(MachineBasicBlock *ToB,
void HexagonEarlyIfConversion::predicateBlockNB(MachineBasicBlock *ToB,
MachineBasicBlock::iterator At, MachineBasicBlock *FromB,
unsigned PredR, bool IfTrue) {
- DEBUG(dbgs() << "Predicating block " << PrintMB(FromB) << "\n");
+ LLVM_DEBUG(dbgs() << "Predicating block " << PrintMB(FromB) << "\n");
MachineBasicBlock::iterator End = FromB->getFirstTerminator();
MachineBasicBlock::iterator I, NextI;
@@ -950,7 +954,7 @@ void HexagonEarlyIfConversion::convert(const FlowPattern &FP) {
}
void HexagonEarlyIfConversion::removeBlock(MachineBasicBlock *B) {
- DEBUG(dbgs() << "Removing block " << PrintMB(B) << "\n");
+ LLVM_DEBUG(dbgs() << "Removing block " << PrintMB(B) << "\n");
// Transfer the immediate dominator information from B to its descendants.
MachineDomTreeNode *N = MDT->getNode(B);
@@ -980,7 +984,7 @@ void HexagonEarlyIfConversion::removeBlock(MachineBasicBlock *B) {
}
void HexagonEarlyIfConversion::eliminatePhis(MachineBasicBlock *B) {
- DEBUG(dbgs() << "Removing phi nodes from block " << PrintMB(B) << "\n");
+ LLVM_DEBUG(dbgs() << "Removing phi nodes from block " << PrintMB(B) << "\n");
MachineBasicBlock::iterator I, NextI, NonPHI = B->getFirstNonPHI();
for (I = B->begin(); I != NonPHI; I = NextI) {
NextI = std::next(I);
@@ -1007,8 +1011,8 @@ void HexagonEarlyIfConversion::eliminatePhis(MachineBasicBlock *B) {
void HexagonEarlyIfConversion::mergeBlocks(MachineBasicBlock *PredB,
MachineBasicBlock *SuccB) {
- DEBUG(dbgs() << "Merging blocks " << PrintMB(PredB) << " and "
- << PrintMB(SuccB) << "\n");
+ LLVM_DEBUG(dbgs() << "Merging blocks " << PrintMB(PredB) << " and "
+ << PrintMB(SuccB) << "\n");
bool TermOk = hasUncondBranch(SuccB);
eliminatePhis(SuccB);
HII->removeBranch(*PredB);
diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 06caa2feffab..d3222a63ead7 100644
--- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -650,7 +650,7 @@ MachineInstr *HexagonExpandCondsets::genCondTfrFor(MachineOperand &SrcOp,
.add(SrcOp);
}
- DEBUG(dbgs() << "created an initial copy: " << *MIB);
+ LLVM_DEBUG(dbgs() << "created an initial copy: " << *MIB);
return &*MIB;
}
@@ -663,8 +663,8 @@ bool HexagonExpandCondsets::split(MachineInstr &MI,
return false;
TfrCounter++;
}
- DEBUG(dbgs() << "\nsplitting " << printMBBReference(*MI.getParent()) << ": "
- << MI);
+ LLVM_DEBUG(dbgs() << "\nsplitting " << printMBBReference(*MI.getParent())
+ << ": " << MI);
MachineOperand &MD = MI.getOperand(0); // Definition
MachineOperand &MP = MI.getOperand(1); // Predicate register
assert(MD.isDef());
@@ -941,8 +941,8 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
unsigned Opc = TfrI.getOpcode();
(void)Opc;
assert(Opc == Hexagon::A2_tfrt || Opc == Hexagon::A2_tfrf);
- DEBUG(dbgs() << "\nattempt to predicate if-" << (Cond ? "true" : "false")
- << ": " << TfrI);
+ LLVM_DEBUG(dbgs() << "\nattempt to predicate if-" << (Cond ? "true" : "false")
+ << ": " << TfrI);
MachineOperand &MD = TfrI.getOperand(0);
MachineOperand &MP = TfrI.getOperand(1);
@@ -963,7 +963,7 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
if (!DefI || !isPredicable(DefI))
return false;
- DEBUG(dbgs() << "Source def: " << *DefI);
+ LLVM_DEBUG(dbgs() << "Source def: " << *DefI);
// Collect the information about registers defined and used between the
// DefI and the TfrI.
@@ -1048,8 +1048,8 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
if (!canMoveMemTo(*DefI, TfrI, true))
CanDown = false;
- DEBUG(dbgs() << "Can move up: " << (CanUp ? "yes" : "no")
- << ", can move down: " << (CanDown ? "yes\n" : "no\n"));
+ LLVM_DEBUG(dbgs() << "Can move up: " << (CanUp ? "yes" : "no")
+ << ", can move down: " << (CanDown ? "yes\n" : "no\n"));
MachineBasicBlock::iterator PastDefIt = std::next(DefIt);
if (CanUp)
predicateAt(MD, *DefI, PastDefIt, MP, Cond, UpdRegs);
@@ -1144,10 +1144,10 @@ bool HexagonExpandCondsets::coalesceRegisters(RegisterRef R1, RegisterRef R2) {
return false;
bool Overlap = L1.overlaps(L2);
- DEBUG(dbgs() << "compatible registers: ("
- << (Overlap ? "overlap" : "disjoint") << ")\n "
- << printReg(R1.Reg, TRI, R1.Sub) << " " << L1 << "\n "
- << printReg(R2.Reg, TRI, R2.Sub) << " " << L2 << "\n");
+ LLVM_DEBUG(dbgs() << "compatible registers: ("
+ << (Overlap ? "overlap" : "disjoint") << ")\n "
+ << printReg(R1.Reg, TRI, R1.Sub) << " " << L1 << "\n "
+ << printReg(R2.Reg, TRI, R2.Sub) << " " << L2 << "\n");
if (R1.Sub || R2.Sub)
return false;
if (Overlap)
@@ -1180,7 +1180,7 @@ bool HexagonExpandCondsets::coalesceRegisters(RegisterRef R1, RegisterRef R2) {
LIS->removeInterval(R2.Reg);
updateKillFlags(R1.Reg);
- DEBUG(dbgs() << "coalesced: " << L1 << "\n");
+ LLVM_DEBUG(dbgs() << "coalesced: " << L1 << "\n");
L1.verify();
return true;
@@ -1261,8 +1261,8 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
LIS = &getAnalysis<LiveIntervals>();
MRI = &MF.getRegInfo();
- DEBUG(LIS->print(dbgs() << "Before expand-condsets\n",
- MF.getFunction().getParent()));
+ LLVM_DEBUG(LIS->print(dbgs() << "Before expand-condsets\n",
+ MF.getFunction().getParent()));
bool Changed = false;
std::set<unsigned> CoalUpd, PredUpd;
@@ -1289,8 +1289,8 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
if (!CoalUpd.count(Op.getReg()))
KillUpd.insert(Op.getReg());
updateLiveness(KillUpd, false, true, false);
- DEBUG(LIS->print(dbgs() << "After coalescing\n",
- MF.getFunction().getParent()));
+ LLVM_DEBUG(
+ LIS->print(dbgs() << "After coalescing\n", MF.getFunction().getParent()));
// First, simply split all muxes into a pair of conditional transfers
// and update the live intervals to reflect the new arrangement. The
@@ -1306,8 +1306,8 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
// predication, and after splitting they are difficult to recalculate
// (because of predicated defs), so make sure they are left untouched.
// Predication does not use live intervals.
- DEBUG(LIS->print(dbgs() << "After splitting\n",
- MF.getFunction().getParent()));
+ LLVM_DEBUG(
+ LIS->print(dbgs() << "After splitting\n", MF.getFunction().getParent()));
// Traverse all blocks and collapse predicable instructions feeding
// conditional transfers into predicated instructions.
@@ -1315,13 +1315,13 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
// cases that were not created in the previous step.
for (auto &B : MF)
Changed |= predicateInBlock(B, PredUpd);
- DEBUG(LIS->print(dbgs() << "After predicating\n",
- MF.getFunction().getParent()));
+ LLVM_DEBUG(LIS->print(dbgs() << "After predicating\n",
+ MF.getFunction().getParent()));
PredUpd.insert(CoalUpd.begin(), CoalUpd.end());
updateLiveness(PredUpd, true, true, true);
- DEBUG({
+ LLVM_DEBUG({
if (Changed)
LIS->print(dbgs() << "After expand-condsets\n",
MF.getFunction().getParent());
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 4b3e11ed3fd7..97b02e2b34cb 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -442,7 +442,7 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
if (needsStackFrame(I, CSR, HRI))
SFBlocks.push_back(&I);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Blocks needing SF: {";
for (auto &B : SFBlocks)
dbgs() << " " << printMBBReference(*B);
@@ -465,7 +465,7 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
if (!PDomB)
break;
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Computed dom block: ";
if (DomB)
dbgs() << printMBBReference(*DomB);
@@ -483,11 +483,11 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
// Make sure that DomB dominates PDomB and PDomB post-dominates DomB.
if (!MDT.dominates(DomB, PDomB)) {
- DEBUG(dbgs() << "Dom block does not dominate pdom block\n");
+ LLVM_DEBUG(dbgs() << "Dom block does not dominate pdom block\n");
return;
}
if (!MPT.dominates(PDomB, DomB)) {
- DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
+ LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
return;
}
@@ -1396,7 +1396,7 @@ static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI) {
bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const {
- DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
MachineFrameInfo &MFI = MF.getFrameInfo();
BitVector SRegs(Hexagon::NUM_TARGET_REGS);
@@ -1406,15 +1406,16 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
// (1) For each callee-saved register, add that register and all of its
// sub-registers to SRegs.
- DEBUG(dbgs() << "Initial CS registers: {");
+ LLVM_DEBUG(dbgs() << "Initial CS registers: {");
for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
unsigned R = CSI[i].getReg();
- DEBUG(dbgs() << ' ' << printReg(R, TRI));
+ LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI));
for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
SRegs[*SR] = true;
}
- DEBUG(dbgs() << " }\n");
- DEBUG(dbgs() << "SRegs.1: "; dump_registers(SRegs, *TRI); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " }\n");
+ LLVM_DEBUG(dbgs() << "SRegs.1: "; dump_registers(SRegs, *TRI);
+ dbgs() << "\n");
// (2) For each reserved register, remove that register and all of its
// sub- and super-registers from SRegs.
@@ -1424,8 +1425,10 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
for (MCSuperRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
SRegs[*SR] = false;
}
- DEBUG(dbgs() << "Res: "; dump_registers(Reserved, *TRI); dbgs() << "\n");
- DEBUG(dbgs() << "SRegs.2: "; dump_registers(SRegs, *TRI); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Res: "; dump_registers(Reserved, *TRI);
+ dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "SRegs.2: "; dump_registers(SRegs, *TRI);
+ dbgs() << "\n");
// (3) Collect all registers that have at least one sub-register in SRegs,
// and also have no sub-registers that are reserved. These will be the can-
@@ -1446,11 +1449,13 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
break;
}
}
- DEBUG(dbgs() << "TmpSup: "; dump_registers(TmpSup, *TRI); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "TmpSup: "; dump_registers(TmpSup, *TRI);
+ dbgs() << "\n");
// (4) Include all super-registers found in (3) into SRegs.
SRegs |= TmpSup;
- DEBUG(dbgs() << "SRegs.4: "; dump_registers(SRegs, *TRI); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "SRegs.4: "; dump_registers(SRegs, *TRI);
+ dbgs() << "\n");
// (5) For each register R in SRegs, if any super-register of R is in SRegs,
// remove R from SRegs.
@@ -1463,7 +1468,8 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
break;
}
}
- DEBUG(dbgs() << "SRegs.5: "; dump_registers(SRegs, *TRI); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "SRegs.5: "; dump_registers(SRegs, *TRI);
+ dbgs() << "\n");
// Now, for each register that has a fixed stack slot, create the stack
// object for it.
@@ -1501,7 +1507,7 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
SRegs[R] = false;
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "CS information: {";
for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
int FI = CSI[i].getFrameIdx();
@@ -2021,8 +2027,8 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
auto P = BlockIndexes.insert(
std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B)));
auto &IndexMap = P.first->second;
- DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n"
- << IndexMap << '\n');
+ LLVM_DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n"
+ << IndexMap << '\n');
for (auto &In : B) {
int LFI, SFI;
@@ -2129,7 +2135,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
}
}
- DEBUG({
+ LLVM_DEBUG({
for (auto &P : FIRangeMap) {
dbgs() << "fi#" << P.first;
if (BadFIs.count(P.first))
@@ -2168,7 +2174,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
}
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
for (auto &P : BlockFIMap) {
auto &FIs = P.second;
@@ -2195,16 +2201,16 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
HexagonBlockRanges::InstrIndexMap &IM = F->second;
HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM);
HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM);
- DEBUG(dbgs() << printMBBReference(B) << " dead map\n"
- << HexagonBlockRanges::PrintRangeMap(DM, HRI));
+ LLVM_DEBUG(dbgs() << printMBBReference(B) << " dead map\n"
+ << HexagonBlockRanges::PrintRangeMap(DM, HRI));
for (auto FI : BlockFIMap[&B]) {
if (BadFIs.count(FI))
continue;
- DEBUG(dbgs() << "Working on fi#" << FI << '\n');
+ LLVM_DEBUG(dbgs() << "Working on fi#" << FI << '\n');
HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
for (auto &Range : RL) {
- DEBUG(dbgs() << "--Examining range:" << RL << '\n');
+ LLVM_DEBUG(dbgs() << "--Examining range:" << RL << '\n');
if (!IndexType::isInstr(Range.start()) ||
!IndexType::isInstr(Range.end()))
continue;
@@ -2219,7 +2225,8 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
// The this-> is needed to unconfuse MSVC.
unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
- DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI)
+ << '\n');
if (FoundR == 0)
continue;
#ifndef NDEBUG
diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp
index 9288ed03d4d2..c0d2de90467a 100644
--- a/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -222,13 +222,12 @@ void HexagonGenPredicate::collectPredicateGPR(MachineFunction &MF) {
}
void HexagonGenPredicate::processPredicateGPR(const Register &Reg) {
- DEBUG(dbgs() << __func__ << ": "
- << printReg(Reg.R, TRI, Reg.S) << "\n");
+ LLVM_DEBUG(dbgs() << __func__ << ": " << printReg(Reg.R, TRI, Reg.S) << "\n");
using use_iterator = MachineRegisterInfo::use_iterator;
use_iterator I = MRI->use_begin(Reg.R), E = MRI->use_end();
if (I == E) {
- DEBUG(dbgs() << "Dead reg: " << printReg(Reg.R, TRI, Reg.S) << '\n');
+ LLVM_DEBUG(dbgs() << "Dead reg: " << printReg(Reg.R, TRI, Reg.S) << '\n');
MachineInstr *DefI = MRI->getVRegDef(Reg.R);
DefI->eraseFromParent();
return;
@@ -250,7 +249,7 @@ Register HexagonGenPredicate::getPredRegFor(const Register &Reg) {
if (F != G2P.end())
return F->second;
- DEBUG(dbgs() << __func__ << ": " << PrintRegister(Reg, *TRI));
+ LLVM_DEBUG(dbgs() << __func__ << ": " << PrintRegister(Reg, *TRI));
MachineInstr *DefI = MRI->getVRegDef(Reg.R);
assert(DefI);
unsigned Opc = DefI->getOpcode();
@@ -258,7 +257,7 @@ Register HexagonGenPredicate::getPredRegFor(const Register &Reg) {
assert(DefI->getOperand(0).isDef() && DefI->getOperand(1).isUse());
Register PR = DefI->getOperand(1);
G2P.insert(std::make_pair(Reg, PR));
- DEBUG(dbgs() << " -> " << PrintRegister(PR, *TRI) << '\n');
+ LLVM_DEBUG(dbgs() << " -> " << PrintRegister(PR, *TRI) << '\n');
return PR;
}
@@ -274,7 +273,8 @@ Register HexagonGenPredicate::getPredRegFor(const Register &Reg) {
BuildMI(B, std::next(DefIt), DL, TII->get(TargetOpcode::COPY), NewPR)
.addReg(Reg.R, 0, Reg.S);
G2P.insert(std::make_pair(Reg, Register(NewPR)));
- DEBUG(dbgs() << " -> !" << PrintRegister(Register(NewPR), *TRI) << '\n');
+ LLVM_DEBUG(dbgs() << " -> !" << PrintRegister(Register(NewPR), *TRI)
+ << '\n');
return Register(NewPR);
}
@@ -364,7 +364,7 @@ bool HexagonGenPredicate::isScalarPred(Register PredReg) {
}
bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) {
- DEBUG(dbgs() << __func__ << ": " << MI << " " << *MI);
+ LLVM_DEBUG(dbgs() << __func__ << ": " << MI << " " << *MI);
unsigned Opc = MI->getOpcode();
assert(isConvertibleToPredForm(MI));
@@ -426,7 +426,7 @@ bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) {
Register Pred = getPredRegFor(GPR);
MIB.addReg(Pred.R, 0, Pred.S);
}
- DEBUG(dbgs() << "generated: " << *MIB);
+ LLVM_DEBUG(dbgs() << "generated: " << *MIB);
// Generate a copy-out: NewGPR = NewPR, and replace all uses of OutR
// with NewGPR.
@@ -449,7 +449,7 @@ bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) {
}
bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) {
- DEBUG(dbgs() << __func__ << "\n");
+ LLVM_DEBUG(dbgs() << __func__ << "\n");
const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass;
bool Changed = false;
VectOfInst Erase;
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index e3d5825b269b..0e33976a58ac 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -376,7 +376,7 @@ FunctionPass *llvm::createHexagonHardwareLoops() {
}
bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n");
+ LLVM_DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n");
if (skipFunction(MF.getFunction()))
return false;
@@ -1012,14 +1012,15 @@ bool HexagonHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI,
bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L,
bool IsInnerHWLoop) const {
const std::vector<MachineBasicBlock *> &Blocks = L->getBlocks();
- DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0]));
+ LLVM_DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0]));
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *MBB = Blocks[i];
for (MachineBasicBlock::iterator
MII = MBB->begin(), E = MBB->end(); MII != E; ++MII) {
const MachineInstr *MI = &*MII;
if (isInvalidLoopOperation(MI, IsInnerHWLoop)) {
- DEBUG(dbgs()<< "\nCannot convert to hw_loop due to:"; MI->dump(););
+ LLVM_DEBUG(dbgs() << "\nCannot convert to hw_loop due to:";
+ MI->dump(););
return true;
}
}
@@ -1084,7 +1085,7 @@ void HexagonHardwareLoops::removeIfDead(MachineInstr *MI) {
SmallVector<MachineInstr*, 1> DeadPhis;
if (isDead(MI, DeadPhis)) {
- DEBUG(dbgs() << "HW looping will remove: " << *MI);
+ LLVM_DEBUG(dbgs() << "HW looping will remove: " << *MI);
// It is possible that some DBG_VALUE instructions refer to this
// instruction. Examine each def operand for such references;
@@ -1238,7 +1239,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L,
LoopStart = TopBlock;
// Convert the loop to a hardware loop.
- DEBUG(dbgs() << "Change to hardware loop at "; L->dump());
+ LLVM_DEBUG(dbgs() << "Change to hardware loop at "; L->dump());
DebugLoc DL;
if (InsertPos != Preheader->end())
DL = InsertPos->getDebugLoc();
@@ -1368,7 +1369,7 @@ bool HexagonHardwareLoops::isLoopFeeder(MachineLoop *L, MachineBasicBlock *A,
LoopFeederMap &LoopFeederPhi) const {
if (LoopFeederPhi.find(MO->getReg()) == LoopFeederPhi.end()) {
const std::vector<MachineBasicBlock *> &Blocks = L->getBlocks();
- DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0]));
+ LLVM_DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0]));
// Ignore all BBs that form Loop.
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *MBB = Blocks[i];
@@ -1769,16 +1770,16 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
for (unsigned i = 1, n = PredDef->getNumOperands(); i < n; ++i) {
MachineOperand &MO = PredDef->getOperand(i);
if (MO.isReg() && MO.getReg() == RB.first) {
- DEBUG(dbgs() << "\n DefMI(" << i << ") = "
- << *(MRI->getVRegDef(I->first)));
+ LLVM_DEBUG(dbgs() << "\n DefMI(" << i
+ << ") = " << *(MRI->getVRegDef(I->first)));
if (IndI)
return false;
IndI = MRI->getVRegDef(I->first);
IndMO = &MO;
} else if (MO.isReg()) {
- DEBUG(dbgs() << "\n DefMI(" << i << ") = "
- << *(MRI->getVRegDef(MO.getReg())));
+ LLVM_DEBUG(dbgs() << "\n DefMI(" << i
+ << ") = " << *(MRI->getVRegDef(MO.getReg())));
if (nonIndI)
return false;
diff --git a/lib/Target/Hexagon/HexagonHazardRecognizer.cpp b/lib/Target/Hexagon/HexagonHazardRecognizer.cpp
index 8ac333f4f01f..44f1f554c662 100644
--- a/lib/Target/Hexagon/HexagonHazardRecognizer.cpp
+++ b/lib/Target/Hexagon/HexagonHazardRecognizer.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
#define DEBUG_TYPE "post-RA-sched"
void HexagonHazardRecognizer::Reset() {
- DEBUG(dbgs() << "Reset hazard recognizer\n");
+ LLVM_DEBUG(dbgs() << "Reset hazard recognizer\n");
Resources->clearResources();
PacketNum = 0;
UsesDotCur = nullptr;
@@ -43,7 +43,7 @@ HexagonHazardRecognizer::getHazardType(SUnit *SU, int stalls) {
return NoHazard;
if (!Resources->canReserveResources(*MI)) {
- DEBUG(dbgs() << "*** Hazard in cycle " << PacketNum << ", " << *MI);
+ LLVM_DEBUG(dbgs() << "*** Hazard in cycle " << PacketNum << ", " << *MI);
HazardType RetVal = Hazard;
if (TII->mayBeNewStore(*MI)) {
// Make sure the register to be stored is defined by an instruction in the
@@ -59,14 +59,16 @@ HexagonHazardRecognizer::getHazardType(SUnit *SU, int stalls) {
MI->getDebugLoc());
if (Resources->canReserveResources(*NewMI))
RetVal = NoHazard;
- DEBUG(dbgs() << "*** Try .new version? " << (RetVal == NoHazard) << "\n");
+ LLVM_DEBUG(dbgs() << "*** Try .new version? " << (RetVal == NoHazard)
+ << "\n");
MF->DeleteMachineInstr(NewMI);
}
return RetVal;
}
if (SU == UsesDotCur && DotCurPNum != (int)PacketNum) {
- DEBUG(dbgs() << "*** .cur Hazard in cycle " << PacketNum << ", " << *MI);
+ LLVM_DEBUG(dbgs() << "*** .cur Hazard in cycle " << PacketNum << ", "
+ << *MI);
return Hazard;
}
@@ -74,7 +76,7 @@ HexagonHazardRecognizer::getHazardType(SUnit *SU, int stalls) {
}
void HexagonHazardRecognizer::AdvanceCycle() {
- DEBUG(dbgs() << "Advance cycle, clear state\n");
+ LLVM_DEBUG(dbgs() << "Advance cycle, clear state\n");
Resources->clearResources();
if (DotCurPNum != -1 && DotCurPNum != (int)PacketNum) {
UsesDotCur = nullptr;
@@ -132,7 +134,7 @@ void HexagonHazardRecognizer::EmitInstruction(SUnit *SU) {
}
else
Resources->reserveResources(*MI);
- DEBUG(dbgs() << " Add instruction " << *MI);
+ LLVM_DEBUG(dbgs() << " Add instruction " << *MI);
// When scheduling a dot cur instruction, check if there is an instruction
// that can use the dot cur in the same packet. If so, we'll attempt to
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index e639d13bd2b5..20e8dcdb06f7 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -1892,15 +1892,15 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
RootHeights[N] = std::max(getHeight(N->getOperand(0).getNode()),
getHeight(N->getOperand(1).getNode())) + 1;
- DEBUG(dbgs() << "--> No need to balance root (Weight=" << Weight
- << " Height=" << RootHeights[N] << "): ");
- DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "--> No need to balance root (Weight=" << Weight
+ << " Height=" << RootHeights[N] << "): ");
+ LLVM_DEBUG(N->dump(CurDAG));
return SDValue(N, 0);
}
- DEBUG(dbgs() << "** Balancing root node: ");
- DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "** Balancing root node: ");
+ LLVM_DEBUG(N->dump(CurDAG));
unsigned NOpcode = N->getOpcode();
@@ -1948,7 +1948,7 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
// Whoops, this node was RAUWd by one of the balanceSubTree calls we
// made. Our worklist isn't up to date anymore.
// Restart the whole process.
- DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
+ LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
return balanceSubTree(N, TopLevel);
}
@@ -2019,15 +2019,15 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
}
}
- DEBUG(dbgs() << "--> Current height=" << NodeHeights[SDValue(N, 0)]
- << " weight=" << CurrentWeight << " imbalanced="
- << Imbalanced << "\n");
+ LLVM_DEBUG(dbgs() << "--> Current height=" << NodeHeights[SDValue(N, 0)]
+ << " weight=" << CurrentWeight
+ << " imbalanced=" << Imbalanced << "\n");
// Transform MUL(x, C * 2^Y) + SHL(z, Y) -> SHL(ADD(MUL(x, C), z), Y)
// This factors out a shift in order to match memw(a<<Y+b).
if (CanFactorize && (willShiftRightEliminate(Mul1.Value, MaxPowerOf2) ||
willShiftRightEliminate(Mul2.Value, MaxPowerOf2))) {
- DEBUG(dbgs() << "--> Found common factor for two MUL children!\n");
+ LLVM_DEBUG(dbgs() << "--> Found common factor for two MUL children!\n");
int Weight = Mul1.Weight + Mul2.Weight;
int Height = std::max(NodeHeights[Mul1.Value], NodeHeights[Mul2.Value]) + 1;
SDValue Mul1Factored = factorOutPowerOf2(Mul1.Value, MaxPowerOf2);
@@ -2061,9 +2061,9 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
if (getUsesInFunction(GANode->getGlobal()) == 1 && Offset->hasOneUse() &&
getTargetLowering()->isOffsetFoldingLegal(GANode)) {
- DEBUG(dbgs() << "--> Combining GA and offset (" << Offset->getSExtValue()
- << "): ");
- DEBUG(GANode->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "--> Combining GA and offset ("
+ << Offset->getSExtValue() << "): ");
+ LLVM_DEBUG(GANode->dump(CurDAG));
SDValue NewTGA =
CurDAG->getTargetGlobalAddress(GANode->getGlobal(), SDLoc(GA.Value),
@@ -2107,7 +2107,7 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
// If this is the top level and we haven't factored out a shift, we should try
// to move a constant to the bottom to match addressing modes like memw(rX+C)
if (TopLevel && !CanFactorize && Leaves.hasConst()) {
- DEBUG(dbgs() << "--> Pushing constant to tip of tree.");
+ LLVM_DEBUG(dbgs() << "--> Pushing constant to tip of tree.");
Leaves.pushToBottom(Leaves.pop());
}
@@ -2134,7 +2134,7 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
// Make sure that none of these nodes have been RAUW'd
if ((RootWeights.count(V0.getNode()) && RootWeights[V0.getNode()] == -2) ||
(RootWeights.count(V1.getNode()) && RootWeights[V1.getNode()] == -2)) {
- DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
+ LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
return balanceSubTree(N, TopLevel);
}
@@ -2168,9 +2168,9 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
int Weight = V0Weight + V1Weight;
Leaves.push(WeightedLeaf(NewNode, Weight, L0.InsertionOrder));
- DEBUG(dbgs() << "--> Built new node (Weight=" << Weight << ",Height="
- << Height << "):\n");
- DEBUG(NewNode.dump());
+ LLVM_DEBUG(dbgs() << "--> Built new node (Weight=" << Weight
+ << ",Height=" << Height << "):\n");
+ LLVM_DEBUG(NewNode.dump());
}
assert(Leaves.size() == 1);
@@ -2194,15 +2194,15 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
}
if (N != NewRoot.getNode()) {
- DEBUG(dbgs() << "--> Root is now: ");
- DEBUG(NewRoot.dump());
+ LLVM_DEBUG(dbgs() << "--> Root is now: ");
+ LLVM_DEBUG(NewRoot.dump());
// Replace all uses of old root by new root
CurDAG->ReplaceAllUsesWith(N, NewRoot.getNode());
// Mark that we have RAUW'd N
RootWeights[N] = -2;
} else {
- DEBUG(dbgs() << "--> Root unchanged.\n");
+ LLVM_DEBUG(dbgs() << "--> Root unchanged.\n");
}
RootWeights[NewRoot.getNode()] = Leaves.top().Weight;
@@ -2225,8 +2225,8 @@ void HexagonDAGToDAGISel::rebalanceAddressTrees() {
if (RootWeights.count(BasePtr.getNode()))
continue;
- DEBUG(dbgs() << "** Rebalancing address calculation in node: ");
- DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "** Rebalancing address calculation in node: ");
+ LLVM_DEBUG(N->dump(CurDAG));
// FindRoots
SmallVector<SDNode *, 4> Worklist;
@@ -2266,8 +2266,8 @@ void HexagonDAGToDAGISel::rebalanceAddressTrees() {
N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), N->getOperand(1),
NewBasePtr, N->getOperand(3));
- DEBUG(dbgs() << "--> Final node: ");
- DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "--> Final node: ");
+ LLVM_DEBUG(N->dump(CurDAG));
}
CurDAG->RemoveDeadNodes();
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index be036b3ddb38..fbe8f16bd98d 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -359,9 +359,9 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
break;
}
}
- DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
- : "Argument must be passed on stack. "
- "Not eligible for Tail Call\n"));
+ LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
+ : "Argument must be passed on stack. "
+ "Not eligible for Tail Call\n"));
}
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -433,7 +433,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
if (NeedsArgAlign && Subtarget.hasV60TOps()) {
- DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
+ LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
unsigned VecAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
MFI.ensureMaxAlignment(LargestAlignSeen);
@@ -670,7 +670,7 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
if (A == 0)
A = HFI.getStackAlignment();
- DEBUG({
+ LLVM_DEBUG({
dbgs () << __func__ << " Align: " << A << " Size: ";
Size.getNode()->dump(&DAG);
dbgs() << "\n";
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 1ec1ce01336a..4086c6cfb810 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -430,7 +430,7 @@ bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
// Delete the J2_jump if it's equivalent to a fall-through.
if (AllowModify && JumpToBlock &&
MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
- DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
+ LLVM_DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
I->eraseFromParent();
I = MBB.instr_end();
if (I == MBB.instr_begin())
@@ -499,8 +499,8 @@ bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
Cond.push_back(LastInst->getOperand(1));
return false;
}
- DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
- << " with one jump\n";);
+ LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
+ << " with one jump\n";);
// Otherwise, don't know what this is.
return true;
}
@@ -547,8 +547,8 @@ bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
FBB = LastInst->getOperand(0).getMBB();
return false;
}
- DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
- << " with two jumps";);
+ LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
+ << " with two jumps";);
// Otherwise, can't handle this.
return true;
}
@@ -557,7 +557,7 @@ unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
assert(!BytesRemoved && "code size not handled");
- DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB));
+ LLVM_DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB));
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
while (I != MBB.begin()) {
@@ -629,7 +629,8 @@ unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB,
// (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
// (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
- DEBUG(dbgs() << "\nInserting NVJump for " << printMBBReference(MBB););
+ LLVM_DEBUG(dbgs() << "\nInserting NVJump for "
+ << printMBBReference(MBB););
if (Cond[2].isReg()) {
unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
@@ -1501,7 +1502,7 @@ bool HexagonInstrInfo::PredicateInstruction(
MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
isEndLoopN(Cond[0].getImm())) {
- DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
+ LLVM_DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
return false;
}
int Opc = MI.getOpcode();
@@ -2251,13 +2252,13 @@ bool HexagonInstrInfo::isLateInstrFeedsEarlyInstr(const MachineInstr &LRMI,
bool isLate = isLateResultInstr(LRMI);
bool isEarly = isEarlySourceInstr(ESMI);
- DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
- DEBUG(LRMI.dump());
- DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
- DEBUG(ESMI.dump());
+ LLVM_DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
+ LLVM_DEBUG(LRMI.dump());
+ LLVM_DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
+ LLVM_DEBUG(ESMI.dump());
if (isLate && isEarly) {
- DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
+ LLVM_DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
return true;
}
@@ -4174,7 +4175,7 @@ bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
return false;
assert(Cond.size() == 2);
if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
- DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
+ LLVM_DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
return false;
}
PredReg = Cond[1].getReg();
@@ -4276,9 +4277,9 @@ void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const {
bool HexagonInstrInfo::invertAndChangeJumpTarget(
MachineInstr &MI, MachineBasicBlock *NewTarget) const {
- DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
- << printMBBReference(*NewTarget);
- MI.dump(););
+ LLVM_DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
+ << printMBBReference(*NewTarget);
+ MI.dump(););
assert(MI.isBranch());
unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
int TargetPos = MI.getNumOperands() - 1;
@@ -4306,8 +4307,9 @@ void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
NewMI = BuildMI(B, I, DL, get(insn));
- DEBUG(dbgs() << "\n" << getName(NewMI->getOpcode()) <<
- " Class: " << NewMI->getDesc().getSchedClass());
+ LLVM_DEBUG(dbgs() << "\n"
+ << getName(NewMI->getOpcode())
+ << " Class: " << NewMI->getDesc().getSchedClass());
NewMI->eraseFromParent();
}
/* --- The code above is used to generate complete set of Hexagon Insn --- */
@@ -4317,7 +4319,7 @@ void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
// p -> NotP
// NotP -> P
bool HexagonInstrInfo::reversePredSense(MachineInstr &MI) const {
- DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
+ LLVM_DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
MI.setDesc(get(getInvertedPredicatedOpcode(MI.getOpcode())));
return true;
}
diff --git a/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index d0bc2ed83a76..8b22d35cf9c7 100644
--- a/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -1758,15 +1758,15 @@ void PolynomialMultiplyRecognize::setupPostSimplifier(Simplifier &S) {
}

bool PolynomialMultiplyRecognize::recognize() {
- DEBUG(dbgs() << "Starting PolynomialMultiplyRecognize on loop\n"
- << *CurLoop << '\n');
+ LLVM_DEBUG(dbgs() << "Starting PolynomialMultiplyRecognize on loop\n"
+ << *CurLoop << '\n');
// Restrictions:
// - The loop must consist of a single block.
// - The iteration count must be known at compile-time.
// - The loop must have an induction variable starting from 0, and
// incremented in each iteration of the loop.
BasicBlock *LoopB = CurLoop->getHeader();
- DEBUG(dbgs() << "Loop header:\n" << *LoopB);
+ LLVM_DEBUG(dbgs() << "Loop header:\n" << *LoopB);
if (LoopB != CurLoop->getLoopLatch())
return false;
@@ -1788,7 +1788,8 @@ bool PolynomialMultiplyRecognize::recognize() {
ParsedValues PV;
Simplifier PreSimp;
PV.IterCount = IterCount;
- DEBUG(dbgs() << "Loop IV: " << *CIV << "\nIterCount: " << IterCount << '\n');
+ LLVM_DEBUG(dbgs() << "Loop IV: " << *CIV << "\nIterCount: " << IterCount
+ << '\n');
setupPreSimplifier(PreSimp);
@@ -1815,7 +1816,7 @@ bool PolynomialMultiplyRecognize::recognize() {
Simplifier::Context C(SI);
Value *T = PreSimp.simplify(C);
SelectInst *SelI = (T && isa<SelectInst>(T)) ? cast<SelectInst>(T) : SI;
- DEBUG(dbgs() << "scanSelect(pre-scan): " << PE(C, SelI) << '\n');
+ LLVM_DEBUG(dbgs() << "scanSelect(pre-scan): " << PE(C, SelI) << '\n');
if (scanSelect(SelI, LoopB, EntryB, CIV, PV, true)) {
FoundPreScan = true;
if (SelI != SI) {
@@ -1828,7 +1829,7 @@ bool PolynomialMultiplyRecognize::recognize() {
}
if (!FoundPreScan) {
- DEBUG(dbgs() << "Have not found candidates for pmpy\n");
+ LLVM_DEBUG(dbgs() << "Have not found candidates for pmpy\n");
return false;
}
@@ -1868,14 +1869,14 @@ bool PolynomialMultiplyRecognize::recognize() {
SelectInst *SelI = dyn_cast<SelectInst>(&In);
if (!SelI)
continue;
- DEBUG(dbgs() << "scanSelect: " << *SelI << '\n');
+ LLVM_DEBUG(dbgs() << "scanSelect: " << *SelI << '\n');
FoundScan = scanSelect(SelI, LoopB, EntryB, CIV, PV, false);
if (FoundScan)
break;
}
assert(FoundScan);
- DEBUG({
+ LLVM_DEBUG({
StringRef PP = (PV.M ? "(P+M)" : "P");
if (!PV.Inv)
dbgs() << "Found pmpy idiom: R = " << PP << ".Q\n";
@@ -2287,10 +2288,11 @@ CleanupAndExit:
NewCall->setDebugLoc(DLoc);
- DEBUG(dbgs() << " Formed " << (Overlap ? "memmove: " : "memcpy: ")
- << *NewCall << "\n"
- << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
- << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
+ LLVM_DEBUG(dbgs() << " Formed " << (Overlap ? "memmove: " : "memcpy: ")
+ << *NewCall << "\n"
+ << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
+ << " from store ptr=" << *StoreEv << " at: " << *SI
+ << "\n");
return true;
}
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 4376a30fdd98..74c550ce8226 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -172,11 +172,11 @@ bool VLIWResourceModel::reserveResources(SUnit *SU, bool IsTop) {
Packet.push_back(SU);
#ifndef NDEBUG
- DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
+ LLVM_DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
- DEBUG(dbgs() << "\t[" << i << "] SU(");
- DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
- DEBUG(Packet[i]->getInstr()->dump());
+ LLVM_DEBUG(dbgs() << "\t[" << i << "] SU(");
+ LLVM_DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
+ LLVM_DEBUG(Packet[i]->getInstr()->dump());
}
#endif
@@ -187,10 +187,10 @@ bool VLIWResourceModel::reserveResources(SUnit *SU, bool IsTop) {
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void VLIWMachineScheduler::schedule() {
- DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
- << printMBBReference(*BB) << " " << BB->getName() << " in_func "
- << BB->getParent()->getName() << " at loop depth "
- << MLI->getLoopDepth(BB) << " \n");
+ LLVM_DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
+ << printMBBReference(*BB) << " " << BB->getName()
+ << " in_func " << BB->getParent()->getName()
+ << " at loop depth " << MLI->getLoopDepth(BB) << " \n");
buildDAGWithRegPressure();
@@ -205,24 +205,25 @@ void VLIWMachineScheduler::schedule() {
// Initialize the strategy before modifying the DAG.
SchedImpl->initialize(this);
- DEBUG(unsigned maxH = 0;
- for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
- if (SUnits[su].getHeight() > maxH)
- maxH = SUnits[su].getHeight();
- dbgs() << "Max Height " << maxH << "\n";);
- DEBUG(unsigned maxD = 0;
- for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
- if (SUnits[su].getDepth() > maxD)
- maxD = SUnits[su].getDepth();
- dbgs() << "Max Depth " << maxD << "\n";);
- DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
- SUnits[su].dumpAll(this));
+ LLVM_DEBUG(unsigned maxH = 0;
+ for (unsigned su = 0, e = SUnits.size(); su != e;
+ ++su) if (SUnits[su].getHeight() > maxH) maxH =
+ SUnits[su].getHeight();
+ dbgs() << "Max Height " << maxH << "\n";);
+ LLVM_DEBUG(unsigned maxD = 0;
+ for (unsigned su = 0, e = SUnits.size(); su != e;
+ ++su) if (SUnits[su].getDepth() > maxD) maxD =
+ SUnits[su].getDepth();
+ dbgs() << "Max Depth " << maxD << "\n";);
+ LLVM_DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) SUnits[su]
+ .dumpAll(this));
initQueues(TopRoots, BotRoots);
bool IsTopNode = false;
while (true) {
- DEBUG(dbgs() << "** VLIWMachineScheduler::schedule picking next node\n");
+ LLVM_DEBUG(
+ dbgs() << "** VLIWMachineScheduler::schedule picking next node\n");
SUnit *SU = SchedImpl->pickNode(IsTopNode);
if (!SU) break;
@@ -240,7 +241,7 @@ void VLIWMachineScheduler::schedule() {
placeDebugValues();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "*** Final schedule for "
<< printMBBReference(*begin()->getParent()) << " ***\n";
dumpSchedule();
@@ -379,8 +380,8 @@ void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
}
CheckPending = true;
- DEBUG(dbgs() << "*** Next cycle " << Available.getName() << " cycle "
- << CurrCycle << '\n');
+ LLVM_DEBUG(dbgs() << "*** Next cycle " << Available.getName() << " cycle "
+ << CurrCycle << '\n');
}

/// Move the boundary of scheduled code by one SUnit.
@@ -404,12 +405,12 @@ void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit *SU) {
// TODO: Check if this SU must end a dispatch group.
IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
if (startNewCycle) {
- DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
+ LLVM_DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
bumpCycle();
}
else
- DEBUG(dbgs() << "*** IssueCount " << IssueCount
- << " at cycle " << CurrCycle << '\n');
+ LLVM_DEBUG(dbgs() << "*** IssueCount " << IssueCount << " at cycle "
+ << CurrCycle << '\n');
}

/// Release pending ready nodes in to the available queue. This makes them
@@ -582,22 +583,23 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
if (!SU || SU->isScheduled)
return ResCount;
- DEBUG(if (verbose) dbgs() << ((Q.getID() == TopQID) ? "(top|" : "(bot|"));
+ LLVM_DEBUG(if (verbose) dbgs()
+ << ((Q.getID() == TopQID) ? "(top|" : "(bot|"));
// Forced priority is high.
if (SU->isScheduleHigh) {
ResCount += PriorityOne;
- DEBUG(dbgs() << "H|");
+ LLVM_DEBUG(dbgs() << "H|");
}
unsigned IsAvailableAmt = 0;
// Critical path first.
if (Q.getID() == TopQID) {
if (Top.isLatencyBound(SU)) {
- DEBUG(if (verbose) dbgs() << "LB|");
+ LLVM_DEBUG(if (verbose) dbgs() << "LB|");
ResCount += (SU->getHeight() * ScaleTwo);
}
- DEBUG(if (verbose) {
+ LLVM_DEBUG(if (verbose) {
std::stringstream dbgstr;
dbgstr << "h" << std::setw(3) << SU->getHeight() << "|";
dbgs() << dbgstr.str();
@@ -608,16 +610,16 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
if (Top.ResourceModel->isResourceAvailable(SU, true)) {
IsAvailableAmt = (PriorityTwo + PriorityThree);
ResCount += IsAvailableAmt;
- DEBUG(if (verbose) dbgs() << "A|");
+ LLVM_DEBUG(if (verbose) dbgs() << "A|");
} else
- DEBUG(if (verbose) dbgs() << " |");
+ LLVM_DEBUG(if (verbose) dbgs() << " |");
} else {
if (Bot.isLatencyBound(SU)) {
- DEBUG(if (verbose) dbgs() << "LB|");
+ LLVM_DEBUG(if (verbose) dbgs() << "LB|");
ResCount += (SU->getDepth() * ScaleTwo);
}
- DEBUG(if (verbose) {
+ LLVM_DEBUG(if (verbose) {
std::stringstream dbgstr;
dbgstr << "d" << std::setw(3) << SU->getDepth() << "|";
dbgs() << dbgstr.str();
@@ -628,9 +630,9 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
if (Bot.ResourceModel->isResourceAvailable(SU, false)) {
IsAvailableAmt = (PriorityTwo + PriorityThree);
ResCount += IsAvailableAmt;
- DEBUG(if (verbose) dbgs() << "A|");
+ LLVM_DEBUG(if (verbose) dbgs() << "A|");
} else
- DEBUG(if (verbose) dbgs() << " |");
+ LLVM_DEBUG(if (verbose) dbgs() << " |");
}
unsigned NumNodesBlocking = 0;
@@ -652,7 +654,7 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
}
ResCount += (NumNodesBlocking * ScaleTwo);
- DEBUG(if (verbose) {
+ LLVM_DEBUG(if (verbose) {
std::stringstream dbgstr;
dbgstr << "blk " << std::setw(2) << NumNodesBlocking << ")|";
dbgs() << dbgstr.str();
@@ -674,10 +676,10 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
(Delta.Excess.getUnitInc() || Delta.CriticalMax.getUnitInc() ||
Delta.CurrentMax.getUnitInc()))
ResCount -= IsAvailableAmt;
- DEBUG(if (verbose) {
- dbgs() << "RP " << Delta.Excess.getUnitInc() << "/"
- << Delta.CriticalMax.getUnitInc() <<"/"
- << Delta.CurrentMax.getUnitInc() << ")|";
+ LLVM_DEBUG(if (verbose) {
+ dbgs() << "RP " << Delta.Excess.getUnitInc() << "/"
+ << Delta.CriticalMax.getUnitInc() << "/"
+ << Delta.CurrentMax.getUnitInc() << ")|";
});
}
@@ -689,11 +691,11 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
if (Q.getID() == TopQID &&
Top.ResourceModel->isResourceAvailable(SU, true)) {
ResCount += PriorityTwo;
- DEBUG(if (verbose) dbgs() << "C|");
+ LLVM_DEBUG(if (verbose) dbgs() << "C|");
} else if (Q.getID() == BotQID &&
Bot.ResourceModel->isResourceAvailable(SU, false)) {
ResCount += PriorityTwo;
- DEBUG(if (verbose) dbgs() << "C|");
+ LLVM_DEBUG(if (verbose) dbgs() << "C|");
}
}
@@ -705,7 +707,7 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
PI.getLatency() == 0 &&
Top.ResourceModel->isInPacket(PI.getSUnit())) {
ResCount += PriorityThree;
- DEBUG(if (verbose) dbgs() << "Z|");
+ LLVM_DEBUG(if (verbose) dbgs() << "Z|");
}
}
} else if (Q.getID() == BotQID && getWeakLeft(SU, false) == 0) {
@@ -714,7 +716,7 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
SI.getLatency() == 0 &&
Bot.ResourceModel->isInPacket(SI.getSUnit())) {
ResCount += PriorityThree;
- DEBUG(if (verbose) dbgs() << "Z|");
+ LLVM_DEBUG(if (verbose) dbgs() << "Z|");
}
}
}
@@ -730,7 +732,7 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
if (PI.getLatency() > 0 &&
Top.ResourceModel->isInPacket(PI.getSUnit())) {
ResCount -= PriorityOne;
- DEBUG(if (verbose) dbgs() << "D|");
+ LLVM_DEBUG(if (verbose) dbgs() << "D|");
}
}
} else {
@@ -738,13 +740,13 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
if (SI.getLatency() > 0 &&
Bot.ResourceModel->isInPacket(SI.getSUnit())) {
ResCount -= PriorityOne;
- DEBUG(if (verbose) dbgs() << "D|");
+ LLVM_DEBUG(if (verbose) dbgs() << "D|");
}
}
}
}
- DEBUG(if (verbose) {
+ LLVM_DEBUG(if (verbose) {
std::stringstream dbgstr;
dbgstr << "Total " << std::setw(4) << ResCount << ")";
dbgs() << dbgstr.str();
@@ -762,9 +764,9 @@ ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
SchedCandidate &Candidate) {
ReadyQueue &Q = Zone.Available;
- DEBUG(if (SchedDebugVerboseLevel > 1)
- readyQueueVerboseDump(RPTracker, Candidate, Q);
- else Q.dump(););
+ LLVM_DEBUG(if (SchedDebugVerboseLevel > 1)
+ readyQueueVerboseDump(RPTracker, Candidate, Q);
+ else Q.dump(););
// getMaxPressureDelta temporarily modifies the tracker.
RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
@@ -781,7 +783,7 @@ pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
// Initialize the candidate if needed.
if (!Candidate.SU) {
- DEBUG(traceCandidate("DCAND", Q, *I, CurrentCost));
+ LLVM_DEBUG(traceCandidate("DCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -794,7 +796,7 @@ pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
if (CurrentCost < 0 && Candidate.SCost < 0) {
if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
|| (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
- DEBUG(traceCandidate("NCAND", Q, *I, CurrentCost));
+ LLVM_DEBUG(traceCandidate("NCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -805,7 +807,7 @@ pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
// Best cost.
if (CurrentCost > Candidate.SCost) {
- DEBUG(traceCandidate("CCAND", Q, *I, CurrentCost));
+ LLVM_DEBUG(traceCandidate("CCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -818,7 +820,7 @@ pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
unsigned CandWeak = getWeakLeft(Candidate.SU, (Q.getID() == TopQID));
if (CurrWeak != CandWeak) {
if (CurrWeak < CandWeak) {
- DEBUG(traceCandidate("WCAND", Q, *I, CurrentCost));
+ LLVM_DEBUG(traceCandidate("WCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -837,7 +839,7 @@ pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
CandSize = Candidate.SU->Preds.size();
}
if (CurrSize > CandSize) {
- DEBUG(traceCandidate("SPCAND", Q, *I, CurrentCost));
+ LLVM_DEBUG(traceCandidate("SPCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -855,7 +857,7 @@ pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
if (UseNewerCandidate && CurrentCost == Candidate.SCost) {
if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
|| (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
- DEBUG(traceCandidate("TCAND", Q, *I, CurrentCost));
+ LLVM_DEBUG(traceCandidate("TCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -877,12 +879,12 @@ SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
// Schedule as far as possible in the direction of no choice. This is most
// efficient, but also provides the best heuristics for CriticalPSets.
if (SUnit *SU = Bot.pickOnlyChoice()) {
- DEBUG(dbgs() << "Picked only Bottom\n");
+ LLVM_DEBUG(dbgs() << "Picked only Bottom\n");
IsTopNode = false;
return SU;
}
if (SUnit *SU = Top.pickOnlyChoice()) {
- DEBUG(dbgs() << "Picked only Top\n");
+ LLVM_DEBUG(dbgs() << "Picked only Top\n");
IsTopNode = true;
return SU;
}
@@ -900,7 +902,7 @@ SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
// increase pressure for one of the excess PSets, then schedule in that
// direction first to provide more freedom in the other direction.
if (BotResult == SingleExcess || BotResult == SingleCritical) {
- DEBUG(dbgs() << "Prefered Bottom Node\n");
+ LLVM_DEBUG(dbgs() << "Prefered Bottom Node\n");
IsTopNode = false;
return BotCand.SU;
}
@@ -911,29 +913,29 @@ SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
assert(TopResult != NoCand && "failed to find the first candidate");
if (TopResult == SingleExcess || TopResult == SingleCritical) {
- DEBUG(dbgs() << "Prefered Top Node\n");
+ LLVM_DEBUG(dbgs() << "Prefered Top Node\n");
IsTopNode = true;
return TopCand.SU;
}
// If either Q has a single candidate that minimizes pressure above the
// original region's pressure pick it.
if (BotResult == SingleMax) {
- DEBUG(dbgs() << "Prefered Bottom Node SingleMax\n");
+ LLVM_DEBUG(dbgs() << "Prefered Bottom Node SingleMax\n");
IsTopNode = false;
return BotCand.SU;
}
if (TopResult == SingleMax) {
- DEBUG(dbgs() << "Prefered Top Node SingleMax\n");
+ LLVM_DEBUG(dbgs() << "Prefered Top Node SingleMax\n");
IsTopNode = true;
return TopCand.SU;
}
if (TopCand.SCost > BotCand.SCost) {
- DEBUG(dbgs() << "Prefered Top Node Cost\n");
+ LLVM_DEBUG(dbgs() << "Prefered Top Node Cost\n");
IsTopNode = true;
return TopCand.SU;
}
// Otherwise prefer the bottom candidate in node order.
- DEBUG(dbgs() << "Prefered Bottom in Node order\n");
+ LLVM_DEBUG(dbgs() << "Prefered Bottom in Node order\n");
IsTopNode = false;
return BotCand.SU;
}
@@ -976,11 +978,11 @@ SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
if (SU->isBottomReady())
Bot.removeReady(SU);
- DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
- << " Scheduling instruction in cycle "
- << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << " (" <<
- reportPackets() << ")\n";
- SU->dump(DAG));
+ LLVM_DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
+ << " Scheduling instruction in cycle "
+ << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << " ("
+ << reportPackets() << ")\n";
+ SU->dump(DAG));
return SU;
}
diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 3dace1682840..9ba964a0aad1 100644
--- a/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -448,8 +448,8 @@ bool HexagonNewValueJump::isNewValueJumpCandidate(
}

bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n"
- << "********** Function: " << MF.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n"
+ << "********** Function: " << MF.getName() << "\n");
if (skipFunction(MF.getFunction()))
return false;
@@ -474,9 +474,10 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
MBBb != MBBe; ++MBBb) {
MachineBasicBlock *MBB = &*MBBb;
- DEBUG(dbgs() << "** dumping bb ** " << MBB->getNumber() << "\n");
- DEBUG(MBB->dump());
- DEBUG(dbgs() << "\n" << "********** dumping instr bottom up **********\n");
+ LLVM_DEBUG(dbgs() << "** dumping bb ** " << MBB->getNumber() << "\n");
+ LLVM_DEBUG(MBB->dump());
+ LLVM_DEBUG(dbgs() << "\n"
+ << "********** dumping instr bottom up **********\n");
bool foundJump = false;
bool foundCompare = false;
bool invertPredicate = false;
@@ -501,7 +502,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
if ((nvjCount == 0) || (nvjCount > -1 && nvjCount <= nvjGenerated))
break;
- DEBUG(dbgs() << "Instr: "; MI.dump(); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Instr: "; MI.dump(); dbgs() << "\n");
if (!foundJump && (MI.getOpcode() == Hexagon::J2_jumpt ||
MI.getOpcode() == Hexagon::J2_jumptpt ||
diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
index 6d9c7820ac8b..29c044b3b729 100644
--- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp
+++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
@@ -217,7 +217,7 @@ bool HexagonOptAddrMode::allValidCandidates(NodeAddr<StmtNode *> SA,
NodeSet Visited, Defs;
const auto &P = LV->getAllReachingDefsRec(UR, UN, Visited, Defs);
if (!P.second) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "*** Unable to collect all reaching defs for use ***\n"
<< PrintNode<UseNode*>(UN, *DFG) << '\n'
<< "The program's complexity may exceed the limits.\n";
@@ -226,7 +226,7 @@ bool HexagonOptAddrMode::allValidCandidates(NodeAddr<StmtNode *> SA,
}
const auto &ReachingDefs = P.first;
if (ReachingDefs.size() > 1) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "*** Multiple Reaching Defs found!!! ***\n";
for (auto DI : ReachingDefs) {
NodeAddr<UseNode *> DA = DFG->addr<UseNode *>(DI);
@@ -244,15 +244,15 @@ bool HexagonOptAddrMode::allValidCandidates(NodeAddr<StmtNode *> SA,
void HexagonOptAddrMode::getAllRealUses(NodeAddr<StmtNode *> SA,
NodeList &UNodeList) {
for (NodeAddr<DefNode *> DA : SA.Addr->members_if(DFG->IsDef, *DFG)) {
- DEBUG(dbgs() << "\t\t[DefNode]: " << Print<NodeAddr<DefNode *>>(DA, *DFG)
- << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t[DefNode]: "
+ << Print<NodeAddr<DefNode *>>(DA, *DFG) << "\n");
RegisterRef DR = DFG->getPRI().normalize(DA.Addr->getRegRef(*DFG));
auto UseSet = LV->getAllReachedUses(DR, DA);
for (auto UI : UseSet) {
NodeAddr<UseNode *> UA = DFG->addr<UseNode *>(UI);
- DEBUG({
+ LLVM_DEBUG({
NodeAddr<StmtNode *> TempIA = UA.Addr->getOwner(*DFG);
dbgs() << "\t\t\t[Reached Use]: "
<< Print<NodeAddr<InstrNode *>>(TempIA, *DFG) << "\n";
@@ -262,8 +262,8 @@ void HexagonOptAddrMode::getAllRealUses(NodeAddr<StmtNode *> SA,
NodeAddr<PhiNode *> PA = UA.Addr->getOwner(*DFG);
NodeId id = PA.Id;
const Liveness::RefMap &phiUse = LV->getRealUses(id);
- DEBUG(dbgs() << "\t\t\t\tphi real Uses"
- << Print<Liveness::RefMap>(phiUse, *DFG) << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t\t\tphi real Uses"
+ << Print<Liveness::RefMap>(phiUse, *DFG) << "\n");
if (!phiUse.empty()) {
for (auto I : phiUse) {
if (!DFG->getPRI().alias(RegisterRef(I.first), DR))
@@ -306,7 +306,8 @@ bool HexagonOptAddrMode::isSafeToExtLR(NodeAddr<StmtNode *> SN,
NodeAddr<RefNode*> AA = LV->getNearestAliasedRef(LRExtRR, IA);
if ((DFG->IsDef(AA) && AA.Id != LRExtRegRD) ||
AA.Addr->getReachingDef() != LRExtRegRD) {
- DEBUG(dbgs() << "isSafeToExtLR: Returning false; another reaching def\n");
+ LLVM_DEBUG(
+ dbgs() << "isSafeToExtLR: Returning false; another reaching def\n");
return false;
}
@@ -396,8 +397,8 @@ bool HexagonOptAddrMode::processAddUses(NodeAddr<StmtNode *> AddSN,
NodeAddr<StmtNode *> OwnerN = UseN.Addr->getOwner(*DFG);
MachineInstr *UseMI = OwnerN.Addr->getCode();
- DEBUG(dbgs() << "\t\t[MI <BB#" << UseMI->getParent()->getNumber()
- << ">]: " << *UseMI << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t[MI <BB#" << UseMI->getParent()->getNumber()
+ << ">]: " << *UseMI << "\n");
Changed |= updateAddUses(AddMI, UseMI);
}
@@ -451,7 +452,7 @@ bool HexagonOptAddrMode::analyzeUses(unsigned tfrDefR,
} else if (MI.getOpcode() == Hexagon::S2_addasl_rrri) {
NodeList AddaslUseList;
- DEBUG(dbgs() << "\nGetting ReachedUses for === " << MI << "\n");
+ LLVM_DEBUG(dbgs() << "\nGetting ReachedUses for === " << MI << "\n");
getAllRealUses(SN, AddaslUseList);
// Process phi nodes.
if (allValidCandidates(SN, AddaslUseList) &&
@@ -515,8 +516,8 @@ bool HexagonOptAddrMode::changeLoad(MachineInstr *OldMI, MachineOperand ImmOp,
} else
Changed = false;
- DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
- DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
+ LLVM_DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ LLVM_DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
} else if (ImmOpNum == 2 && OldMI->getOperand(3).getImm() == 0) {
short NewOpCode = HII->changeAddrMode_rr_io(*OldMI);
assert(NewOpCode >= 0 && "Invalid New opcode\n");
@@ -526,8 +527,8 @@ bool HexagonOptAddrMode::changeLoad(MachineInstr *OldMI, MachineOperand ImmOp,
MIB.add(ImmOp);
OpStart = 4;
Changed = true;
- DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
- DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
+ LLVM_DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ LLVM_DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
}
if (Changed)
@@ -568,8 +569,8 @@ bool HexagonOptAddrMode::changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
OpStart = 3;
}
Changed = true;
- DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
- DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
+ LLVM_DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ LLVM_DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
} else if (ImmOpNum == 1 && OldMI->getOperand(2).getImm() == 0) {
short NewOpCode = HII->changeAddrMode_rr_io(*OldMI);
assert(NewOpCode >= 0 && "Invalid New opcode\n");
@@ -578,8 +579,8 @@ bool HexagonOptAddrMode::changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
MIB.add(ImmOp);
OpStart = 3;
Changed = true;
- DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
- DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
+ LLVM_DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ LLVM_DEBUG(dbgs() << "[TO]: " << *MIB << "\n");
}
if (Changed)
for (unsigned i = OpStart; i < OpEnd; ++i)
@@ -602,7 +603,7 @@ bool HexagonOptAddrMode::changeAddAsl(NodeAddr<UseNode *> AddAslUN,
unsigned ImmOpNum) {
NodeAddr<StmtNode *> SA = AddAslUN.Addr->getOwner(*DFG);
- DEBUG(dbgs() << "Processing addasl :" << *AddAslMI << "\n");
+ LLVM_DEBUG(dbgs() << "Processing addasl :" << *AddAslMI << "\n");
NodeList UNodeList;
getAllRealUses(SA, UNodeList);
@@ -613,11 +614,11 @@ bool HexagonOptAddrMode::changeAddAsl(NodeAddr<UseNode *> AddAslUN,
"Can't transform this 'AddAsl' instruction!");
NodeAddr<StmtNode *> UseIA = UseUN.Addr->getOwner(*DFG);
- DEBUG(dbgs() << "[InstrNode]: " << Print<NodeAddr<InstrNode *>>(UseIA, *DFG)
- << "\n");
+ LLVM_DEBUG(dbgs() << "[InstrNode]: "
+ << Print<NodeAddr<InstrNode *>>(UseIA, *DFG) << "\n");
MachineInstr *UseMI = UseIA.Addr->getCode();
- DEBUG(dbgs() << "[MI <" << printMBBReference(*UseMI->getParent())
- << ">]: " << *UseMI << "\n");
+ LLVM_DEBUG(dbgs() << "[MI <" << printMBBReference(*UseMI->getParent())
+ << ">]: " << *UseMI << "\n");
const MCInstrDesc &UseMID = UseMI->getDesc();
assert(HII->getAddrMode(*UseMI) == HexagonII::BaseImmOffset);
@@ -695,9 +696,9 @@ bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) {
!MI->getOperand(2).isImm() || HII->isConstExtended(*MI)))
continue;
- DEBUG(dbgs() << "[Analyzing " << HII->getName(MI->getOpcode()) << "]: "
- << *MI << "\n\t[InstrNode]: "
- << Print<NodeAddr<InstrNode *>>(IA, *DFG) << '\n');
+ LLVM_DEBUG(dbgs() << "[Analyzing " << HII->getName(MI->getOpcode())
+ << "]: " << *MI << "\n\t[InstrNode]: "
+ << Print<NodeAddr<InstrNode *>>(IA, *DFG) << '\n');
NodeList UNodeList;
getAllRealUses(SA, UNodeList);
@@ -733,8 +734,9 @@ bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) {
bool KeepTfr = false;
- DEBUG(dbgs() << "\t[Total reached uses] : " << UNodeList.size() << "\n");
- DEBUG(dbgs() << "\t[Processing Reached Uses] ===\n");
+ LLVM_DEBUG(dbgs() << "\t[Total reached uses] : " << UNodeList.size()
+ << "\n");
+ LLVM_DEBUG(dbgs() << "\t[Processing Reached Uses] ===\n");
for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
NodeAddr<UseNode *> UseN = *I;
assert(!(UseN.Addr->getFlags() & NodeAttrs::PhiRef) &&
@@ -742,8 +744,8 @@ bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) {
NodeAddr<StmtNode *> OwnerN = UseN.Addr->getOwner(*DFG);
MachineInstr *UseMI = OwnerN.Addr->getCode();
- DEBUG(dbgs() << "\t\t[MI <" << printMBBReference(*UseMI->getParent())
- << ">]: " << *UseMI << "\n");
+ LLVM_DEBUG(dbgs() << "\t\t[MI <" << printMBBReference(*UseMI->getParent())
+ << ">]: " << *UseMI << "\n");
int UseMOnum = -1;
unsigned NumOperands = UseMI->getNumOperands();
@@ -793,8 +795,8 @@ bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) {
Deleted.clear();
NodeAddr<FuncNode *> FA = DFG->getFunc();
- DEBUG(dbgs() << "==== [RefMap#]=====:\n "
- << Print<NodeAddr<FuncNode *>>(FA, *DFG) << "\n");
+ LLVM_DEBUG(dbgs() << "==== [RefMap#]=====:\n "
+ << Print<NodeAddr<FuncNode *>>(FA, *DFG) << "\n");
for (NodeAddr<BlockNode *> BA : FA.Addr->members(*DFG))
Changed |= processBlock(BA);
diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp
index 386e6cbad1f0..e018785f24d8 100644
--- a/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -246,7 +246,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
if (FixedRegs[x])
continue;
unsigned R = TargetRegisterInfo::index2VirtReg(x);
- DEBUG(dbgs() << printReg(R, TRI) << " ~~");
+ LLVM_DEBUG(dbgs() << printReg(R, TRI) << " ~~");
USet &Asc = AssocMap[R];
for (auto U = MRI->use_nodbg_begin(R), Z = MRI->use_nodbg_end();
U != Z; ++U) {
@@ -269,13 +269,13 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
unsigned u = TargetRegisterInfo::virtReg2Index(T);
if (FixedRegs[u])
continue;
- DEBUG(dbgs() << ' ' << printReg(T, TRI));
+ LLVM_DEBUG(dbgs() << ' ' << printReg(T, TRI));
Asc.insert(T);
// Make it symmetric.
AssocMap[T].insert(R);
}
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
}

UUMap R2P;
@@ -468,7 +468,7 @@ bool HexagonSplitDoubleRegs::isProfitable(const USet &Part, LoopRegMap &IRM)
if (FixedNum > 0 && LoopPhiNum > 0)
TotalP -= 20*LoopPhiNum;
- DEBUG(dbgs() << "Partition profit: " << TotalP << '\n');
+ LLVM_DEBUG(dbgs() << "Partition profit: " << TotalP << '\n');
if (SplitAll)
return true;
return TotalP > 0;
@@ -563,7 +563,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L,
Rs.insert(CmpR1);
Rs.insert(CmpR2);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "For loop at " << printMBBReference(*HB) << " ind regs: ";
dump_partition(dbgs(), Rs, *TRI);
dbgs() << '\n';
@@ -996,7 +996,7 @@ bool HexagonSplitDoubleRegs::splitInstr(MachineInstr *MI,
const UUPairMap &PairMap) {
using namespace Hexagon;
- DEBUG(dbgs() << "Splitting: " << *MI);
+ LLVM_DEBUG(dbgs() << "Splitting: " << *MI);
bool Split = false;
unsigned Opc = MI->getOpcode();
@@ -1130,8 +1130,8 @@ bool HexagonSplitDoubleRegs::splitPartition(const USet &Part) {
const TargetRegisterClass *IntRC = &Hexagon::IntRegsRegClass;
bool Changed = false;
- DEBUG(dbgs() << "Splitting partition: "; dump_partition(dbgs(), Part, *TRI);
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Splitting partition: ";
+ dump_partition(dbgs(), Part, *TRI); dbgs() << '\n');
UUPairMap PairMap;
@@ -1148,8 +1148,9 @@ bool HexagonSplitDoubleRegs::splitPartition(const USet &Part) {
unsigned LoR = MRI->createVirtualRegister(IntRC);
unsigned HiR = MRI->createVirtualRegister(IntRC);
- DEBUG(dbgs() << "Created mapping: " << printReg(DR, TRI) << " -> "
- << printReg(HiR, TRI) << ':' << printReg(LoR, TRI) << '\n');
+ LLVM_DEBUG(dbgs() << "Created mapping: " << printReg(DR, TRI) << " -> "
+ << printReg(HiR, TRI) << ':' << printReg(LoR, TRI)
+ << '\n');
PairMap.insert(std::make_pair(DR, UUPair(LoR, HiR)));
}
@@ -1189,8 +1190,8 @@ bool HexagonSplitDoubleRegs::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
- DEBUG(dbgs() << "Splitting double registers in function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Splitting double registers in function: "
+ << MF.getName() << '\n');
auto &ST = MF.getSubtarget<HexagonSubtarget>();
TRI = ST.getRegisterInfo();
@@ -1204,7 +1205,7 @@ bool HexagonSplitDoubleRegs::runOnMachineFunction(MachineFunction &MF) {
collectIndRegs(IRM);
partitionRegisters(P2Rs);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Register partitioning: (partition #0 is fixed)\n";
for (UUSetMap::iterator I = P2Rs.begin(), E = P2Rs.end(); I != E; ++I) {
dbgs() << '#' << I->first << " -> ";
@@ -1222,7 +1223,8 @@ bool HexagonSplitDoubleRegs::runOnMachineFunction(MachineFunction &MF) {
if (Limit >= 0 && Counter >= Limit)
break;
USet &Part = I->second;
- DEBUG(dbgs() << "Calculating profit for partition #" << I->first << '\n');
+ LLVM_DEBUG(dbgs() << "Calculating profit for partition #" << I->first
+ << '\n');
if (!isProfitable(Part, IRM))
continue;
Counter++;
diff --git a/lib/Target/Hexagon/HexagonStoreWidening.cpp b/lib/Target/Hexagon/HexagonStoreWidening.cpp
index 0f058106e32d..991af047387e 100644
--- a/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -474,7 +474,7 @@ bool HexagonStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
// from OG was (in the order in which they appeared in the basic block).
// (The ordering in OG does not have to match the order in the basic block.)
bool HexagonStoreWidening::replaceStores(InstrGroup &OG, InstrGroup &NG) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Replacing:\n";
for (auto I : OG)
dbgs() << " " << *I;
diff --git a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
index ea86c9c42f47..e771f383dffa 100644
--- a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
+++ b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
@@ -74,7 +74,7 @@ static cl::opt<bool>
if (TraceGVPlacement) { \
TRACE_TO(errs(), X); \
} else { \
- DEBUG(TRACE_TO(dbgs(), X)); \
+ LLVM_DEBUG(TRACE_TO(dbgs(), X)); \
} \
} while (false)
#endif
@@ -200,11 +200,11 @@ MCSection *HexagonTargetObjectFile::getExplicitSectionGlobal(
bool HexagonTargetObjectFile::isGlobalInSmallSection(const GlobalObject *GO,
const TargetMachine &TM) const {
// Only global variables, not functions.
- DEBUG(dbgs() << "Checking if value is in small-data, -G"
- << SmallDataThreshold << ": \"" << GO->getName() << "\": ");
+ LLVM_DEBUG(dbgs() << "Checking if value is in small-data, -G"
+ << SmallDataThreshold << ": \"" << GO->getName() << "\": ");
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GO);
if (!GVar) {
- DEBUG(dbgs() << "no, not a global variable\n");
+ LLVM_DEBUG(dbgs() << "no, not a global variable\n");
return false;
}
@@ -213,19 +213,19 @@ bool HexagonTargetObjectFile::isGlobalInSmallSection(const GlobalObject *GO,
// small data or not. This is how we can support mixing -G0/-G8 in LTO.
if (GVar->hasSection()) {
bool IsSmall = isSmallDataSection(GVar->getSection());
- DEBUG(dbgs() << (IsSmall ? "yes" : "no") << ", has section: "
- << GVar->getSection() << '\n');
+ LLVM_DEBUG(dbgs() << (IsSmall ? "yes" : "no")
+ << ", has section: " << GVar->getSection() << '\n');
return IsSmall;
}
if (GVar->isConstant()) {
- DEBUG(dbgs() << "no, is a constant\n");
+ LLVM_DEBUG(dbgs() << "no, is a constant\n");
return false;
}
bool IsLocal = GVar->hasLocalLinkage();
if (!StaticsInSData && IsLocal) {
- DEBUG(dbgs() << "no, is static\n");
+ LLVM_DEBUG(dbgs() << "no, is static\n");
return false;
}
@@ -234,7 +234,7 @@ bool HexagonTargetObjectFile::isGlobalInSmallSection(const GlobalObject *GO,
GType = PT->getElementType();
if (isa<ArrayType>(GType)) {
- DEBUG(dbgs() << "no, is an array\n");
+ LLVM_DEBUG(dbgs() << "no, is an array\n");
return false;
}
@@ -244,22 +244,22 @@ bool HexagonTargetObjectFile::isGlobalInSmallSection(const GlobalObject *GO,
// these objects end up in the sdata, the references will still be valid.
if (StructType *ST = dyn_cast<StructType>(GType)) {
if (ST->isOpaque()) {
- DEBUG(dbgs() << "no, has opaque type\n");
+ LLVM_DEBUG(dbgs() << "no, has opaque type\n");
return false;
}
}
unsigned Size = GVar->getParent()->getDataLayout().getTypeAllocSize(GType);
if (Size == 0) {
- DEBUG(dbgs() << "no, has size 0\n");
+ LLVM_DEBUG(dbgs() << "no, has size 0\n");
return false;
}
if (Size > SmallDataThreshold) {
- DEBUG(dbgs() << "no, size exceeds sdata threshold: " << Size << '\n');
+ LLVM_DEBUG(dbgs() << "no, size exceeds sdata threshold: " << Size << '\n');
return false;
}
- DEBUG(dbgs() << "yes\n");
+ LLVM_DEBUG(dbgs() << "yes\n");
return true;
}
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 2ae48e3d68fa..e44c7237a7e4 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -375,7 +375,7 @@ bool HexagonPacketizerList::promoteToDotCur(MachineInstr &MI,
void HexagonPacketizerList::cleanUpDotCur() {
MachineInstr *MI = nullptr;
for (auto BI : CurrentPacketMIs) {
- DEBUG(dbgs() << "Cleanup packet has "; BI->dump(););
+ LLVM_DEBUG(dbgs() << "Cleanup packet has "; BI->dump(););
if (HII->isDotCurInst(*BI)) {
MI = BI;
continue;
@@ -390,7 +390,7 @@ void HexagonPacketizerList::cleanUpDotCur() {
return;
// We did not find a use of the CUR, so de-cur it.
MI->setDesc(HII->get(HII->getNonDotCurOp(*MI)));
- DEBUG(dbgs() << "Demoted CUR "; MI->dump(););
+ LLVM_DEBUG(dbgs() << "Demoted CUR "; MI->dump(););
}

// Check to see if an instruction can be dot cur.
@@ -414,11 +414,10 @@ bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr &MI,
return false;
// Make sure candidate instruction uses cur.
- DEBUG(dbgs() << "Can we DOT Cur Vector MI\n";
- MI.dump();
- dbgs() << "in packet\n";);
+ LLVM_DEBUG(dbgs() << "Can we DOT Cur Vector MI\n"; MI.dump();
+ dbgs() << "in packet\n";);
MachineInstr &MJ = *MII;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Checking CUR against ";
MJ.dump();
});
@@ -433,12 +432,12 @@ bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr &MI,
// Check for existing uses of a vector register within the packet which
// would be affected by converting a vector load into .cur formt.
for (auto BI : CurrentPacketMIs) {
- DEBUG(dbgs() << "packet has "; BI->dump(););
+ LLVM_DEBUG(dbgs() << "packet has "; BI->dump(););
if (BI->readsRegister(DepReg, MF.getSubtarget().getRegisterInfo()))
return false;
}
- DEBUG(dbgs() << "Can Dot CUR MI\n"; MI.dump(););
+ LLVM_DEBUG(dbgs() << "Can Dot CUR MI\n"; MI.dump(););
// We can convert the opcode into a .cur.
return true;
}
@@ -1762,7 +1761,7 @@ void HexagonPacketizerList::endPacket(MachineBasicBlock *MBB,
bool memShufDisabled = getmemShufDisabled();
if (memShufDisabled && !foundLSInPacket()) {
setmemShufDisabled(false);
- DEBUG(dbgs() << " Not added to NoShufPacket\n");
+ LLVM_DEBUG(dbgs() << " Not added to NoShufPacket\n");
}
memShufDisabled = getmemShufDisabled();
@@ -1781,7 +1780,7 @@ void HexagonPacketizerList::endPacket(MachineBasicBlock *MBB,
CurrentPacketMIs.clear();
ResourceTracker->clearResources();
- DEBUG(dbgs() << "End packet\n");
+ LLVM_DEBUG(dbgs() << "End packet\n");
}

bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr &MI) {
diff --git a/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp b/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
index e94c24837511..9d1073346c72 100644
--- a/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
+++ b/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
@@ -364,17 +364,18 @@ bool HexagonVectorLoopCarriedReuse::canReplace(Instruction *I) {
if (II &&
(II->getIntrinsicID() == Intrinsic::hexagon_V6_hi ||
II->getIntrinsicID() == Intrinsic::hexagon_V6_lo)) {
- DEBUG(dbgs() << "Not considering for reuse: " << *II << "\n");
+ LLVM_DEBUG(dbgs() << "Not considering for reuse: " << *II << "\n");
return false;
}
return true;
}

void HexagonVectorLoopCarriedReuse::findValueToReuse() {
for (auto *D : Dependences) {
- DEBUG(dbgs() << "Processing dependence " << *(D->front()) << "\n");
+ LLVM_DEBUG(dbgs() << "Processing dependence " << *(D->front()) << "\n");
if (D->iterations() > HexagonVLCRIterationLim) {
- DEBUG(dbgs() <<
- ".. Skipping because number of iterations > than the limit\n");
+ LLVM_DEBUG(
+ dbgs()
+ << ".. Skipping because number of iterations > than the limit\n");
continue;
}
@@ -382,7 +383,8 @@ void HexagonVectorLoopCarriedReuse::findValueToReuse() {
Instruction *BEInst = D->back();
int Iters = D->iterations();
BasicBlock *BB = PN->getParent();
- DEBUG(dbgs() << "Checking if any uses of " << *PN << " can be reused\n");
+ LLVM_DEBUG(dbgs() << "Checking if any uses of " << *PN
+ << " can be reused\n");
SmallVector<Instruction *, 4> PNUsers;
for (auto UI = PN->use_begin(), E = PN->use_end(); UI != E; ++UI) {
@@ -392,7 +394,8 @@ void HexagonVectorLoopCarriedReuse::findValueToReuse() {
if (User->getParent() != BB)
continue;
if (ReplacedInsts.count(User)) {
- DEBUG(dbgs() << *User << " has already been replaced. Skipping...\n");
+ LLVM_DEBUG(dbgs() << *User
+ << " has already been replaced. Skipping...\n");
continue;
}
if (isa<PHINode>(User))
@@ -404,7 +407,7 @@ void HexagonVectorLoopCarriedReuse::findValueToReuse() {
PNUsers.push_back(User);
}
- DEBUG(dbgs() << PNUsers.size() << " use(s) of the PHI in the block\n");
+ LLVM_DEBUG(dbgs() << PNUsers.size() << " use(s) of the PHI in the block\n");
// For each interesting use I of PN, find an Instruction BEUser that
// performs the same operation as I on BEInst and whose other operands,
@@ -440,7 +443,7 @@ void HexagonVectorLoopCarriedReuse::findValueToReuse() {
}
}
if (BEUser) {
- DEBUG(dbgs() << "Found Value for reuse.\n");
+ LLVM_DEBUG(dbgs() << "Found Value for reuse.\n");
ReuseCandidate.Inst2Replace = I;
ReuseCandidate.BackedgeInst = BEUser;
return;
@@ -461,7 +464,7 @@ Value *HexagonVectorLoopCarriedReuse::findValueInBlock(Value *Op,
}

void HexagonVectorLoopCarriedReuse::reuseValue() {
- DEBUG(dbgs() << ReuseCandidate);
+ LLVM_DEBUG(dbgs() << ReuseCandidate);
Instruction *Inst2Replace = ReuseCandidate.Inst2Replace;
Instruction *BEInst = ReuseCandidate.BackedgeInst;
int NumOperands = Inst2Replace->getNumOperands();
@@ -486,7 +489,7 @@ void HexagonVectorLoopCarriedReuse::reuseValue() {
}
}
- DEBUG(dbgs() << "reuseValue is making the following changes\n");
+ LLVM_DEBUG(dbgs() << "reuseValue is making the following changes\n");
SmallVector<Instruction *, 4> InstsInPreheader;
for (int i = 0; i < Iterations; ++i) {
@@ -507,8 +510,8 @@ void HexagonVectorLoopCarriedReuse::reuseValue() {
InstsInPreheader.push_back(InstInPreheader);
InstInPreheader->setName(Inst2Replace->getName() + ".hexagon.vlcr");
InstInPreheader->insertBefore(LoopPH->getTerminator());
- DEBUG(dbgs() << "Added " << *InstInPreheader << " to " << LoopPH->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "Added " << *InstInPreheader << " to "
+ << LoopPH->getName() << "\n");
}
BasicBlock *BB = BEInst->getParent();
IRBuilder<> IRB(BB);
@@ -520,7 +523,8 @@ void HexagonVectorLoopCarriedReuse::reuseValue() {
NewPhi = IRB.CreatePHI(InstInPreheader->getType(), 2);
NewPhi->addIncoming(InstInPreheader, LoopPH);
NewPhi->addIncoming(BEVal, BB);
- DEBUG(dbgs() << "Adding " << *NewPhi << " to " << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Adding " << *NewPhi << " to " << BB->getName()
+ << "\n");
BEVal = NewPhi;
}
// We are in LCSSA form. So, a value defined inside the Loop is used only
@@ -539,7 +543,7 @@ bool HexagonVectorLoopCarriedReuse::doVLCR() {
bool Changed = false;
bool Continue;
- DEBUG(dbgs() << "Working on Loop: " << *CurLoop->getHeader() << "\n");
+ LLVM_DEBUG(dbgs() << "Working on Loop: " << *CurLoop->getHeader() << "\n");
do {
// Reset datastructures.
Dependences.clear();
@@ -626,10 +630,9 @@ void HexagonVectorLoopCarriedReuse::findLoopCarriedDeps() {
else
delete D;
}
- DEBUG(dbgs() << "Found " << Dependences.size() << " dependences\n");
- DEBUG(for (size_t i = 0; i < Dependences.size(); ++i) {
- dbgs() << *Dependences[i] << "\n";
- });
+ LLVM_DEBUG(dbgs() << "Found " << Dependences.size() << " dependences\n");
+ LLVM_DEBUG(for (size_t i = 0; i < Dependences.size();
+ ++i) { dbgs() << *Dependences[i] << "\n"; });
}

Pass *llvm::createHexagonVectorLoopCarriedReusePass() {
diff --git a/lib/Target/Hexagon/HexagonVectorPrint.cpp b/lib/Target/Hexagon/HexagonVectorPrint.cpp
index ddd668b2cb1e..18d2f2f4acde 100644
--- a/lib/Target/Hexagon/HexagonVectorPrint.cpp
+++ b/lib/Target/Hexagon/HexagonVectorPrint.cpp
@@ -144,14 +144,15 @@ bool HexagonVectorPrint::runOnMachineFunction(MachineFunction &Fn) {
unsigned Reg = 0;
if (getInstrVecReg(*MII, Reg)) {
VecPrintList.push_back((&*MII));
- DEBUG(dbgs() << "Found vector reg inside bundle \n"; MII->dump());
+ LLVM_DEBUG(dbgs() << "Found vector reg inside bundle \n";
+ MII->dump());
}
}
} else {
unsigned Reg = 0;
if (getInstrVecReg(MI, Reg)) {
VecPrintList.push_back(&MI);
- DEBUG(dbgs() << "Found vector reg \n"; MI.dump());
+ LLVM_DEBUG(dbgs() << "Found vector reg \n"; MI.dump());
}
}
}
@@ -163,33 +164,33 @@ bool HexagonVectorPrint::runOnMachineFunction(MachineFunction &Fn) {
for (auto *I : VecPrintList) {
DebugLoc DL = I->getDebugLoc();
MachineBasicBlock *MBB = I->getParent();
- DEBUG(dbgs() << "Evaluating V MI\n"; I->dump());
+ LLVM_DEBUG(dbgs() << "Evaluating V MI\n"; I->dump());
unsigned Reg = 0;
if (!getInstrVecReg(*I, Reg))
llvm_unreachable("Need a vector reg");
MachineBasicBlock::instr_iterator MII = I->getIterator();
if (I->isInsideBundle()) {
- DEBUG(dbgs() << "add to end of bundle\n"; I->dump());
+ LLVM_DEBUG(dbgs() << "add to end of bundle\n"; I->dump());
while (MBB->instr_end() != MII && MII->isInsideBundle())
MII++;
} else {
- DEBUG(dbgs() << "add after instruction\n"; I->dump());
+ LLVM_DEBUG(dbgs() << "add after instruction\n"; I->dump());
MII++;
}
if (MBB->instr_end() == MII)
continue;
if (Reg >= Hexagon::V0 && Reg <= Hexagon::V31) {
- DEBUG(dbgs() << "adding dump for V" << Reg-Hexagon::V0 << '\n');
+ LLVM_DEBUG(dbgs() << "adding dump for V" << Reg - Hexagon::V0 << '\n');
addAsmInstr(MBB, Reg, MII, DL, QII, Fn);
} else if (Reg >= Hexagon::W0 && Reg <= Hexagon::W15) {
- DEBUG(dbgs() << "adding dump for W" << Reg-Hexagon::W0 << '\n');
+ LLVM_DEBUG(dbgs() << "adding dump for W" << Reg - Hexagon::W0 << '\n');
addAsmInstr(MBB, Hexagon::V0 + (Reg - Hexagon::W0) * 2 + 1,
MII, DL, QII, Fn);
addAsmInstr(MBB, Hexagon::V0 + (Reg - Hexagon::W0) * 2,
MII, DL, QII, Fn);
} else if (Reg >= Hexagon::Q0 && Reg <= Hexagon::Q3) {
- DEBUG(dbgs() << "adding dump for Q" << Reg-Hexagon::Q0 << '\n');
+ LLVM_DEBUG(dbgs() << "adding dump for Q" << Reg - Hexagon::Q0 << '\n');
addAsmInstr(MBB, Reg, MII, DL, QII, Fn);
} else
llvm_unreachable("Bad Vector reg");
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index fe54c19370b3..584f35e0051a 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -510,17 +510,15 @@ public:
break;
}
- DEBUG(dbgs() << "Name=" << getFixupKindInfo(Kind).Name << "(" <<
- (unsigned)Kind << ")\n");
- DEBUG(uint32_t OldData = 0;
- for (unsigned i = 0; i < NumBytes; i++)
- OldData |= (InstAddr[i] << (i * 8)) & (0xff << (i * 8));
- dbgs() << "\tBValue=0x"; dbgs().write_hex(Value) <<
- ": AValue=0x"; dbgs().write_hex(FixupValue) <<
- ": Offset=" << Offset <<
- ": Size=" << Data.size() <<
- ": OInst=0x"; dbgs().write_hex(OldData) <<
- ": Reloc=0x"; dbgs().write_hex(Reloc););
+ LLVM_DEBUG(dbgs() << "Name=" << getFixupKindInfo(Kind).Name << "("
+ << (unsigned)Kind << ")\n");
+ LLVM_DEBUG(
+ uint32_t OldData = 0; for (unsigned i = 0; i < NumBytes; i++) OldData |=
+ (InstAddr[i] << (i * 8)) & (0xff << (i * 8));
+ dbgs() << "\tBValue=0x"; dbgs().write_hex(Value) << ": AValue=0x";
+ dbgs().write_hex(FixupValue)
+ << ": Offset=" << Offset << ": Size=" << Data.size() << ": OInst=0x";
+ dbgs().write_hex(OldData) << ": Reloc=0x"; dbgs().write_hex(Reloc););
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value. The Value has been "split up" into the
@@ -530,10 +528,10 @@ public:
InstAddr[i] |= uint8_t(Reloc >> (i * 8)) & 0xff; // Apply new reloc
}
- DEBUG(uint32_t NewData = 0;
- for (unsigned i = 0; i < NumBytes; i++)
- NewData |= (InstAddr[i] << (i * 8)) & (0xff << (i * 8));
- dbgs() << ": NInst=0x"; dbgs().write_hex(NewData) << "\n";);
+ LLVM_DEBUG(uint32_t NewData = 0;
+ for (unsigned i = 0; i < NumBytes; i++) NewData |=
+ (InstAddr[i] << (i * 8)) & (0xff << (i * 8));
+ dbgs() << ": NInst=0x"; dbgs().write_hex(NewData) << "\n";);
}

bool isInstRelaxable(MCInst const &HMI) const {
@@ -689,8 +687,9 @@ public:
ParseEnd = 0x0000c000; // End of packet parse-bits.
while(Count % HEXAGON_INSTR_SIZE) {
- DEBUG(dbgs() << "Alignment not a multiple of the instruction size:" <<
- Count % HEXAGON_INSTR_SIZE << "/" << HEXAGON_INSTR_SIZE << "\n");
+ LLVM_DEBUG(dbgs() << "Alignment not a multiple of the instruction size:"
+ << Count % HEXAGON_INSTR_SIZE << "/"
+ << HEXAGON_INSTR_SIZE << "\n");
--Count;
OW->write8(0);
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index 91500d909a1a..555386b84645 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -372,7 +372,7 @@ void HexagonMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
MCInst &HMB = const_cast<MCInst &>(MI);
assert(HexagonMCInstrInfo::isBundle(HMB));
- DEBUG(dbgs() << "Encoding bundle\n";);
+ LLVM_DEBUG(dbgs() << "Encoding bundle\n";);
State.Addend = 0;
State.Extended = false;
State.Bundle = &MI;
@@ -415,8 +415,8 @@ void HexagonMCCodeEmitter::EncodeSingleInstruction(const MCInst &MI,
// in the first place!
assert(!HexagonMCInstrInfo::getDesc(MCII, MI).isPseudo() &&
"pseudo-instruction found");
- DEBUG(dbgs() << "Encoding insn `"
- << HexagonMCInstrInfo::getName(MCII, MI) << "'\n");
+ LLVM_DEBUG(dbgs() << "Encoding insn `"
+ << HexagonMCInstrInfo::getName(MCII, MI) << "'\n");
Binary = getBinaryCodeForInstr(MI, Fixups, STI);
unsigned Opc = MI.getOpcode();
@@ -424,8 +424,8 @@ void HexagonMCCodeEmitter::EncodeSingleInstruction(const MCInst &MI,
// Check for unimplemented instructions. Immediate extenders
// are encoded as zero, so they need to be accounted for.
if (!Binary && Opc != DuplexIClass0 && Opc != A4_ext) {
- DEBUG(dbgs() << "Unimplemented inst `"
- << HexagonMCInstrInfo::getName(MCII, MI) << "'\n");
+ LLVM_DEBUG(dbgs() << "Unimplemented inst `"
+ << HexagonMCInstrInfo::getName(MCII, MI) << "'\n");
llvm_unreachable("Unimplemented Instruction");
}
Binary |= Parse;
@@ -630,13 +630,12 @@ unsigned HexagonMCCodeEmitter::getExprOpValue(const MCInst &MI,
unsigned Opc = MCID.getOpcode();
unsigned IType = HexagonMCInstrInfo::getType(MCII, MI);
- DEBUG(dbgs() << "----------------------------------------\n"
- << "Opcode Name: " << HexagonMCInstrInfo::getName(MCII, MI)
- << "\nOpcode: " << Opc
- << "\nRelocation bits: " << FixupWidth
- << "\nAddend: " << State.Addend
- << "\nVariant: " << unsigned(VarKind)
- << "\n----------------------------------------\n");
+ LLVM_DEBUG(dbgs() << "----------------------------------------\n"
+ << "Opcode Name: " << HexagonMCInstrInfo::getName(MCII, MI)
+ << "\nOpcode: " << Opc << "\nRelocation bits: "
+ << FixupWidth << "\nAddend: " << State.Addend
+ << "\nVariant: " << unsigned(VarKind)
+ << "\n----------------------------------------\n");
// Pick the applicable fixup kind for the symbol.
// Handle special cases first, the rest will be looked up in the tables.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
index 127c97e342dc..3eaef9ac7410 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
@@ -205,7 +205,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
switch (L.getOpcode()) {
default:
- DEBUG(dbgs() << "Possible compound ignored\n");
+ LLVM_DEBUG(dbgs() << "Possible compound ignored\n");
return CompoundInsn;
case Hexagon::A2_tfrsi:
@@ -233,7 +233,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
break;
case Hexagon::C2_cmpeq:
- DEBUG(dbgs() << "CX: C2_cmpeq\n");
+ LLVM_DEBUG(dbgs() << "CX: C2_cmpeq\n");
Rs = L.getOperand(1);
Rt = L.getOperand(2);
@@ -246,7 +246,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
break;
case Hexagon::C2_cmpgt:
- DEBUG(dbgs() << "CX: C2_cmpgt\n");
+ LLVM_DEBUG(dbgs() << "CX: C2_cmpgt\n");
Rs = L.getOperand(1);
Rt = L.getOperand(2);
@@ -259,7 +259,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
break;
case Hexagon::C2_cmpgtu:
- DEBUG(dbgs() << "CX: C2_cmpgtu\n");
+ LLVM_DEBUG(dbgs() << "CX: C2_cmpgtu\n");
Rs = L.getOperand(1);
Rt = L.getOperand(2);
@@ -272,7 +272,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
break;
case Hexagon::C2_cmpeqi:
- DEBUG(dbgs() << "CX: C2_cmpeqi\n");
+ LLVM_DEBUG(dbgs() << "CX: C2_cmpeqi\n");
Success = L.getOperand(2).getExpr()->evaluateAsAbsolute(Value);
(void)Success;
assert(Success);
@@ -290,7 +290,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
break;
case Hexagon::C2_cmpgti:
- DEBUG(dbgs() << "CX: C2_cmpgti\n");
+ LLVM_DEBUG(dbgs() << "CX: C2_cmpgti\n");
Success = L.getOperand(2).getExpr()->evaluateAsAbsolute(Value);
(void)Success;
assert(Success);
@@ -308,7 +308,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
break;
case Hexagon::C2_cmpgtui:
- DEBUG(dbgs() << "CX: C2_cmpgtui\n");
+ LLVM_DEBUG(dbgs() << "CX: C2_cmpgtui\n");
Rs = L.getOperand(1);
compoundOpcode = cmpgtuiBitOpcode[getCompoundOp(R)];
CompoundInsn = new (Context) MCInst;
@@ -319,7 +319,7 @@ static MCInst *getCompoundInsn(MCContext &Context, MCInst const &L,
break;
case Hexagon::S2_tstbit_i:
- DEBUG(dbgs() << "CX: S2_tstbit_i\n");
+ LLVM_DEBUG(dbgs() << "CX: S2_tstbit_i\n");
Rs = L.getOperand(1);
compoundOpcode = tstBitOpcode[getCompoundOp(R)];
CompoundInsn = new (Context) MCInst;
@@ -372,14 +372,14 @@ static bool lookForCompound(MCInstrInfo const &MCII, MCContext &Context,
BExtended = true;
continue;
}
- DEBUG(dbgs() << "J,B: " << JumpInst->getOpcode() << ","
- << Inst->getOpcode() << "\n");
+ LLVM_DEBUG(dbgs() << "J,B: " << JumpInst->getOpcode() << ","
+ << Inst->getOpcode() << "\n");
if (isOrderedCompoundPair(*Inst, BExtended, *JumpInst, JExtended)) {
MCInst *CompoundInsn = getCompoundInsn(Context, *Inst, *JumpInst);
if (CompoundInsn) {
- DEBUG(dbgs() << "B: " << Inst->getOpcode() << ","
- << JumpInst->getOpcode() << " Compounds to "
- << CompoundInsn->getOpcode() << "\n");
+ LLVM_DEBUG(dbgs() << "B: " << Inst->getOpcode() << ","
+ << JumpInst->getOpcode() << " Compounds to "
+ << CompoundInsn->getOpcode() << "\n");
J->setInst(CompoundInsn);
MCI.erase(B);
return true;
@@ -422,7 +422,7 @@ void HexagonMCInstrInfo::tryCompound(MCInstrInfo const &MCII, MCSubtargetInfo co
if (StartedValid &&
!llvm::HexagonMCShuffle(Context, false, MCII, STI, MCI)) {
- DEBUG(dbgs() << "Found ERROR\n");
+ LLVM_DEBUG(dbgs() << "Found ERROR\n");
MCI = OriginalBundle;
}
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
index fd064416fd96..b208a3668124 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
@@ -1045,8 +1045,8 @@ HexagonMCInstrInfo::getDuplexPossibilties(MCInstrInfo const &MCII,
bool bisReversable = true;
if (isStoreInst(MCB.getOperand(j).getInst()->getOpcode()) &&
isStoreInst(MCB.getOperand(k).getInst()->getOpcode())) {
- DEBUG(dbgs() << "skip out of order write pair: " << k << "," << j
- << "\n");
+ LLVM_DEBUG(dbgs() << "skip out of order write pair: " << k << "," << j
+ << "\n");
bisReversable = false;
}
if (HexagonMCInstrInfo::isMemReorderDisabled(MCB)) // }:mem_noshuf
@@ -1066,14 +1066,14 @@ HexagonMCInstrInfo::getDuplexPossibilties(MCInstrInfo const &MCII,
// Save off pairs for duplex checking.
duplexToTry.push_back(DuplexCandidate(j, k, iClass));
- DEBUG(dbgs() << "adding pair: " << j << "," << k << ":"
- << MCB.getOperand(j).getInst()->getOpcode() << ","
- << MCB.getOperand(k).getInst()->getOpcode() << "\n");
+ LLVM_DEBUG(dbgs() << "adding pair: " << j << "," << k << ":"
+ << MCB.getOperand(j).getInst()->getOpcode() << ","
+ << MCB.getOperand(k).getInst()->getOpcode() << "\n");
continue;
} else {
- DEBUG(dbgs() << "skipping pair: " << j << "," << k << ":"
- << MCB.getOperand(j).getInst()->getOpcode() << ","
- << MCB.getOperand(k).getInst()->getOpcode() << "\n");
+ LLVM_DEBUG(dbgs() << "skipping pair: " << j << "," << k << ":"
+ << MCB.getOperand(j).getInst()->getOpcode() << ","
+ << MCB.getOperand(k).getInst()->getOpcode() << "\n");
}
// Try reverse.
@@ -1091,13 +1091,15 @@ HexagonMCInstrInfo::getDuplexPossibilties(MCInstrInfo const &MCII,
// Save off pairs for duplex checking.
duplexToTry.push_back(DuplexCandidate(k, j, iClass));
- DEBUG(dbgs() << "adding pair:" << k << "," << j << ":"
- << MCB.getOperand(j).getInst()->getOpcode() << ","
- << MCB.getOperand(k).getInst()->getOpcode() << "\n");
+ LLVM_DEBUG(dbgs()
+ << "adding pair:" << k << "," << j << ":"
+ << MCB.getOperand(j).getInst()->getOpcode() << ","
+ << MCB.getOperand(k).getInst()->getOpcode() << "\n");
} else {
- DEBUG(dbgs() << "skipping pair: " << k << "," << j << ":"
- << MCB.getOperand(j).getInst()->getOpcode() << ","
- << MCB.getOperand(k).getInst()->getOpcode() << "\n");
+ LLVM_DEBUG(dbgs()
+ << "skipping pair: " << k << "," << j << ":"
+ << MCB.getOperand(j).getInst()->getOpcode() << ","
+ << MCB.getOperand(k).getInst()->getOpcode() << "\n");
}
}
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
index 7bd54fdfa3d5..4281144acaee 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
@@ -38,7 +38,8 @@ void HexagonMCShuffler::init(MCInst &MCB) {
// Copy the bundle for the shuffling.
for (const auto &I : HexagonMCInstrInfo::bundleInstructions(MCB)) {
MCInst &MI = *const_cast<MCInst *>(I.getInst());
- DEBUG(dbgs() << "Shuffling: " << MCII.getName(MI.getOpcode()) << '\n');
+ LLVM_DEBUG(dbgs() << "Shuffling: " << MCII.getName(MI.getOpcode())
+ << '\n');
assert(!HexagonMCInstrInfo::getDesc(MCII, MI).isPseudo());
if (!HexagonMCInstrInfo::isImmext(MI)) {
@@ -98,7 +99,7 @@ bool HexagonMCShuffler::reshuffleTo(MCInst &MCB) {
copyTo(MCB);
return true;
}
- DEBUG(MCB.dump());
+ LLVM_DEBUG(MCB.dump());
return false;
}
@@ -119,10 +120,10 @@ bool llvm::HexagonMCShuffle(MCContext &Context, bool Fatal,
// * %d7 = IMPLICIT_DEF; flags:
// After the IMPLICIT_DEFs were removed by the asm printer, the bundle
// became empty.
- DEBUG(dbgs() << "Skipping empty bundle");
+ LLVM_DEBUG(dbgs() << "Skipping empty bundle");
return false;
} else if (!HexagonMCInstrInfo::isBundle(MCB)) {
- DEBUG(dbgs() << "Skipping stand-alone insn");
+ LLVM_DEBUG(dbgs() << "Skipping stand-alone insn");
return false;
}
@@ -144,10 +145,10 @@ llvm::HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
// * %d7 = IMPLICIT_DEF; flags:
// After the IMPLICIT_DEFs were removed by the asm printer, the bundle
// became empty.
- DEBUG(dbgs() << "Skipping empty bundle");
+ LLVM_DEBUG(dbgs() << "Skipping empty bundle");
return false;
} else if (!HexagonMCInstrInfo::isBundle(MCB)) {
- DEBUG(dbgs() << "Skipping stand-alone insn");
+ LLVM_DEBUG(dbgs() << "Skipping stand-alone insn");
return false;
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index 7709a0f61624..59f3caa6af94 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -641,14 +641,14 @@ bool HexagonShuffler::shuffle() {
}
for (iterator ISJ = begin(); ISJ != end(); ++ISJ)
- DEBUG(dbgs().write_hex(ISJ->Core.getUnits()); if (ISJ->CVI.isValid()) {
+ LLVM_DEBUG(dbgs().write_hex(ISJ->Core.getUnits()); if (ISJ->CVI.isValid()) {
dbgs() << '/';
dbgs().write_hex(ISJ->CVI.getUnits()) << '|';
dbgs() << ISJ->CVI.getLanes();
} dbgs() << ':'
<< HexagonMCInstrInfo::getDesc(MCII, ISJ->getDesc()).getOpcode();
- dbgs() << '\n');
- DEBUG(dbgs() << '\n');
+ dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
return Ok;
}
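The HexagonShuffler hunk above is one of the few sites where the macro wraps an entire statement sequence, including an if, rather than a single stream expression. A minimal sketch of that usage pattern follows; the DEBUG_TYPE string "hexagon-shuffle" and the helper reportUnits() are hypothetical names chosen for illustration and are not part of this patch.

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical debug type for illustration; each .cpp file defines its own.
#define DEBUG_TYPE "hexagon-shuffle"

static void reportUnits(unsigned CoreUnits, bool HasCVI, unsigned CVIUnits) {
  // Single stream-expression form.
  LLVM_DEBUG(dbgs() << "core units:\n");

  // Statement-sequence form, as in the shuffle() hunk above. In NDEBUG builds
  // the whole body is compiled out; in asserts builds it runs only when
  // -debug or -debug-only=hexagon-shuffle is passed to the tool.
  LLVM_DEBUG({
    dbgs().write_hex(CoreUnits);
    if (HasCVI) {
      dbgs() << '/';
      dbgs().write_hex(CVIUnits);
    }
    dbgs() << '\n';
  });
}

Either spelling of the body works because the macro expands, roughly, to a guarded compound statement rather than a function call.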
diff --git a/lib/Target/Lanai/LanaiISelDAGToDAG.cpp b/lib/Target/Lanai/LanaiISelDAGToDAG.cpp
index 19dcc7d00e33..5081cfbe4922 100644
--- a/lib/Target/Lanai/LanaiISelDAGToDAG.cpp
+++ b/lib/Target/Lanai/LanaiISelDAGToDAG.cpp
@@ -275,7 +275,7 @@ void LanaiDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
return;
}
diff --git a/lib/Target/Lanai/LanaiISelLowering.cpp b/lib/Target/Lanai/LanaiISelLowering.cpp
index 33455137acc9..22fcf473d0c9 100644
--- a/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -484,8 +484,8 @@ SDValue LanaiTargetLowering::LowerCCCArguments(
break;
}
default:
- DEBUG(dbgs() << "LowerFormalArguments Unhandled argument type: "
- << RegVT.getEVTString() << "\n");
+ LLVM_DEBUG(dbgs() << "LowerFormalArguments Unhandled argument type: "
+ << RegVT.getEVTString() << "\n");
llvm_unreachable("unhandled argument type");
}
} else {
diff --git a/lib/Target/MSP430/MSP430BranchSelector.cpp b/lib/Target/MSP430/MSP430BranchSelector.cpp
index 87c320aa76aa..2b3495405545 100644
--- a/lib/Target/MSP430/MSP430BranchSelector.cpp
+++ b/lib/Target/MSP430/MSP430BranchSelector.cpp
@@ -138,15 +138,15 @@ bool MSP430BSel::expandBranches(OffsetVector &BlockOffsets) {
continue;
}
- DEBUG(dbgs() << " Found a branch that needs expanding, "
- << printMBBReference(*DestBB) << ", Distance "
- << BranchDistance << "\n");
+ LLVM_DEBUG(dbgs() << " Found a branch that needs expanding, "
+ << printMBBReference(*DestBB) << ", Distance "
+ << BranchDistance << "\n");
// If JCC is not the last instruction we need to split the MBB.
if (MI->getOpcode() == MSP430::JCC && std::next(MI) != EE) {
- DEBUG(dbgs() << " Found a basic block that needs to be split, "
- << printMBBReference(*MBB) << "\n");
+ LLVM_DEBUG(dbgs() << " Found a basic block that needs to be split, "
+ << printMBBReference(*MBB) << "\n");
// Create a new basic block.
MachineBasicBlock *NewBB =
@@ -229,7 +229,7 @@ bool MSP430BSel::runOnMachineFunction(MachineFunction &mf) {
if (!BranchSelectEnabled)
return false;
- DEBUG(dbgs() << "\n********** " << getPassName() << " **********\n");
+ LLVM_DEBUG(dbgs() << "\n********** " << getPassName() << " **********\n");
// BlockOffsets - Contains the distance from the beginning of the function to
// the beginning of each basic block.
diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 05b3bf70eef9..005f5f44a635 100644
--- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -180,7 +180,7 @@ bool MSP430DAGToDAGISel::MatchAddressBase(SDValue N, MSP430ISelAddressMode &AM)
}
bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) {
- DEBUG(errs() << "MatchAddress: "; AM.dump());
+ LLVM_DEBUG(errs() << "MatchAddress: "; AM.dump());
switch (N.getOpcode()) {
default: break;
@@ -384,9 +384,7 @@ void MSP430DAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== ";
- Node->dump(CurDAG);
- errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
return;
}
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index b3cfc869f294..4e28d910ecb1 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -1496,7 +1496,7 @@ public:
static std::unique_ptr<MipsOperand>
createNumericReg(unsigned Index, StringRef Str, const MCRegisterInfo *RegInfo,
SMLoc S, SMLoc E, MipsAsmParser &Parser) {
- DEBUG(dbgs() << "createNumericReg(" << Index << ", ...)\n");
+ LLVM_DEBUG(dbgs() << "createNumericReg(" << Index << ", ...)\n");
return CreateReg(Index, Str, RegKind_Numeric, RegInfo, S, E, Parser);
}
@@ -5698,7 +5698,7 @@ unsigned MipsAsmParser::getReg(int RC, int RegNo) {
bool MipsAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
MCAsmParser &Parser = getParser();
- DEBUG(dbgs() << "parseOperand\n");
+ LLVM_DEBUG(dbgs() << "parseOperand\n");
// Check if the current operand has a custom associated parser, if so, try to
// custom parse the operand, or fallback to the general approach.
@@ -5711,7 +5711,7 @@ bool MipsAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
if (ResTy == MatchOperand_ParseFail)
return true;
- DEBUG(dbgs() << ".. Generic Parser\n");
+ LLVM_DEBUG(dbgs() << ".. Generic Parser\n");
switch (getLexer().getKind()) {
case AsmToken::Dollar: {
@@ -5741,7 +5741,7 @@ bool MipsAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
return false;
}
default: {
- DEBUG(dbgs() << ".. generic integer expression\n");
+ LLVM_DEBUG(dbgs() << ".. generic integer expression\n");
const MCExpr *Expr;
SMLoc S = Parser.getTok().getLoc(); // Start location of the operand.
@@ -5814,7 +5814,7 @@ bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) {
OperandMatchResultTy
MipsAsmParser::parseMemOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
- DEBUG(dbgs() << "parseMemOperand\n");
+ LLVM_DEBUG(dbgs() << "parseMemOperand\n");
const MCExpr *IdVal = nullptr;
SMLoc S;
bool isParenExpr = false;
@@ -6044,13 +6044,13 @@ MipsAsmParser::matchAnyRegisterWithoutDollar(OperandVector &Operands, SMLoc S) {
auto Token = Parser.getLexer().peekTok(false);
if (Token.is(AsmToken::Identifier)) {
- DEBUG(dbgs() << ".. identifier\n");
+ LLVM_DEBUG(dbgs() << ".. identifier\n");
StringRef Identifier = Token.getIdentifier();
OperandMatchResultTy ResTy =
matchAnyRegisterNameWithoutDollar(Operands, Identifier, S);
return ResTy;
} else if (Token.is(AsmToken::Integer)) {
- DEBUG(dbgs() << ".. integer\n");
+ LLVM_DEBUG(dbgs() << ".. integer\n");
int64_t RegNum = Token.getIntVal();
if (RegNum < 0 || RegNum > 31) {
// Show the error, but treat invalid register
@@ -6064,7 +6064,7 @@ MipsAsmParser::matchAnyRegisterWithoutDollar(OperandVector &Operands, SMLoc S) {
return MatchOperand_Success;
}
- DEBUG(dbgs() << Parser.getTok().getKind() << "\n");
+ LLVM_DEBUG(dbgs() << Parser.getTok().getKind() << "\n");
return MatchOperand_NoMatch;
}
@@ -6072,22 +6072,22 @@ MipsAsmParser::matchAnyRegisterWithoutDollar(OperandVector &Operands, SMLoc S) {
OperandMatchResultTy
MipsAsmParser::parseAnyRegister(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
- DEBUG(dbgs() << "parseAnyRegister\n");
+ LLVM_DEBUG(dbgs() << "parseAnyRegister\n");
auto Token = Parser.getTok();
SMLoc S = Token.getLoc();
if (Token.isNot(AsmToken::Dollar)) {
- DEBUG(dbgs() << ".. !$ -> try sym aliasing\n");
+ LLVM_DEBUG(dbgs() << ".. !$ -> try sym aliasing\n");
if (Token.is(AsmToken::Identifier)) {
if (searchSymbolAlias(Operands))
return MatchOperand_Success;
}
- DEBUG(dbgs() << ".. !symalias -> NoMatch\n");
+ LLVM_DEBUG(dbgs() << ".. !symalias -> NoMatch\n");
return MatchOperand_NoMatch;
}
- DEBUG(dbgs() << ".. $\n");
+ LLVM_DEBUG(dbgs() << ".. $\n");
OperandMatchResultTy ResTy = matchAnyRegisterWithoutDollar(Operands, S);
if (ResTy == MatchOperand_Success) {
@@ -6100,7 +6100,7 @@ MipsAsmParser::parseAnyRegister(OperandVector &Operands) {
OperandMatchResultTy
MipsAsmParser::parseJumpTarget(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
- DEBUG(dbgs() << "parseJumpTarget\n");
+ LLVM_DEBUG(dbgs() << "parseJumpTarget\n");
SMLoc S = getLexer().getLoc();
@@ -6344,7 +6344,7 @@ bool MipsAsmParser::parseBracketSuffix(StringRef Name,
bool MipsAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) {
MCAsmParser &Parser = getParser();
- DEBUG(dbgs() << "ParseInstruction\n");
+ LLVM_DEBUG(dbgs() << "ParseInstruction\n");
// We have reached first instruction, module directive are now forbidden.
getTargetStreamer().forbidModuleDirective();
diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index f7d80b40c3f3..4cbfca4c6438 100644
--- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -1225,7 +1225,8 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
return MCDisassembler::Fail;
if (hasMips32r6()) {
- DEBUG(dbgs() << "Trying MicroMipsR616 table (16-bit instructions):\n");
+ LLVM_DEBUG(
+ dbgs() << "Trying MicroMipsR616 table (16-bit instructions):\n");
// Calling the auto-generated decoder function for microMIPS32R6
// 16-bit instructions.
Result = decodeInstruction(DecoderTableMicroMipsR616, Instr, Insn,
@@ -1236,7 +1237,7 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
}
- DEBUG(dbgs() << "Trying MicroMips16 table (16-bit instructions):\n");
+ LLVM_DEBUG(dbgs() << "Trying MicroMips16 table (16-bit instructions):\n");
// Calling the auto-generated decoder function for microMIPS 16-bit
// instructions.
Result = decodeInstruction(DecoderTableMicroMips16, Instr, Insn, Address,
@@ -1251,7 +1252,8 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
return MCDisassembler::Fail;
if (hasMips32r6()) {
- DEBUG(dbgs() << "Trying MicroMips32r632 table (32-bit instructions):\n");
+ LLVM_DEBUG(
+ dbgs() << "Trying MicroMips32r632 table (32-bit instructions):\n");
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableMicroMipsR632, Instr, Insn, Address,
this, STI);
@@ -1261,7 +1263,7 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
}
- DEBUG(dbgs() << "Trying MicroMips32 table (32-bit instructions):\n");
+ LLVM_DEBUG(dbgs() << "Trying MicroMips32 table (32-bit instructions):\n");
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableMicroMips32, Instr, Insn, Address,
this, STI);
@@ -1271,7 +1273,7 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (isFP64()) {
- DEBUG(dbgs() << "Trying MicroMipsFP64 table (32-bit opcodes):\n");
+ LLVM_DEBUG(dbgs() << "Trying MicroMipsFP64 table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMicroMipsFP6432, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail) {
@@ -1300,7 +1302,7 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
Size = 4;
if (hasCOP3()) {
- DEBUG(dbgs() << "Trying COP3_ table (32-bit opcodes):\n");
+ LLVM_DEBUG(dbgs() << "Trying COP3_ table (32-bit opcodes):\n");
Result =
decodeInstruction(DecoderTableCOP3_32, Instr, Insn, Address, this, STI);
if (Result != MCDisassembler::Fail)
@@ -1308,7 +1310,8 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (hasMips32r6() && isGP64()) {
- DEBUG(dbgs() << "Trying Mips32r6_64r6 (GPR64) table (32-bit opcodes):\n");
+ LLVM_DEBUG(
+ dbgs() << "Trying Mips32r6_64r6 (GPR64) table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMips32r6_64r6_GP6432, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail)
@@ -1316,7 +1319,8 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (hasMips32r6() && isPTR64()) {
- DEBUG(dbgs() << "Trying Mips32r6_64r6 (PTR64) table (32-bit opcodes):\n");
+ LLVM_DEBUG(
+ dbgs() << "Trying Mips32r6_64r6 (PTR64) table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMips32r6_64r6_PTR6432, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail)
@@ -1324,7 +1328,7 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (hasMips32r6()) {
- DEBUG(dbgs() << "Trying Mips32r6_64r6 table (32-bit opcodes):\n");
+ LLVM_DEBUG(dbgs() << "Trying Mips32r6_64r6 table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMips32r6_64r632, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail)
@@ -1332,7 +1336,8 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (hasMips2() && isPTR64()) {
- DEBUG(dbgs() << "Trying Mips32r6_64r6 (PTR64) table (32-bit opcodes):\n");
+ LLVM_DEBUG(
+ dbgs() << "Trying Mips32r6_64r6 (PTR64) table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMips32_64_PTR6432, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail)
@@ -1340,7 +1345,7 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (hasCnMips()) {
- DEBUG(dbgs() << "Trying CnMips table (32-bit opcodes):\n");
+ LLVM_DEBUG(dbgs() << "Trying CnMips table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableCnMips32, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail)
@@ -1348,7 +1353,7 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (isGP64()) {
- DEBUG(dbgs() << "Trying Mips64 (GPR64) table (32-bit opcodes):\n");
+ LLVM_DEBUG(dbgs() << "Trying Mips64 (GPR64) table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMips6432, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail)
@@ -1356,14 +1361,15 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
if (isFP64()) {
- DEBUG(dbgs() << "Trying MipsFP64 (64 bit FPU) table (32-bit opcodes):\n");
+ LLVM_DEBUG(
+ dbgs() << "Trying MipsFP64 (64 bit FPU) table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMipsFP6432, Instr, Insn,
Address, this, STI);
if (Result != MCDisassembler::Fail)
return Result;
}
- DEBUG(dbgs() << "Trying Mips table (32-bit opcodes):\n");
+ LLVM_DEBUG(dbgs() << "Trying Mips table (32-bit opcodes):\n");
// Calling the auto-generated decoder function.
Result =
decodeInstruction(DecoderTableMips32, Instr, Insn, Address, this, STI);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 94a196e0c00c..fbc64426c9d5 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -116,15 +116,15 @@ static InputIt find_best(InputIt First, InputIt Last, UnaryPredicate Predicate,
for (InputIt I = First; I != Last; ++I) {
unsigned Matched = Predicate(*I);
if (Matched != FindBest_NoMatch) {
- DEBUG(dbgs() << std::distance(First, I) << " is a match (";
- I->print(dbgs()); dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << std::distance(First, I) << " is a match (";
+ I->print(dbgs()); dbgs() << ")\n");
if (Best == Last || BetterThan(*I, *Best)) {
- DEBUG(dbgs() << ".. and it beats the last one\n");
+ LLVM_DEBUG(dbgs() << ".. and it beats the last one\n");
Best = I;
}
}
if (Matched == FindBest_PerfectMatch) {
- DEBUG(dbgs() << ".. and it is unbeatable\n");
+ LLVM_DEBUG(dbgs() << ".. and it is unbeatable\n");
break;
}
}
@@ -444,7 +444,7 @@ void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
std::list<MipsRelocationEntry> Sorted;
std::list<ELFRelocationEntry> Remainder;
- DEBUG(dumpRelocs("R: ", Relocs));
+ LLVM_DEBUG(dumpRelocs("R: ", Relocs));
// Separate the movable relocations (AHL relocations using the high bits) from
// the immobile relocations (everything else). This does not preserve high/low
@@ -455,7 +455,7 @@ void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
});
for (auto &R : Remainder) {
- DEBUG(dbgs() << "Matching: " << R << "\n");
+ LLVM_DEBUG(dbgs() << "Matching: " << R << "\n");
unsigned MatchingType = getMatchingLoType(R);
assert(MatchingType != ELF::R_MIPS_NONE &&
@@ -490,7 +490,7 @@ void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
Sorted.insert(InsertionPoint, R)->Matched = true;
}
- DEBUG(dumpRelocs("S: ", Sorted));
+ LLVM_DEBUG(dumpRelocs("S: ", Sorted));
assert(Relocs.size() == Sorted.size() && "Some relocs were not consumed");
diff --git a/lib/Target/Mips/MicroMipsSizeReduction.cpp b/lib/Target/Mips/MicroMipsSizeReduction.cpp
index f2e014084e46..301a6c7dcd6a 100644
--- a/lib/Target/Mips/MicroMipsSizeReduction.cpp
+++ b/lib/Target/Mips/MicroMipsSizeReduction.cpp
@@ -444,12 +444,12 @@ bool MicroMipsSizeReduce::ReplaceInstruction(MachineInstr *MI,
enum OperandTransfer OpTransfer = Entry.TransferOperands();
- DEBUG(dbgs() << "Converting 32-bit: " << *MI);
+ LLVM_DEBUG(dbgs() << "Converting 32-bit: " << *MI);
++NumReduced;
if (OpTransfer == OT_OperandsAll) {
MI->setDesc(MipsII->get(Entry.NarrowOpc()));
- DEBUG(dbgs() << " to 16-bit: " << *MI);
+ LLVM_DEBUG(dbgs() << " to 16-bit: " << *MI);
return true;
} else {
MachineBasicBlock &MBB = *MI->getParent();
@@ -484,7 +484,7 @@ bool MicroMipsSizeReduce::ReplaceInstruction(MachineInstr *MI,
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
- DEBUG(dbgs() << " to 16-bit: " << *MIB);
+ LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB);
MBB.erase_instr(MI);
return true;
}
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
index 682ea5c4ed7f..c310d9491af8 100644
--- a/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -482,11 +482,11 @@ static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
// remove the use-soft-float attribute
static void removeUseSoftFloat(Function &F) {
AttrBuilder B;
- DEBUG(errs() << "removing -use-soft-float\n");
+ LLVM_DEBUG(errs() << "removing -use-soft-float\n");
B.addAttribute("use-soft-float", "false");
F.removeAttributes(AttributeList::FunctionIndex, B);
if (F.hasFnAttribute("use-soft-float")) {
- DEBUG(errs() << "still has -use-soft-float\n");
+ LLVM_DEBUG(errs() << "still has -use-soft-float\n");
}
F.addAttributes(AttributeList::FunctionIndex, B);
}
@@ -510,7 +510,7 @@ static void removeUseSoftFloat(Function &F) {
bool Mips16HardFloat::runOnModule(Module &M) {
auto &TM = static_cast<const MipsTargetMachine &>(
getAnalysis<TargetPassConfig>().getTM<TargetMachine>());
- DEBUG(errs() << "Run on Module Mips16HardFloat\n");
+ LLVM_DEBUG(errs() << "Run on Module Mips16HardFloat\n");
bool Modified = false;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->hasFnAttribute("nomips16") &&
diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp
index ff95f3c72282..751afd5ed369 100644
--- a/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -127,8 +127,8 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
Offset = SPOffset + (int64_t)StackSize;
Offset += MI.getOperand(OpNo + 1).getImm();
-
- DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
+ LLVM_DEBUG(errs() << "Offset : " << Offset << "\n"
+ << "<--------->\n");
if (!MI.isDebugValue() &&
!Mips16InstrInfo::validImmediate(MI.getOpcode(), FrameReg, Offset)) {
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index 5ee816be3730..9eb13a68e561 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -443,13 +443,15 @@ bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
MCP = mf.getConstantPool();
STI = &static_cast<const MipsSubtarget &>(mf.getSubtarget());
- DEBUG(dbgs() << "constant island machine function " << "\n");
+ LLVM_DEBUG(dbgs() << "constant island machine function "
+ << "\n");
if (!STI->inMips16Mode() || !MipsSubtarget::useConstantIslands()) {
return false;
}
TII = (const Mips16InstrInfo *)STI->getInstrInfo();
MFI = MF->getInfo<MipsFunctionInfo>();
- DEBUG(dbgs() << "constant island processing " << "\n");
+ LLVM_DEBUG(dbgs() << "constant island processing "
+ << "\n");
//
// will need to make predermination if there is any constants we need to
// put in constant islands. TBD.
@@ -480,7 +482,7 @@ bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// constant pool users.
initializeFunctionInfo(CPEMIs);
CPEMIs.clear();
- DEBUG(dumpBBs());
+ LLVM_DEBUG(dumpBBs());
/// Remove dead constant pool entries.
MadeChange |= removeUnusedCPEntries();
@@ -490,31 +492,31 @@ bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
unsigned NoCPIters = 0, NoBRIters = 0;
(void)NoBRIters;
while (true) {
- DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
+ LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
bool CPChange = false;
for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
CPChange |= handleConstantPoolUser(i);
if (CPChange && ++NoCPIters > 30)
report_fatal_error("Constant Island pass failed to converge!");
- DEBUG(dumpBBs());
+ LLVM_DEBUG(dumpBBs());
// Clear NewWaterList now. If we split a block for branches, it should
// appear as "new water" for the next iteration of constant pool placement.
NewWaterList.clear();
- DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
+ LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
bool BRChange = false;
for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
BRChange |= fixupImmediateBr(ImmBranches[i]);
if (BRChange && ++NoBRIters > 30)
report_fatal_error("Branch Fix Up pass failed to converge!");
- DEBUG(dumpBBs());
+ LLVM_DEBUG(dumpBBs());
if (!CPChange && !BRChange)
break;
MadeChange = true;
}
- DEBUG(dbgs() << '\n'; dumpBBs());
+ LLVM_DEBUG(dbgs() << '\n'; dumpBBs());
BBInfo.clear();
WaterList.clear();
@@ -581,10 +583,10 @@ MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// Add a new CPEntry, but no corresponding CPUser yet.
CPEntries.emplace_back(1, CPEntry(CPEMI, i));
++NumCPEs;
- DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
- << Size << ", align = " << Align <<'\n');
+ LLVM_DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
+ << Size << ", align = " << Align << '\n');
}
- DEBUG(BB->dump());
+ LLVM_DEBUG(BB->dump());
}
/// BBHasFallthrough - Return true if the specified basic block can fallthrough
@@ -987,7 +989,7 @@ bool MipsConstantIslands::isCPEntryInRange
unsigned CPEOffset = getOffsetOf(CPEMI);
if (DoDump) {
- DEBUG({
+ LLVM_DEBUG({
unsigned Block = MI->getParent()->getNumber();
const BasicBlockInfo &BBI = BBInfo[Block];
dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
@@ -1060,7 +1062,7 @@ int MipsConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
// Check to see if the CPE is already in-range.
if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
true)) {
- DEBUG(dbgs() << "In range\n");
+ LLVM_DEBUG(dbgs() << "In range\n");
return 1;
}
@@ -1076,8 +1078,8 @@ int MipsConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
continue;
if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
U.NegOk)) {
- DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
- << CPEs[i].CPI << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[i].CPI << "\n");
// Point the CPUser node to the replacement
U.CPEMI = CPEs[i].CPEMI;
// Change the CPI in the instruction operand to refer to the clone.
@@ -1114,7 +1116,7 @@ int MipsConstantIslands::findLongFormInRangeCPEntry
if (isCPEntryInRange(UserMI, UserOffset, CPEMI,
U.getLongFormMaxDisp(), U.NegOk,
true)) {
- DEBUG(dbgs() << "In range\n");
+ LLVM_DEBUG(dbgs() << "In range\n");
UserMI->setDesc(TII->get(U.getLongFormOpcode()));
U.setMaxDisp(U.getLongFormMaxDisp());
return 2; // instruction is longer length now
@@ -1132,8 +1134,8 @@ int MipsConstantIslands::findLongFormInRangeCPEntry
continue;
if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI,
U.getLongFormMaxDisp(), U.NegOk)) {
- DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
- << CPEs[i].CPI << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[i].CPI << "\n");
// Point the CPUser node to the replacement
U.CPEMI = CPEs[i].CPEMI;
// Change the CPI in the instruction operand to refer to the clone.
@@ -1198,8 +1200,8 @@ bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// This is the least amount of required padding seen so far.
BestGrowth = Growth;
WaterIter = IP;
- DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
- << " Growth=" << Growth << '\n');
+ LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
+ << " Growth=" << Growth << '\n');
// Keep looking unless it is perfect.
if (BestGrowth == 0)
@@ -1237,8 +1239,8 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
- DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
- << format(", expected CPE offset %#x\n", CPEOffset));
+ LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
+ << format(", expected CPE offset %#x\n", CPEOffset));
NewMBB = &*++UserMBB->getIterator();
// Add an unconditional branch from UserMBB to fallthrough block. Record
// it for branch lengthening; this new branch will not get out of range,
@@ -1264,16 +1266,16 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned LogAlign = MF->getAlignment();
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
- DEBUG(dbgs() << format("Split in middle of big block before %#x",
- BaseInsertOffset));
+ LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
+ BaseInsertOffset));
// The 4 in the following is for the unconditional branch we'll be inserting
// Alignment of the island is handled
// inside isOffsetInRange.
BaseInsertOffset -= 4;
- DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
- << " la=" << LogAlign << '\n');
+ LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
+ << " la=" << LogAlign << '\n');
// This could point off the end of the block if we've already got constant
// pool entries following this block; only the last one is in the water list.
@@ -1281,7 +1283,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
// long unconditional).
if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
BaseInsertOffset = UserBBI.postOffset() - 8;
- DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
+ LLVM_DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
}
unsigned EndInsertOffset = BaseInsertOffset + 4 +
CPEMI->getOperand(2).getImm();
@@ -1337,7 +1339,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
MachineBasicBlock *NewMBB;
water_iterator IP;
if (findAvailableWater(U, UserOffset, IP)) {
- DEBUG(dbgs() << "Found water in range\n");
+ LLVM_DEBUG(dbgs() << "Found water in range\n");
MachineBasicBlock *WaterBB = *IP;
// If the original WaterList entry was "new water" on this iteration,
@@ -1356,7 +1358,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
result = findLongFormInRangeCPEntry(U, UserOffset);
if (result != 0) return true;
}
- DEBUG(dbgs() << "No water found\n");
+ LLVM_DEBUG(dbgs() << "No water found\n");
createNewWater(CPUserIndex, UserOffset, NewMBB);
// splitBlockBeforeInstr adds to WaterList, which is important when it is
@@ -1415,8 +1417,9 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
break;
}
- DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
- << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
+ LLVM_DEBUG(
+ dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
+ << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
return true;
}
@@ -1471,11 +1474,11 @@ bool MipsConstantIslands::isBBInRange
unsigned BrOffset = getOffsetOf(MI) + PCAdj;
unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
- DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
- << " from " << printMBBReference(*MI->getParent())
- << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
- << " to " << DestOffset << " offset "
- << int(DestOffset - BrOffset) << "\t" << *MI);
+ LLVM_DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
+ << " from " << printMBBReference(*MI->getParent())
+ << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
+ << " to " << DestOffset << " offset "
+ << int(DestOffset - BrOffset) << "\t" << *MI);
if (BrOffset <= DestOffset) {
// Branch before the Dest.
@@ -1540,7 +1543,7 @@ MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
HasFarJump = true;
++NumUBrFixed;
- DEBUG(dbgs() << " Changed B to long jump " << *MI);
+ LLVM_DEBUG(dbgs() << " Changed B to long jump " << *MI);
return true;
}
@@ -1595,8 +1598,9 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MachineBasicBlock *NewDest =
BMI->getOperand(BMITargetOperand).getMBB();
if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
- DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
- << *BMI);
+ LLVM_DEBUG(
+ dbgs() << " Invert Bcc condition and swap its destination with "
+ << *BMI);
MI->setDesc(TII->get(OppositeBranchOpcode));
BMI->getOperand(BMITargetOperand).setMBB(DestBB);
MI->getOperand(TargetOperand).setMBB(NewDest);
@@ -1616,9 +1620,9 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
}
MachineBasicBlock *NextBB = &*++MBB->getIterator();
- DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
- << " also invert condition and change dest. to "
- << printMBBReference(*NextBB) << "\n");
+ LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
+ << " also invert condition and change dest. to "
+ << printMBBReference(*NextBB) << "\n");
// Insert a new conditional branch and a new unconditional branch.
// Also update the ImmBranch as well as adding a new entry for the new branch.
@@ -1654,19 +1658,19 @@ void MipsConstantIslands::prescanForConstants() {
switch(I->getDesc().getOpcode()) {
case Mips::LwConstant32: {
PrescannedForConstants = true;
- DEBUG(dbgs() << "constant island constant " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "constant island constant " << *I << "\n");
J = I->getNumOperands();
- DEBUG(dbgs() << "num operands " << J << "\n");
+ LLVM_DEBUG(dbgs() << "num operands " << J << "\n");
MachineOperand& Literal = I->getOperand(1);
if (Literal.isImm()) {
int64_t V = Literal.getImm();
- DEBUG(dbgs() << "literal " << V << "\n");
+ LLVM_DEBUG(dbgs() << "literal " << V << "\n");
Type *Int32Ty =
Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, V);
unsigned index = MCP->getConstantPoolIndex(C, 4);
I->getOperand(2).ChangeToImmediate(index);
- DEBUG(dbgs() << "constant island constant " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "constant island constant " << *I << "\n");
I->setDesc(TII->get(Mips::LwRxPcTcp16));
I->RemoveOperand(1);
I->RemoveOperand(1);
diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp
index 7685555a1e72..4b460551539a 100644
--- a/lib/Target/Mips/MipsFastISel.cpp
+++ b/lib/Target/Mips/MipsFastISel.cpp
@@ -1001,11 +1001,12 @@ bool MipsFastISel::selectFPExt(const Instruction *I) {
bool MipsFastISel::selectSelect(const Instruction *I) {
assert(isa<SelectInst>(I) && "Expected a select instruction.");
- DEBUG(dbgs() << "selectSelect\n");
+ LLVM_DEBUG(dbgs() << "selectSelect\n");
MVT VT;
if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
- DEBUG(dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
+ LLVM_DEBUG(
+ dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
return false;
}
@@ -1288,22 +1289,22 @@ bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
}
bool MipsFastISel::fastLowerArguments() {
- DEBUG(dbgs() << "fastLowerArguments\n");
+ LLVM_DEBUG(dbgs() << "fastLowerArguments\n");
if (!FuncInfo.CanLowerReturn) {
- DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
+ LLVM_DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
return false;
}
const Function *F = FuncInfo.Fn;
if (F->isVarArg()) {
- DEBUG(dbgs() << ".. gave up (varargs)\n");
+ LLVM_DEBUG(dbgs() << ".. gave up (varargs)\n");
return false;
}
CallingConv::ID CC = F->getCallingConv();
if (CC != CallingConv::C) {
- DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
+ LLVM_DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
return false;
}
@@ -1329,21 +1330,21 @@ bool MipsFastISel::fastLowerArguments() {
if (FormalArg.hasAttribute(Attribute::InReg) ||
FormalArg.hasAttribute(Attribute::StructRet) ||
FormalArg.hasAttribute(Attribute::ByVal)) {
- DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
+ LLVM_DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
return false;
}
Type *ArgTy = FormalArg.getType();
if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
- DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
+ LLVM_DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
return false;
}
EVT ArgVT = TLI.getValueType(DL, ArgTy);
- DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
- << ArgVT.getEVTString() << "\n");
+ LLVM_DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
+ << ArgVT.getEVTString() << "\n");
if (!ArgVT.isSimple()) {
- DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
return false;
}
@@ -1355,16 +1356,16 @@ bool MipsFastISel::fastLowerArguments() {
!FormalArg.hasAttribute(Attribute::ZExt)) {
// It must be any extend, this shouldn't happen for clang-generated IR
// so just fall back on SelectionDAG.
- DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
return false;
}
if (NextGPR32 == GPR32ArgRegs.end()) {
- DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
return false;
}
- DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
+ LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
// Allocating any GPR32 prohibits further use of floating point arguments.
@@ -1375,16 +1376,16 @@ bool MipsFastISel::fastLowerArguments() {
case MVT::i32:
if (FormalArg.hasAttribute(Attribute::ZExt)) {
// The O32 ABI does not permit a zero-extended i32.
- DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
return false;
}
if (NextGPR32 == GPR32ArgRegs.end()) {
- DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
return false;
}
- DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
+ LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
// Allocating any GPR32 prohibits further use of floating point arguments.
@@ -1394,14 +1395,14 @@ bool MipsFastISel::fastLowerArguments() {
case MVT::f32:
if (UnsupportedFPMode) {
- DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
return false;
}
if (NextFGR32 == FGR32ArgRegs.end()) {
- DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
return false;
}
- DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
+ LLVM_DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
// Allocating an FGR32 also allocates the super-register AFGR64, and
// ABI rules require us to skip the corresponding GPR32.
@@ -1413,14 +1414,14 @@ bool MipsFastISel::fastLowerArguments() {
case MVT::f64:
if (UnsupportedFPMode) {
- DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
return false;
}
if (NextAFGR64 == AFGR64ArgRegs.end()) {
- DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
return false;
}
- DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
+ LLVM_DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
// Allocating an FGR32 also allocates the super-register AFGR64, and
// ABI rules require us to skip the corresponding GPR32 pair.
@@ -1433,7 +1434,7 @@ bool MipsFastISel::fastLowerArguments() {
break;
default:
- DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
return false;
}
}
@@ -1648,7 +1649,7 @@ bool MipsFastISel::selectRet(const Instruction *I) {
const Function &F = *I->getParent()->getParent();
const ReturnInst *Ret = cast<ReturnInst>(I);
- DEBUG(dbgs() << "selectRet\n");
+ LLVM_DEBUG(dbgs() << "selectRet\n");
if (!FuncInfo.CanLowerReturn)
return false;
@@ -1712,7 +1713,7 @@ bool MipsFastISel::selectRet(const Instruction *I) {
// Do not handle FGR64 returns for now.
if (RVVT == MVT::f64 && UnsupportedFPMode) {
- DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
+ LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
return false;
}
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 8a56df2df6af..7e843512f6e5 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -217,7 +217,7 @@ void MipsDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
return;
}
diff --git a/lib/Target/Mips/MipsInstructionSelector.cpp b/lib/Target/Mips/MipsInstructionSelector.cpp
index 56c442a9ee2f..a07940b5538f 100644
--- a/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -83,8 +83,8 @@ static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
return true;
diff --git a/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp b/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
index ceacaa498389..0be5abca03ff 100644
--- a/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
@@ -42,7 +42,7 @@ namespace {
}
bool MipsModuleDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(errs() << "In MipsModuleDAGToDAGISel::runMachineFunction\n");
+ LLVM_DEBUG(errs() << "In MipsModuleDAGToDAGISel::runMachineFunction\n");
auto &TPC = getAnalysis<TargetPassConfig>();
auto &TM = TPC.getTM<MipsTargetMachine>();
TM.resetSubtarget(&MF);
diff --git a/lib/Target/Mips/MipsOs16.cpp b/lib/Target/Mips/MipsOs16.cpp
index 7ee45c28a7d0..4edcb3132ada 100644
--- a/lib/Target/Mips/MipsOs16.cpp
+++ b/lib/Target/Mips/MipsOs16.cpp
@@ -96,7 +96,8 @@ static bool needsFP(Function &F) {
;
}
if (const CallInst *CI = dyn_cast<CallInst>(I)) {
- DEBUG(dbgs() << "Working on call" << "\n");
+ LLVM_DEBUG(dbgs() << "Working on call"
+ << "\n");
Function &F_ = *CI->getCalledFunction();
if (needsFPFromSig(F_))
return true;
@@ -110,9 +111,10 @@ bool MipsOs16::runOnModule(Module &M) {
bool usingMask = Mips32FunctionMask.length() > 0;
bool doneUsingMask = false; // this will make it stop repeating
- DEBUG(dbgs() << "Run on Module MipsOs16 \n" << Mips32FunctionMask << "\n");
+ LLVM_DEBUG(dbgs() << "Run on Module MipsOs16 \n"
+ << Mips32FunctionMask << "\n");
if (usingMask)
- DEBUG(dbgs() << "using mask \n" << Mips32FunctionMask << "\n");
+ LLVM_DEBUG(dbgs() << "using mask \n" << Mips32FunctionMask << "\n");
unsigned int functionIndex = 0;
bool modified = false;
@@ -121,14 +123,14 @@ bool MipsOs16::runOnModule(Module &M) {
if (F.isDeclaration())
continue;
- DEBUG(dbgs() << "Working on " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Working on " << F.getName() << "\n");
if (usingMask) {
if (!doneUsingMask) {
if (functionIndex == Mips32FunctionMask.length())
functionIndex = 0;
switch (Mips32FunctionMask[functionIndex]) {
case '1':
- DEBUG(dbgs() << "mask forced mips32: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "mask forced mips32: " << F.getName() << "\n");
F.addFnAttr("nomips16");
break;
case '.':
@@ -142,11 +144,11 @@ bool MipsOs16::runOnModule(Module &M) {
}
else {
if (needsFP(F)) {
- DEBUG(dbgs() << "os16 forced mips32: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "os16 forced mips32: " << F.getName() << "\n");
F.addFnAttr("nomips16");
}
else {
- DEBUG(dbgs() << "os16 forced mips16: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "os16 forced mips16: " << F.getName() << "\n");
F.addFnAttr("mips16");
}
}
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index 0e0d82270c89..3c108c2ba9b7 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -275,18 +275,20 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
- DEBUG(errs() << "\nFunction : " << MF.getName() << "\n";
- errs() << "<--------->\n" << MI);
+ LLVM_DEBUG(errs() << "\nFunction : " << MF.getName() << "\n";
+ errs() << "<--------->\n"
+ << MI);
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
uint64_t stackSize = MF.getFrameInfo().getStackSize();
int64_t spOffset = MF.getFrameInfo().getObjectOffset(FrameIndex);
- DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"
- << "spOffset : " << spOffset << "\n"
- << "stackSize : " << stackSize << "\n"
- << "alignment : "
- << MF.getFrameInfo().getObjectAlignment(FrameIndex) << "\n");
+ LLVM_DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"
+ << "spOffset : " << spOffset << "\n"
+ << "stackSize : " << stackSize << "\n"
+ << "alignment : "
+ << MF.getFrameInfo().getObjectAlignment(FrameIndex)
+ << "\n");
eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);
}
diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp
index 12916c0cbfdd..a62ae07c12b6 100644
--- a/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -1040,11 +1040,9 @@ MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
}
if (Val.getNode()) {
- DEBUG(dbgs() << "\nMipsSE DAG Combine:\n";
- N->printrWithDepth(dbgs(), &DAG);
- dbgs() << "\n=> \n";
- Val.getNode()->printrWithDepth(dbgs(), &DAG);
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\nMipsSE DAG Combine:\n";
+ N->printrWithDepth(dbgs(), &DAG); dbgs() << "\n=> \n";
+ Val.getNode()->printrWithDepth(dbgs(), &DAG); dbgs() << "\n");
return Val;
}
diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp
index 9b89d4077a77..c61b9db80847 100644
--- a/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -202,7 +202,8 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
Offset = SPOffset + (int64_t)StackSize;
Offset += MI.getOperand(OpNo + 1).getImm();
- DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
+ LLVM_DEBUG(errs() << "Offset : " << Offset << "\n"
+ << "<--------->\n");
if (!MI.isDebugValue()) {
// Make sure Offset fits within the field available.
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 1cbffbf8b345..a4d159c411f7 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -234,7 +234,8 @@ MipsSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS,
}
bool MipsSubtarget::useConstantIslands() {
- DEBUG(dbgs() << "use constant islands " << Mips16ConstantIslands << "\n");
+ LLVM_DEBUG(dbgs() << "use constant islands " << Mips16ConstantIslands
+ << "\n");
return Mips16ConstantIslands;
}
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index 2b9ac2cd900f..d360a9ebe361 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -207,7 +207,7 @@ MipsTargetMachine::getSubtargetImpl(const Function &F) const {
}
void MipsTargetMachine::resetSubtarget(MachineFunction *MF) {
- DEBUG(dbgs() << "resetSubtarget\n");
+ LLVM_DEBUG(dbgs() << "resetSubtarget\n");
Subtarget = const_cast<MipsSubtarget *>(getSubtargetImpl(MF->getFunction()));
MF->setSubtarget(Subtarget);
@@ -275,12 +275,12 @@ void MipsPassConfig::addPreRegAlloc() {
TargetTransformInfo
MipsTargetMachine::getTargetTransformInfo(const Function &F) {
if (Subtarget->allowMixed16_32()) {
- DEBUG(errs() << "No Target Transform Info Pass Added\n");
+ LLVM_DEBUG(errs() << "No Target Transform Info Pass Added\n");
// FIXME: This is no longer necessary as the TTI returned is per-function.
return TargetTransformInfo(F.getParent()->getDataLayout());
}
- DEBUG(errs() << "Target Transform Info Pass Added\n");
+ LLVM_DEBUG(errs() << "Target Transform Info Pass Added\n");
return TargetTransformInfo(BasicTTIImpl(this, F));
}
diff --git a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
index 1402033b9e60..5bb4fc3edd09 100644
--- a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
+++ b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
@@ -97,10 +97,12 @@ AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
Offset = (Offset + Align - 1) / Align * Align;
if (StackGrowsDown) {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
+ << "]\n");
MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
} else {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
+ << "]\n");
MFI.setObjectOffset(FrameIdx, Offset);
Offset += MFI.getObjectSize(FrameIdx);
}
@@ -163,14 +165,14 @@ NVPTXPrologEpilogPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Adjust to alignment boundary.
Offset = (Offset + Align - 1) / Align * Align;
- DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
+ LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
// Resolve offsets for objects in the local block.
for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
- DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
- FIOffset << "]\n");
+ LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
+ << "]\n");
MFI.setObjectOffset(Entry.first, FIOffset);
}
// Allocate the local block
diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp
index 152b665d0fdc..60971b48adfc 100644
--- a/lib/Target/NVPTX/NVVMReflect.cpp
+++ b/lib/Target/NVPTX/NVVMReflect.cpp
@@ -153,7 +153,7 @@ bool NVVMReflect::runOnFunction(Function &F) {
StringRef ReflectArg = cast<ConstantDataSequential>(Operand)->getAsString();
ReflectArg = ReflectArg.substr(0, ReflectArg.size() - 1);
- DEBUG(dbgs() << "Arg of _reflect : " << ReflectArg << "\n");
+ LLVM_DEBUG(dbgs() << "Arg of _reflect : " << ReflectArg << "\n");
int ReflectVal = 0; // The default value is 0
if (ReflectArg == "__CUDA_FTZ") {
diff --git a/lib/Target/Nios2/Nios2ISelDAGToDAG.cpp b/lib/Target/Nios2/Nios2ISelDAGToDAG.cpp
index acc5cee055c5..5f9679466115 100644
--- a/lib/Target/Nios2/Nios2ISelDAGToDAG.cpp
+++ b/lib/Target/Nios2/Nios2ISelDAGToDAG.cpp
@@ -61,7 +61,7 @@ void Nios2DAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
return;
}
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index e84908503691..4f6ebbeb4cad 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -771,11 +771,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
else if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
MOSymbol = getSymbol(GV);
- DEBUG(
- unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
- assert((GVFlags & PPCII::MO_NLP_FLAG) &&
- "LDtocL used on symbol that could be accessed directly is "
- "invalid. Must match ADDIStocHA."));
+ LLVM_DEBUG(
+ unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
+ assert((GVFlags & PPCII::MO_NLP_FLAG) &&
+ "LDtocL used on symbol that could be accessed directly is "
+ "invalid. Must match ADDIStocHA."));
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
}
@@ -800,11 +800,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
- DEBUG(
- unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
- assert (
- !(GVFlags & PPCII::MO_NLP_FLAG) &&
- "Interposable definitions must use indirect access."));
+ LLVM_DEBUG(unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
+ assert(!(GVFlags & PPCII::MO_NLP_FLAG) &&
+ "Interposable definitions must use indirect access."));
MOSymbol = getSymbol(GV);
} else if (MO.isCPI()) {
MOSymbol = GetCPISymbol(MO.getIndex());
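The PPCAsmPrinter hunks above go a step further and keep a local declaration plus an assert inside the macro, which only builds cleanly because the whole body vanishes when debug logging is compiled out. A short sketch of that pattern, assuming hypothetical stand-ins (computeFlags(), the flag value, and the DEBUG_TYPE are illustration only, not the real PPC helpers):

#include "llvm/Support/Debug.h"
#include <cassert>

using namespace llvm;

// Hypothetical debug type for illustration.
#define DEBUG_TYPE "ppc-asmprinter"

static unsigned computeFlags() { return 0x1; } // stand-in for the real query

static void checkIndirectAccess() {
  // The declaration and the assert live entirely inside the macro, so release
  // (NDEBUG) builds drop both and see no unused-variable fallout; asserts
  // builds evaluate them only when this DEBUG_TYPE is enabled via -debug or
  // -debug-only=ppc-asmprinter.
  LLVM_DEBUG(unsigned Flags = computeFlags();
             assert((Flags & 0x1) &&
                    "interposable definitions must use indirect access"));
}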
diff --git a/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index 347f66d17095..bbb977f090c5 100644
--- a/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -236,18 +236,18 @@ void PPCBranchCoalescing::initialize(MachineFunction &MF) {
///\return true if and only if the branch can be coalesced, false otherwise
///
bool PPCBranchCoalescing::canCoalesceBranch(CoalescingCandidateInfo &Cand) {
- DEBUG(dbgs() << "Determine if branch block " << Cand.BranchBlock->getNumber()
- << " can be coalesced:");
+ LLVM_DEBUG(dbgs() << "Determine if branch block "
+ << Cand.BranchBlock->getNumber() << " can be coalesced:");
MachineBasicBlock *FalseMBB = nullptr;
if (TII->analyzeBranch(*Cand.BranchBlock, Cand.BranchTargetBlock, FalseMBB,
Cand.Cond)) {
- DEBUG(dbgs() << "TII unable to Analyze Branch - skip\n");
+ LLVM_DEBUG(dbgs() << "TII unable to Analyze Branch - skip\n");
return false;
}
for (auto &I : Cand.BranchBlock->terminators()) {
- DEBUG(dbgs() << "Looking at terminator : " << I << "\n");
+ LLVM_DEBUG(dbgs() << "Looking at terminator : " << I << "\n");
if (!I.isBranch())
continue;
@@ -265,14 +265,14 @@ bool PPCBranchCoalescing::canCoalesceBranch(CoalescingCandidateInfo &Cand) {
// must then be extended to prove that none of the implicit operands are
// changed in the blocks that are combined during coalescing.
if (I.getNumOperands() != I.getNumExplicitOperands()) {
- DEBUG(dbgs() << "Terminator contains implicit operands - skip : " << I
- << "\n");
+ LLVM_DEBUG(dbgs() << "Terminator contains implicit operands - skip : "
+ << I << "\n");
return false;
}
}
if (Cand.BranchBlock->isEHPad() || Cand.BranchBlock->hasEHPadSuccessor()) {
- DEBUG(dbgs() << "EH Pad - skip\n");
+ LLVM_DEBUG(dbgs() << "EH Pad - skip\n");
return false;
}
@@ -280,13 +280,13 @@ bool PPCBranchCoalescing::canCoalesceBranch(CoalescingCandidateInfo &Cand) {
// FalseMBB is null, and BranchTargetBlock is a successor to BranchBlock)
if (!Cand.BranchTargetBlock || FalseMBB ||
!Cand.BranchBlock->isSuccessor(Cand.BranchTargetBlock)) {
- DEBUG(dbgs() << "Does not form a triangle - skip\n");
+ LLVM_DEBUG(dbgs() << "Does not form a triangle - skip\n");
return false;
}
// Ensure there are only two successors
if (Cand.BranchBlock->succ_size() != 2) {
- DEBUG(dbgs() << "Does not have 2 successors - skip\n");
+ LLVM_DEBUG(dbgs() << "Does not have 2 successors - skip\n");
return false;
}
@@ -305,18 +305,19 @@ bool PPCBranchCoalescing::canCoalesceBranch(CoalescingCandidateInfo &Cand) {
assert(Succ && "Expecting a valid fall-through block\n");
if (!Succ->empty()) {
- DEBUG(dbgs() << "Fall-through block contains code -- skip\n");
- return false;
+ LLVM_DEBUG(dbgs() << "Fall-through block contains code -- skip\n");
+ return false;
}
if (!Succ->isSuccessor(Cand.BranchTargetBlock)) {
- DEBUG(dbgs()
- << "Successor of fall through block is not branch taken block\n");
- return false;
+ LLVM_DEBUG(
+ dbgs()
+ << "Successor of fall through block is not branch taken block\n");
+ return false;
}
Cand.FallThroughBlock = Succ;
- DEBUG(dbgs() << "Valid Candidate\n");
+ LLVM_DEBUG(dbgs() << "Valid Candidate\n");
return true;
}
@@ -331,7 +332,7 @@ bool PPCBranchCoalescing::identicalOperands(
ArrayRef<MachineOperand> OpList1, ArrayRef<MachineOperand> OpList2) const {
if (OpList1.size() != OpList2.size()) {
- DEBUG(dbgs() << "Operand list is different size\n");
+ LLVM_DEBUG(dbgs() << "Operand list is different size\n");
return false;
}
@@ -339,8 +340,8 @@ bool PPCBranchCoalescing::identicalOperands(
const MachineOperand &Op1 = OpList1[i];
const MachineOperand &Op2 = OpList2[i];
- DEBUG(dbgs() << "Op1: " << Op1 << "\n"
- << "Op2: " << Op2 << "\n");
+ LLVM_DEBUG(dbgs() << "Op1: " << Op1 << "\n"
+ << "Op2: " << Op2 << "\n");
if (Op1.isIdenticalTo(Op2)) {
// filter out instructions with physical-register uses
@@ -348,10 +349,10 @@ bool PPCBranchCoalescing::identicalOperands(
// If the physical register is constant then we can assume the value
// has not changed between uses.
&& !(Op1.isUse() && MRI->isConstantPhysReg(Op1.getReg()))) {
- DEBUG(dbgs() << "The operands are not provably identical.\n");
+ LLVM_DEBUG(dbgs() << "The operands are not provably identical.\n");
return false;
}
- DEBUG(dbgs() << "Op1 and Op2 are identical!\n");
+ LLVM_DEBUG(dbgs() << "Op1 and Op2 are identical!\n");
continue;
}
@@ -364,14 +365,14 @@ bool PPCBranchCoalescing::identicalOperands(
MachineInstr *Op1Def = MRI->getVRegDef(Op1.getReg());
MachineInstr *Op2Def = MRI->getVRegDef(Op2.getReg());
if (TII->produceSameValue(*Op1Def, *Op2Def, MRI)) {
- DEBUG(dbgs() << "Op1Def: " << *Op1Def << " and " << *Op2Def
- << " produce the same value!\n");
+ LLVM_DEBUG(dbgs() << "Op1Def: " << *Op1Def << " and " << *Op2Def
+ << " produce the same value!\n");
} else {
- DEBUG(dbgs() << "Operands produce different values\n");
+ LLVM_DEBUG(dbgs() << "Operands produce different values\n");
return false;
}
} else {
- DEBUG(dbgs() << "The operands are not provably identical.\n");
+ LLVM_DEBUG(dbgs() << "The operands are not provably identical.\n");
return false;
}
}
@@ -395,7 +396,7 @@ void PPCBranchCoalescing::moveAndUpdatePHIs(MachineBasicBlock *SourceMBB,
MachineBasicBlock::iterator ME = SourceMBB->getFirstNonPHI();
if (MI == ME) {
- DEBUG(dbgs() << "SourceMBB contains no PHI instructions.\n");
+ LLVM_DEBUG(dbgs() << "SourceMBB contains no PHI instructions.\n");
return;
}
@@ -425,19 +426,19 @@ bool PPCBranchCoalescing::canMoveToBeginning(const MachineInstr &MI,
const MachineBasicBlock &TargetMBB
) const {
- DEBUG(dbgs() << "Checking if " << MI << " can move to beginning of "
- << TargetMBB.getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Checking if " << MI << " can move to beginning of "
+ << TargetMBB.getNumber() << "\n");
for (auto &Def : MI.defs()) { // Looking at Def
for (auto &Use : MRI->use_instructions(Def.getReg())) {
if (Use.isPHI() && Use.getParent() == &TargetMBB) {
- DEBUG(dbgs() << " *** used in a PHI -- cannot move ***\n");
- return false;
+ LLVM_DEBUG(dbgs() << " *** used in a PHI -- cannot move ***\n");
+ return false;
}
}
}
- DEBUG(dbgs() << " Safe to move to the beginning.\n");
+ LLVM_DEBUG(dbgs() << " Safe to move to the beginning.\n");
return true;
}
@@ -456,22 +457,23 @@ bool PPCBranchCoalescing::canMoveToEnd(const MachineInstr &MI,
const MachineBasicBlock &TargetMBB
) const {
- DEBUG(dbgs() << "Checking if " << MI << " can move to end of "
- << TargetMBB.getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Checking if " << MI << " can move to end of "
+ << TargetMBB.getNumber() << "\n");
for (auto &Use : MI.uses()) {
if (Use.isReg() && TargetRegisterInfo::isVirtualRegister(Use.getReg())) {
MachineInstr *DefInst = MRI->getVRegDef(Use.getReg());
if (DefInst->isPHI() && DefInst->getParent() == MI.getParent()) {
- DEBUG(dbgs() << " *** Cannot move this instruction ***\n");
+ LLVM_DEBUG(dbgs() << " *** Cannot move this instruction ***\n");
return false;
} else {
- DEBUG(dbgs() << " *** def is in another block -- safe to move!\n");
+ LLVM_DEBUG(
+ dbgs() << " *** def is in another block -- safe to move!\n");
}
}
}
- DEBUG(dbgs() << " Safe to move to the end.\n");
+ LLVM_DEBUG(dbgs() << " Safe to move to the end.\n");
return true;
}
@@ -541,15 +543,17 @@ bool PPCBranchCoalescing::canMerge(CoalescingCandidateInfo &SourceRegion,
for (auto &Def : I->defs())
for (auto &Use : MRI->use_instructions(Def.getReg())) {
if (Use.isPHI() && Use.getParent() == SourceRegion.BranchTargetBlock) {
- DEBUG(dbgs() << "PHI " << *I << " defines register used in another "
- "PHI within branch target block -- can't merge\n");
+ LLVM_DEBUG(dbgs()
+ << "PHI " << *I
+ << " defines register used in another "
+ "PHI within branch target block -- can't merge\n");
NumPHINotMoved++;
return false;
}
if (Use.getParent() == SourceRegion.BranchBlock) {
- DEBUG(dbgs() << "PHI " << *I
- << " defines register used in this "
- "block -- all must move down\n");
+ LLVM_DEBUG(dbgs() << "PHI " << *I
+ << " defines register used in this "
+ "block -- all must move down\n");
SourceRegion.MustMoveDown = true;
}
}
@@ -562,13 +566,13 @@ bool PPCBranchCoalescing::canMerge(CoalescingCandidateInfo &SourceRegion,
E = SourceRegion.BranchBlock->end();
I != E; ++I) {
if (!canMoveToBeginning(*I, *SourceRegion.BranchTargetBlock)) {
- DEBUG(dbgs() << "Instruction " << *I
- << " cannot move down - must move up!\n");
+ LLVM_DEBUG(dbgs() << "Instruction " << *I
+ << " cannot move down - must move up!\n");
SourceRegion.MustMoveUp = true;
}
if (!canMoveToEnd(*I, *TargetRegion.BranchBlock)) {
- DEBUG(dbgs() << "Instruction " << *I
- << " cannot move up - must move down!\n");
+ LLVM_DEBUG(dbgs() << "Instruction " << *I
+ << " cannot move up - must move down!\n");
SourceRegion.MustMoveDown = true;
}
}
@@ -719,10 +723,10 @@ bool PPCBranchCoalescing::runOnMachineFunction(MachineFunction &MF) {
bool didSomething = false;
- DEBUG(dbgs() << "******** Branch Coalescing ********\n");
+ LLVM_DEBUG(dbgs() << "******** Branch Coalescing ********\n");
initialize(MF);
- DEBUG(dbgs() << "Function: "; MF.dump(); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Function: "; MF.dump(); dbgs() << "\n");
CoalescingCandidateInfo Cand1, Cand2;
// Walk over blocks and find candidates to merge
@@ -752,24 +756,27 @@ bool PPCBranchCoalescing::runOnMachineFunction(MachineFunction &MF) {
"Branch-taken block should post-dominate first candidate");
if (!identicalOperands(Cand1.Cond, Cand2.Cond)) {
- DEBUG(dbgs() << "Blocks " << Cand1.BranchBlock->getNumber() << " and "
- << Cand2.BranchBlock->getNumber()
- << " have different branches\n");
+ LLVM_DEBUG(dbgs() << "Blocks " << Cand1.BranchBlock->getNumber()
+ << " and " << Cand2.BranchBlock->getNumber()
+ << " have different branches\n");
break;
}
if (!canMerge(Cand2, Cand1)) {
- DEBUG(dbgs() << "Cannot merge blocks " << Cand1.BranchBlock->getNumber()
- << " and " << Cand2.BranchBlock->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Cannot merge blocks "
+ << Cand1.BranchBlock->getNumber() << " and "
+ << Cand2.BranchBlock->getNumber() << "\n");
NumBlocksNotCoalesced++;
continue;
}
- DEBUG(dbgs() << "Merging blocks " << Cand1.BranchBlock->getNumber()
- << " and " << Cand1.BranchTargetBlock->getNumber() << "\n");
+ LLVM_DEBUG(dbgs() << "Merging blocks " << Cand1.BranchBlock->getNumber()
+ << " and " << Cand1.BranchTargetBlock->getNumber()
+ << "\n");
MergedCandidates = mergeCandidates(Cand2, Cand1);
if (MergedCandidates)
didSomething = true;
- DEBUG(dbgs() << "Function after merging: "; MF.dump(); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Function after merging: "; MF.dump();
+ dbgs() << "\n");
} while (MergedCandidates);
}
@@ -779,6 +786,6 @@ bool PPCBranchCoalescing::runOnMachineFunction(MachineFunction &MF) {
MF.verify(nullptr, "Error in code produced by branch coalescing");
#endif // NDEBUG
- DEBUG(dbgs() << "Finished Branch Coalescing\n");
+ LLVM_DEBUG(dbgs() << "Finished Branch Coalescing\n");
return didSomething;
}
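
For context on these hunks: each one swaps DEBUG(...) for LLVM_DEBUG(...) without changing behavior. The macro runs its argument only in builds without NDEBUG, and only when debug output is requested with -debug or -debug-only=<DEBUG_TYPE>; the argument may be a whole statement sequence, which is why forms like LLVM_DEBUG(dbgs() << "Function: "; MF.dump(); dbgs() << "\n"); are legal. A simplified sketch of the expansion, paraphrased from llvm/Support/Debug.h (the in-tree definition goes through DEBUG_WITH_TYPE and may differ in detail):

#ifndef NDEBUG
#define LLVM_DEBUG(X)                                                          \
  do {                                                                         \
    if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(DEBUG_TYPE)) {         \
      X;                                                                       \
    }                                                                          \
  } while (false)
#else
#define LLVM_DEBUG(X)                                                          \
  do {                                                                         \
  } while (false)
#endif

In practice the output from these passes is obtained with something like llc -debug-only=<DEBUG_TYPE>, where <DEBUG_TYPE> is whatever string the pass defines before including Debug.h.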
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index cd794247a7e4..c96df538a6a5 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -507,7 +507,7 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
// Process nested loops first.
for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
MadeChange |= convertToCTRLoop(*I);
- DEBUG(dbgs() << "Nested loop converted\n");
+ LLVM_DEBUG(dbgs() << "Nested loop converted\n");
}
// If a nested loop has been converted, then we can't convert this loop.
@@ -567,8 +567,8 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
IE = ExitingBlocks.end(); I != IE; ++I) {
const SCEV *EC = SE->getExitCount(L, *I);
- DEBUG(dbgs() << "Exit Count for " << *L << " from block " <<
- (*I)->getName() << ": " << *EC << "\n");
+ LLVM_DEBUG(dbgs() << "Exit Count for " << *L << " from block "
+ << (*I)->getName() << ": " << *EC << "\n");
if (isa<SCEVCouldNotCompute>(EC))
continue;
if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
@@ -642,7 +642,8 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
if (!Preheader)
return MadeChange;
- DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName()
+ << "\n");
// Insert the count into the preheader and replace the condition used by the
// selected branch.
@@ -730,11 +731,12 @@ check_block:
}
if (I != BI && clobbersCTR(*I)) {
- DEBUG(dbgs() << printMBBReference(*MBB) << " (" << MBB->getFullName()
- << ") instruction " << *I << " clobbers CTR, invalidating "
- << printMBBReference(*BI->getParent()) << " ("
- << BI->getParent()->getFullName() << ") instruction " << *BI
- << "\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " (" << MBB->getFullName()
+ << ") instruction " << *I
+ << " clobbers CTR, invalidating "
+ << printMBBReference(*BI->getParent()) << " ("
+ << BI->getParent()->getFullName() << ") instruction "
+ << *BI << "\n");
return false;
}
@@ -748,10 +750,10 @@ check_block:
if (CheckPreds) {
queue_preds:
if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
- DEBUG(dbgs() << "Unable to find a MTCTR instruction for "
- << printMBBReference(*BI->getParent()) << " ("
- << BI->getParent()->getFullName() << ") instruction " << *BI
- << "\n");
+ LLVM_DEBUG(dbgs() << "Unable to find a MTCTR instruction for "
+ << printMBBReference(*BI->getParent()) << " ("
+ << BI->getParent()->getFullName() << ") instruction "
+ << *BI << "\n");
return false;
}
diff --git a/lib/Target/PowerPC/PPCExpandISEL.cpp b/lib/Target/PowerPC/PPCExpandISEL.cpp
index b00e98b63e34..6151bee61331 100644
--- a/lib/Target/PowerPC/PPCExpandISEL.cpp
+++ b/lib/Target/PowerPC/PPCExpandISEL.cpp
@@ -126,11 +126,11 @@ public:
#endif
bool runOnMachineFunction(MachineFunction &MF) override {
- DEBUG(dbgs() << "Function: "; MF.dump(); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Function: "; MF.dump(); dbgs() << "\n");
initialize(MF);
if (!collectISELInstructions()) {
- DEBUG(dbgs() << "No ISEL instructions in this function\n");
+ LLVM_DEBUG(dbgs() << "No ISEL instructions in this function\n");
return false;
}
@@ -170,9 +170,10 @@ bool PPCExpandISEL::collectISELInstructions() {
#ifndef NDEBUG
void PPCExpandISEL::DumpISELInstructions() const {
for (const auto &I : ISELInstructions) {
- DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(I.first)) << ":\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(I.first))
+ << ":\n");
for (const auto &VI : I.second)
- DEBUG(dbgs() << " "; VI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " "; VI->print(dbgs()));
}
}
#endif
@@ -192,9 +193,10 @@ void PPCExpandISEL::expandAndMergeISELs() {
bool ExpandISELEnabled = isExpandISELEnabled(*MF);
for (auto &BlockList : ISELInstructions) {
- DEBUG(dbgs() << "Expanding ISEL instructions in "
- << printMBBReference(*MF->getBlockNumbered(BlockList.first))
- << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Expanding ISEL instructions in "
+ << printMBBReference(*MF->getBlockNumbered(BlockList.first))
+ << "\n");
BlockISELList &CurrentISELList = BlockList.second;
auto I = CurrentISELList.begin();
auto E = CurrentISELList.end();
@@ -210,7 +212,8 @@ void PPCExpandISEL::expandAndMergeISELs() {
// as it would be ISEL %R0, %ZERO, %R0, %CRN.
if (useSameRegister(Dest, TrueValue) &&
useSameRegister(Dest, FalseValue)) {
- DEBUG(dbgs() << "Remove redudant ISEL instruction: " << **I << "\n");
+ LLVM_DEBUG(dbgs() << "Remove redudant ISEL instruction: " << **I
+ << "\n");
// FIXME: if the CR field used has no other uses, we could eliminate the
// instruction that defines it. This would have to be done manually
// since this pass runs too late to run DCE after it.
@@ -223,8 +226,9 @@ void PPCExpandISEL::expandAndMergeISELs() {
// condition as it would be ISEL %RX, %ZERO, %R0, %CRN, which makes it
// safe to fold ISEL to MR(OR) instead of ADDI.
MachineBasicBlock *MBB = (*I)->getParent();
- DEBUG(dbgs() << "Fold the ISEL instruction to an unconditonal copy:\n");
- DEBUG(dbgs() << "ISEL: " << **I << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Fold the ISEL instruction to an unconditonal copy:\n");
+ LLVM_DEBUG(dbgs() << "ISEL: " << **I << "\n");
NumFolded++;
// Note: we're using both the TrueValue and FalseValue operands so as
// not to lose the kill flag if it is set on either of them.
@@ -235,8 +239,8 @@ void PPCExpandISEL::expandAndMergeISELs() {
(*I)->eraseFromParent();
I++;
} else if (ExpandISELEnabled) { // Normal cases expansion enabled
- DEBUG(dbgs() << "Expand ISEL instructions:\n");
- DEBUG(dbgs() << "ISEL: " << **I << "\n");
+ LLVM_DEBUG(dbgs() << "Expand ISEL instructions:\n");
+ LLVM_DEBUG(dbgs() << "ISEL: " << **I << "\n");
BlockISELList SubISELList;
SubISELList.push_back(*I++);
// Collect the ISELs that can be merged together.
@@ -244,7 +248,7 @@ void PPCExpandISEL::expandAndMergeISELs() {
// may be redundant or foldable to a register copy. So we still keep
// the handleSpecialCases() downstream to handle them.
while (I != E && canMerge(SubISELList.back(), *I)) {
- DEBUG(dbgs() << "ISEL: " << **I << "\n");
+ LLVM_DEBUG(dbgs() << "ISEL: " << **I << "\n");
SubISELList.push_back(*I++);
}
@@ -264,7 +268,7 @@ void PPCExpandISEL::handleSpecialCases(BlockISELList &BIL,
auto MI = BIL.begin();
while (MI != BIL.end()) {
assert(isISEL(**MI) && "Expecting an ISEL instruction");
- DEBUG(dbgs() << "ISEL: " << **MI << "\n");
+ LLVM_DEBUG(dbgs() << "ISEL: " << **MI << "\n");
MachineOperand &Dest = (*MI)->getOperand(0);
MachineOperand &TrueValue = (*MI)->getOperand(1);
@@ -281,7 +285,7 @@ void PPCExpandISEL::handleSpecialCases(BlockISELList &BIL,
// Special case 1, all registers used by ISEL are the same one.
if (!IsADDIInstRequired && !IsORIInstRequired) {
- DEBUG(dbgs() << "Remove redudant ISEL instruction.");
+ LLVM_DEBUG(dbgs() << "Remove redudant ISEL instruction.");
// FIXME: if the CR field used has no other uses, we could eliminate the
// instruction that defines it. This would have to be done manually
// since this pass runs too late to run DCE after it.
@@ -300,7 +304,8 @@ void PPCExpandISEL::handleSpecialCases(BlockISELList &BIL,
// be zero. In this case, the useSameRegister method will return false,
// thereby preventing this ISEL from being folded.
if (useSameRegister(TrueValue, FalseValue) && (BIL.size() == 1)) {
- DEBUG(dbgs() << "Fold the ISEL instruction to an unconditonal copy.");
+ LLVM_DEBUG(
+ dbgs() << "Fold the ISEL instruction to an unconditonal copy.");
NumFolded++;
// Note: we're using both the TrueValue and FalseValue operands so as
// not to lose the kill flag if it is set on either of them.
@@ -439,11 +444,10 @@ void PPCExpandISEL::populateBlocks(BlockISELList &BIL) {
// condition is false
MachineOperand &ConditionRegister = MI->getOperand(3); // Condition
- DEBUG(dbgs() << "Dest: " << Dest << "\n");
- DEBUG(dbgs() << "TrueValue: " << TrueValue << "\n");
- DEBUG(dbgs() << "FalseValue: " << FalseValue << "\n");
- DEBUG(dbgs() << "ConditionRegister: " << ConditionRegister << "\n");
-
+ LLVM_DEBUG(dbgs() << "Dest: " << Dest << "\n");
+ LLVM_DEBUG(dbgs() << "TrueValue: " << TrueValue << "\n");
+ LLVM_DEBUG(dbgs() << "FalseValue: " << FalseValue << "\n");
+ LLVM_DEBUG(dbgs() << "ConditionRegister: " << ConditionRegister << "\n");
// If the Dest Register and True Value Register are not the same one, we
// need the True Block.
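
The counters bumped in the hunks above (NumFolded here, NumPHINotMoved and NumBlocksNotCoalesced in the branch-coalescing file earlier) are not debug output: they are llvm::Statistic objects, printed when the tool is run with -stats. A minimal sketch of the usual declaration pattern, assuming the STATISTIC macro from llvm/ADT/Statistic.h; the counter name and DEBUG_TYPE string below are illustrative rather than copied from this file:

#include "llvm/ADT/Statistic.h"

#define DEBUG_TYPE "ppc-expand-isel" // illustrative; STATISTIC files the counter under DEBUG_TYPE

STATISTIC(NumFoldedISELs, "Number of ISEL instructions folded into a copy");

// On the interesting code path:
//   ++NumFoldedISELs;   // shows up in the report produced by -stats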
diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 4c718f8883ef..551220466901 100644
--- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -180,9 +180,9 @@ void PPCDispatchGroupSBHazardRecognizer::EmitInstruction(SUnit *SU) {
CurGroup.clear();
CurSlots = CurBranches = 0;
} else {
- DEBUG(dbgs() << "**** Adding to dispatch group: SU(" <<
- SU->NodeNum << "): ");
- DEBUG(DAG->dumpNode(SU));
+ LLVM_DEBUG(dbgs() << "**** Adding to dispatch group: SU(" << SU->NodeNum
+ << "): ");
+ LLVM_DEBUG(DAG->dumpNode(SU));
unsigned NSlots;
bool MustBeFirst = mustComeFirst(MCID, NSlots);
@@ -268,7 +268,7 @@ PPCHazardRecognizer970::PPCHazardRecognizer970(const ScheduleDAG &DAG)
}
void PPCHazardRecognizer970::EndDispatchGroup() {
- DEBUG(errs() << "=== Start of dispatch group\n");
+ LLVM_DEBUG(errs() << "=== Start of dispatch group\n");
NumIssued = 0;
// Structural hazard info.
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index b4818731cfd0..518defef3736 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -518,10 +518,10 @@ static unsigned getBranchHint(unsigned PCC, FunctionLoweringInfo *FuncInfo,
if (std::max(TProb, FProb) / Threshold < std::min(TProb, FProb))
return PPC::BR_NO_HINT;
- DEBUG(dbgs() << "Use branch hint for '" << FuncInfo->Fn->getName() << "::"
- << BB->getName() << "'\n"
- << " -> " << TBB->getName() << ": " << TProb << "\n"
- << " -> " << FBB->getName() << ": " << FProb << "\n");
+ LLVM_DEBUG(dbgs() << "Use branch hint for '" << FuncInfo->Fn->getName()
+ << "::" << BB->getName() << "'\n"
+ << " -> " << TBB->getName() << ": " << TProb << "\n"
+ << " -> " << FBB->getName() << ": " << FProb << "\n");
const BasicBlockSDNode *BBDN = cast<BasicBlockSDNode>(DestMBB);
@@ -1131,8 +1131,8 @@ class BitPermutationSelector {
BitGroup(SDValue V, unsigned R, unsigned S, unsigned E)
: V(V), RLAmt(R), StartIdx(S), EndIdx(E), Repl32(false), Repl32CR(false),
Repl32Coalesced(false) {
- DEBUG(dbgs() << "\tbit group for " << V.getNode() << " RLAmt = " << R <<
- " [" << S << ", " << E << "]\n");
+ LLVM_DEBUG(dbgs() << "\tbit group for " << V.getNode() << " RLAmt = " << R
+ << " [" << S << ", " << E << "]\n");
}
};
@@ -1366,7 +1366,7 @@ class BitPermutationSelector {
BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 &&
BitGroups[0].V == BitGroups[BitGroups.size()-1].V &&
BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) {
- DEBUG(dbgs() << "\tcombining final bit group with initial one\n");
+ LLVM_DEBUG(dbgs() << "\tcombining final bit group with initial one\n");
BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx;
BitGroups.erase(BitGroups.begin());
}
@@ -1453,9 +1453,9 @@ class BitPermutationSelector {
BG.Repl32 = true;
- DEBUG(dbgs() << "\t32-bit replicated bit group for " <<
- BG.V.getNode() << " RLAmt = " << BG.RLAmt <<
- " [" << BG.StartIdx << ", " << BG.EndIdx << "]\n");
+ LLVM_DEBUG(dbgs() << "\t32-bit replicated bit group for "
+ << BG.V.getNode() << " RLAmt = " << BG.RLAmt << " ["
+ << BG.StartIdx << ", " << BG.EndIdx << "]\n");
}
}
}
@@ -1469,11 +1469,11 @@ class BitPermutationSelector {
if (I->Repl32 && IP->Repl32 && I->V == IP->V && I->RLAmt == IP->RLAmt &&
I->StartIdx == (IP->EndIdx + 1) % 64 && I != IP) {
- DEBUG(dbgs() << "\tcombining 32-bit replicated bit group for " <<
- I->V.getNode() << " RLAmt = " << I->RLAmt <<
- " [" << I->StartIdx << ", " << I->EndIdx <<
- "] with group with range [" <<
- IP->StartIdx << ", " << IP->EndIdx << "]\n");
+ LLVM_DEBUG(dbgs() << "\tcombining 32-bit replicated bit group for "
+ << I->V.getNode() << " RLAmt = " << I->RLAmt << " ["
+ << I->StartIdx << ", " << I->EndIdx
+ << "] with group with range [" << IP->StartIdx << ", "
+ << IP->EndIdx << "]\n");
IP->EndIdx = I->EndIdx;
IP->Repl32CR = IP->Repl32CR || I->Repl32CR;
@@ -1497,12 +1497,12 @@ class BitPermutationSelector {
IP->EndIdx == 31 && IN->StartIdx == 0 && I != IP &&
IsAllLow32(*I)) {
- DEBUG(dbgs() << "\tcombining bit group for " <<
- I->V.getNode() << " RLAmt = " << I->RLAmt <<
- " [" << I->StartIdx << ", " << I->EndIdx <<
- "] with 32-bit replicated groups with ranges [" <<
- IP->StartIdx << ", " << IP->EndIdx << "] and [" <<
- IN->StartIdx << ", " << IN->EndIdx << "]\n");
+ LLVM_DEBUG(dbgs() << "\tcombining bit group for " << I->V.getNode()
+ << " RLAmt = " << I->RLAmt << " [" << I->StartIdx
+ << ", " << I->EndIdx
+ << "] with 32-bit replicated groups with ranges ["
+ << IP->StartIdx << ", " << IP->EndIdx << "] and ["
+ << IN->StartIdx << ", " << IN->EndIdx << "]\n");
if (IP == IN) {
// There is only one other group; change it to cover the whole
@@ -1611,15 +1611,15 @@ class BitPermutationSelector {
(unsigned) (ANDIMask != 0 && ANDISMask != 0) +
(unsigned) (bool) Res;
- DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() <<
- " RL: " << VRI.RLAmt << ":" <<
- "\n\t\t\tisel using masking: " << NumAndInsts <<
- " using rotates: " << VRI.NumGroups << "\n");
+ LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode()
+ << " RL: " << VRI.RLAmt << ":"
+ << "\n\t\t\tisel using masking: " << NumAndInsts
+ << " using rotates: " << VRI.NumGroups << "\n");
if (NumAndInsts >= VRI.NumGroups)
continue;
- DEBUG(dbgs() << "\t\t\t\tusing masking\n");
+ LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n");
if (InstCnt) *InstCnt += NumAndInsts;
@@ -1967,10 +1967,10 @@ class BitPermutationSelector {
FirstBG = false;
}
- DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() <<
- " RL: " << VRI.RLAmt << (VRI.Repl32 ? " (32):" : ":") <<
- "\n\t\t\tisel using masking: " << NumAndInsts <<
- " using rotates: " << NumRLInsts << "\n");
+ LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode()
+ << " RL: " << VRI.RLAmt << (VRI.Repl32 ? " (32):" : ":")
+ << "\n\t\t\tisel using masking: " << NumAndInsts
+ << " using rotates: " << NumRLInsts << "\n");
// When we'd use andi/andis, we bias toward using the rotates (andi only
// has a record form, and is cracked on POWER cores). However, when using
@@ -1984,7 +1984,7 @@ class BitPermutationSelector {
if ((Use32BitInsts || MoreBG) && NumAndInsts == NumRLInsts)
continue;
- DEBUG(dbgs() << "\t\t\t\tusing masking\n");
+ LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n");
if (InstCnt) *InstCnt += NumAndInsts;
@@ -2235,9 +2235,9 @@ public:
return nullptr;
Bits = std::move(*Result.second);
- DEBUG(dbgs() << "Considering bit-permutation-based instruction"
- " selection for: ");
- DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "Considering bit-permutation-based instruction"
+ " selection for: ");
+ LLVM_DEBUG(N->dump(CurDAG));
// Fill it RLAmt and set HasZeros.
computeRotationAmounts();
@@ -2254,21 +2254,21 @@ public:
// masking, we only insert the non-zero parts of the result at every step.
unsigned InstCnt, InstCntLateMask;
- DEBUG(dbgs() << "\tEarly masking:\n");
+ LLVM_DEBUG(dbgs() << "\tEarly masking:\n");
SDNode *RN = Select(N, false, &InstCnt);
- DEBUG(dbgs() << "\t\tisel would use " << InstCnt << " instructions\n");
+ LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCnt << " instructions\n");
- DEBUG(dbgs() << "\tLate masking:\n");
+ LLVM_DEBUG(dbgs() << "\tLate masking:\n");
SDNode *RNLM = Select(N, true, &InstCntLateMask);
- DEBUG(dbgs() << "\t\tisel would use " << InstCntLateMask <<
- " instructions\n");
+ LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCntLateMask
+ << " instructions\n");
if (InstCnt <= InstCntLateMask) {
- DEBUG(dbgs() << "\tUsing early-masking for isel\n");
+ LLVM_DEBUG(dbgs() << "\tUsing early-masking for isel\n");
return RN;
}
- DEBUG(dbgs() << "\tUsing late-masking for isel\n");
+ LLVM_DEBUG(dbgs() << "\tUsing late-masking for isel\n");
return RNLM;
}
};
@@ -5190,11 +5190,11 @@ void PPCDAGToDAGISel::PreprocessISelDAG() {
foldBoolExts(Res, N);
if (Res) {
- DEBUG(dbgs() << "PPC DAG preprocessing replacing:\nOld: ");
- DEBUG(N->dump(CurDAG));
- DEBUG(dbgs() << "\nNew: ");
- DEBUG(Res.getNode()->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "PPC DAG preprocessing replacing:\nOld: ");
+ LLVM_DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nNew: ");
+ LLVM_DEBUG(Res.getNode()->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
MadeChange = true;
@@ -5271,13 +5271,13 @@ void PPCDAGToDAGISel::SwapAllSelectUsers(SDNode *N) {
User->getOperand(2),
User->getOperand(1));
- DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
- DEBUG(User->dump(CurDAG));
- DEBUG(dbgs() << "\nNew: ");
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
+ LLVM_DEBUG(User->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nNew: ");
+ LLVM_DEBUG(ResNode->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
- ReplaceUses(User, ResNode);
+ ReplaceUses(User, ResNode);
}
}
@@ -5685,11 +5685,11 @@ void PPCDAGToDAGISel::PeepholeCROps() {
SwapAllSelectUsers(MachineNode);
if (ResNode != MachineNode) {
- DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
- DEBUG(MachineNode->dump(CurDAG));
- DEBUG(dbgs() << "\nNew: ");
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "CR Peephole replacing:\nOld: ");
+ LLVM_DEBUG(MachineNode->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nNew: ");
+ LLVM_DEBUG(ResNode->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
ReplaceUses(MachineNode, ResNode);
IsModified = true;
@@ -5984,25 +5984,25 @@ void PPCDAGToDAGISel::PeepholePPC64ZExt() {
else
NewVTs.push_back(VTs.VTs[i]);
- DEBUG(dbgs() << "PPC64 ZExt Peephole morphing:\nOld: ");
- DEBUG(PN->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "PPC64 ZExt Peephole morphing:\nOld: ");
+ LLVM_DEBUG(PN->dump(CurDAG));
CurDAG->SelectNodeTo(PN, NewOpcode, CurDAG->getVTList(NewVTs), Ops);
- DEBUG(dbgs() << "\nNew: ");
- DEBUG(PN->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\nNew: ");
+ LLVM_DEBUG(PN->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
}
// Now we replace the original zero extend and its associated INSERT_SUBREG
// with the value feeding the INSERT_SUBREG (which has now been promoted to
// return an i64).
- DEBUG(dbgs() << "PPC64 ZExt Peephole replacing:\nOld: ");
- DEBUG(N->dump(CurDAG));
- DEBUG(dbgs() << "\nNew: ");
- DEBUG(Op32.getNode()->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "PPC64 ZExt Peephole replacing:\nOld: ");
+ LLVM_DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nNew: ");
+ LLVM_DEBUG(Op32.getNode()->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
ReplaceUses(N, Op32.getNode());
}
@@ -6177,11 +6177,11 @@ void PPCDAGToDAGISel::PeepholePPC64() {
// immediate and substitute them into the load or store. If
// needed, update the target flags for the immediate operand to
// reflect the necessary relocation information.
- DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
- DEBUG(Base->dump(CurDAG));
- DEBUG(dbgs() << "\nN: ");
- DEBUG(N->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
+ LLVM_DEBUG(Base->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nN: ");
+ LLVM_DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
// If the relocation information isn't already present on the
// immediate operand, add it now.
@@ -6194,7 +6194,7 @@ void PPCDAGToDAGISel::PeepholePPC64() {
if (GV->getAlignment() < 4 &&
(StorageOpcode == PPC::LD || StorageOpcode == PPC::STD ||
StorageOpcode == PPC::LWA || (Offset % 4) != 0)) {
- DEBUG(dbgs() << "Rejected this candidate for alignment.\n\n");
+ LLVM_DEBUG(dbgs() << "Rejected this candidate for alignment.\n\n");
continue;
}
ImmOpnd = CurDAG->getTargetGlobalAddress(GV, dl, MVT::i64, Offset, Flags);
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index abd7513a3918..1f787994d9d2 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -5133,15 +5133,15 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
assert(isa<GlobalAddressSDNode>(Callee) &&
"Callee should be an llvm::Function object.");
- DEBUG(
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
- const unsigned Width = 80 - strlen("TCO caller: ")
- - strlen(", callee linkage: 0, 0");
- dbgs() << "TCO caller: "
- << left_justify(DAG.getMachineFunction().getName(), Width)
- << ", callee linkage: "
- << GV->getVisibility() << ", " << GV->getLinkage() << "\n"
- );
+ LLVM_DEBUG(
+ const GlobalValue *GV =
+ cast<GlobalAddressSDNode>(Callee)->getGlobal();
+ const unsigned Width =
+ 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
+ dbgs() << "TCO caller: "
+ << left_justify(DAG.getMachineFunction().getName(), Width)
+ << ", callee linkage: " << GV->getVisibility() << ", "
+ << GV->getLinkage() << "\n");
}
}
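
One detail worth noting in the LowerCall hunk just above: the locals GV and Width are declared inside the LLVM_DEBUG(...) argument, so they exist only in asserts builds and cannot trigger unused-variable warnings when the macro compiles away. A small illustration of the same idiom; the function, names, and DEBUG_TYPE are invented for the example:

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "example"

static void reportSize(unsigned Bits) {
  // Bytes is declared inside the macro argument, so the local (and the
  // division that initializes it) is compiled only when NDEBUG is not set.
  LLVM_DEBUG(unsigned Bytes = Bits / 8;
             llvm::dbgs() << Bits << " bits (" << Bytes << " bytes)\n");
}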
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 4ec98e2da985..c095ccbc1ee6 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2418,16 +2418,17 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
CompareUseMI.RemoveOperand(2);
continue;
}
- DEBUG(dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");
- DEBUG(DefMI->dump(); MI.dump(); CompareUseMI.dump());
- DEBUG(dbgs() << "Is converted to:\n");
+ LLVM_DEBUG(
+ dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");
+ LLVM_DEBUG(DefMI->dump(); MI.dump(); CompareUseMI.dump());
+ LLVM_DEBUG(dbgs() << "Is converted to:\n");
// Convert to copy and remove unneeded operands.
CompareUseMI.setDesc(get(PPC::COPY));
CompareUseMI.RemoveOperand(3);
CompareUseMI.RemoveOperand(RegToCopy == TrueReg ? 2 : 1);
CmpIselsConverted++;
Changed = true;
- DEBUG(CompareUseMI.dump());
+ LLVM_DEBUG(CompareUseMI.dump());
}
if (Changed)
return true;
@@ -2528,10 +2529,10 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
}
if (ReplaceWithLI) {
- DEBUG(dbgs() << "Replacing instruction:\n");
- DEBUG(MI.dump());
- DEBUG(dbgs() << "Fed by:\n");
- DEBUG(DefMI->dump());
+ LLVM_DEBUG(dbgs() << "Replacing instruction:\n");
+ LLVM_DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Fed by:\n");
+ LLVM_DEBUG(DefMI->dump());
LoadImmediateInfo LII;
LII.Imm = NewImm;
LII.Is64Bit = Is64BitLI;
@@ -2541,8 +2542,8 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
if (KilledDef && SetCR)
*KilledDef = nullptr;
replaceInstrWithLI(MI, LII);
- DEBUG(dbgs() << "With:\n");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "With:\n");
+ LLVM_DEBUG(MI.dump());
return true;
}
return false;
diff --git a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
index 6bfccddda65e..c360fca91c80 100644
--- a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
+++ b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
@@ -247,7 +247,7 @@ bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
if (!L->empty())
return MadeChange;
- DEBUG(dbgs() << "PIP: Examining: " << *L << "\n");
+ LLVM_DEBUG(dbgs() << "PIP: Examining: " << *L << "\n");
BasicBlock *Header = L->getHeader();
@@ -332,7 +332,7 @@ bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
if (!LoopPredecessor)
return MadeChange;
- DEBUG(dbgs() << "PIP: Found " << Buckets.size() << " buckets\n");
+ LLVM_DEBUG(dbgs() << "PIP: Found " << Buckets.size() << " buckets\n");
SmallSet<BasicBlock *, 16> BBChanged;
for (unsigned i = 0, e = Buckets.size(); i != e; ++i) {
@@ -381,7 +381,7 @@ bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
if (!BasePtrSCEV->isAffine())
continue;
- DEBUG(dbgs() << "PIP: Transforming: " << *BasePtrSCEV << "\n");
+ LLVM_DEBUG(dbgs() << "PIP: Transforming: " << *BasePtrSCEV << "\n");
assert(BasePtrSCEV->getLoop() == L &&
"AddRec for the wrong loop?");
@@ -407,7 +407,7 @@ bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
if (!isSafeToExpand(BasePtrStartSCEV, *SE))
continue;
- DEBUG(dbgs() << "PIP: New start is: " << *BasePtrStartSCEV << "\n");
+ LLVM_DEBUG(dbgs() << "PIP: New start is: " << *BasePtrStartSCEV << "\n");
if (alreadyPrepared(L, MemI, BasePtrStartSCEV, BasePtrIncSCEV))
continue;
diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp
index 6f44e3adcfe0..79bba982858c 100644
--- a/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -119,8 +119,8 @@ void PPCMIPeephole::initialize(MachineFunction &MFParm) {
MRI = &MF->getRegInfo();
MDT = &getAnalysis<MachineDominatorTree>();
TII = MF->getSubtarget<PPCSubtarget>().getInstrInfo();
- DEBUG(dbgs() << "*** PowerPC MI peephole pass ***\n\n");
- DEBUG(MF->dump());
+ LLVM_DEBUG(dbgs() << "*** PowerPC MI peephole pass ***\n\n");
+ LLVM_DEBUG(MF->dump());
}
static MachineInstr *getVRegDefOrNull(MachineOperand *Op,
@@ -238,8 +238,8 @@ bool PPCMIPeephole::simplifyCode(void) {
if (TII->convertToImmediateForm(MI)) {
// We don't erase anything in case the def has other uses. Let DCE
// remove it if it can be removed.
- DEBUG(dbgs() << "Converted instruction to imm form: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Converted instruction to imm form: ");
+ LLVM_DEBUG(MI.dump());
NumConvertedToImmediateForm++;
SomethingChanged = true;
Simplified = true;
@@ -324,10 +324,9 @@ bool PPCMIPeephole::simplifyCode(void) {
};
if (DefMI && (Immed == 0 || Immed == 3)) {
if (DefOpc == PPC::LXVDSX || isConversionOfLoadAndSplat()) {
- DEBUG(dbgs()
- << "Optimizing load-and-splat/splat "
- "to load-and-splat/copy: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Optimizing load-and-splat/splat "
+ "to load-and-splat/copy: ");
+ LLVM_DEBUG(MI.dump());
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
MI.getOperand(0).getReg())
.add(MI.getOperand(1));
@@ -346,10 +345,9 @@ bool PPCMIPeephole::simplifyCode(void) {
TRI->lookThruCopyLike(DefMI->getOperand(2).getReg(), MRI);
if ((FeedImmed == 0 || FeedImmed == 3) && FeedReg1 == FeedReg2) {
- DEBUG(dbgs()
- << "Optimizing splat/swap or splat/splat "
- "to splat/copy: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Optimizing splat/swap or splat/splat "
+ "to splat/copy: ");
+ LLVM_DEBUG(MI.dump());
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
MI.getOperand(0).getReg())
.add(MI.getOperand(1));
@@ -362,8 +360,8 @@ bool PPCMIPeephole::simplifyCode(void) {
// parameter.
else if ((Immed == 0 || Immed == 3)
&& FeedImmed == 2 && FeedReg1 == FeedReg2) {
- DEBUG(dbgs() << "Optimizing swap/splat => splat: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Optimizing swap/splat => splat: ");
+ LLVM_DEBUG(MI.dump());
MI.getOperand(1).setReg(DefMI->getOperand(1).getReg());
MI.getOperand(2).setReg(DefMI->getOperand(2).getReg());
MI.getOperand(3).setImm(3 - Immed);
@@ -373,8 +371,8 @@ bool PPCMIPeephole::simplifyCode(void) {
// If this is a swap fed by a swap, we can replace it
// with a copy from the first swap's input.
else if (Immed == 2 && FeedImmed == 2 && FeedReg1 == FeedReg2) {
- DEBUG(dbgs() << "Optimizing swap/swap => copy: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Optimizing swap/swap => copy: ");
+ LLVM_DEBUG(MI.dump());
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
MI.getOperand(0).getReg())
.add(DefMI->getOperand(1));
@@ -389,8 +387,8 @@ bool PPCMIPeephole::simplifyCode(void) {
DefMI->getOperand(0).setReg(MI.getOperand(0).getReg());
ToErase = &MI;
Simplified = true;
- DEBUG(dbgs() << "Removing redundant splat: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Removing redundant splat: ");
+ LLVM_DEBUG(MI.dump());
}
}
}
@@ -429,8 +427,8 @@ bool PPCMIPeephole::simplifyCode(void) {
// If the instruction[s] that feed this splat have already splat
// the value, this splat is redundant.
if (AlreadySplat) {
- DEBUG(dbgs() << "Changing redundant splat to a copy: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Changing redundant splat to a copy: ");
+ LLVM_DEBUG(MI.dump());
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
MI.getOperand(0).getReg())
.add(MI.getOperand(OpNo));
@@ -448,14 +446,14 @@ bool PPCMIPeephole::simplifyCode(void) {
if (ShiftOp1 == ShiftOp2) {
unsigned NewElem = (SplatImm + ShiftImm) & 0x3;
if (MRI->hasOneNonDBGUse(ShiftRes)) {
- DEBUG(dbgs() << "Removing redundant shift: ");
- DEBUG(DefMI->dump());
+ LLVM_DEBUG(dbgs() << "Removing redundant shift: ");
+ LLVM_DEBUG(DefMI->dump());
ToErase = DefMI;
}
Simplified = true;
- DEBUG(dbgs() << "Changing splat immediate from " << SplatImm <<
- " to " << NewElem << " in instruction: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Changing splat immediate from " << SplatImm
+ << " to " << NewElem << " in instruction: ");
+ LLVM_DEBUG(MI.dump());
MI.getOperand(1).setReg(ShiftOp1);
MI.getOperand(2).setImm(NewElem);
}
@@ -499,12 +497,12 @@ bool PPCMIPeephole::simplifyCode(void) {
if (Use.getOperand(i).isReg() &&
Use.getOperand(i).getReg() == FRSPDefines)
Use.getOperand(i).setReg(ConvReg1);
- DEBUG(dbgs() << "Removing redundant FRSP:\n");
- DEBUG(RoundInstr->dump());
- DEBUG(dbgs() << "As it feeds instruction:\n");
- DEBUG(MI.dump());
- DEBUG(dbgs() << "Through instruction:\n");
- DEBUG(DefMI->dump());
+ LLVM_DEBUG(dbgs() << "Removing redundant FRSP:\n");
+ LLVM_DEBUG(RoundInstr->dump());
+ LLVM_DEBUG(dbgs() << "As it feeds instruction:\n");
+ LLVM_DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Through instruction:\n");
+ LLVM_DEBUG(DefMI->dump());
RoundInstr->eraseFromParent();
}
};
@@ -552,11 +550,11 @@ bool PPCMIPeephole::simplifyCode(void) {
};
unsigned Opc = getSextLoadOp(is64Bit(MI.getOpcode()),
isXForm(SrcMI->getOpcode()));
- DEBUG(dbgs() << "Zero-extending load\n");
- DEBUG(SrcMI->dump());
- DEBUG(dbgs() << "and sign-extension\n");
- DEBUG(MI.dump());
- DEBUG(dbgs() << "are merged into sign-extending load\n");
+ LLVM_DEBUG(dbgs() << "Zero-extending load\n");
+ LLVM_DEBUG(SrcMI->dump());
+ LLVM_DEBUG(dbgs() << "and sign-extension\n");
+ LLVM_DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "are merged into sign-extending load\n");
SrcMI->setDesc(TII->get(Opc));
SrcMI->getOperand(0).setReg(MI.getOperand(0).getReg());
ToErase = &MI;
@@ -596,11 +594,11 @@ bool PPCMIPeephole::simplifyCode(void) {
};
unsigned Opc = getSextLoadOp(is64Bit(MI.getOpcode()),
isXForm(SrcMI->getOpcode()));
- DEBUG(dbgs() << "Zero-extending load\n");
- DEBUG(SrcMI->dump());
- DEBUG(dbgs() << "and sign-extension\n");
- DEBUG(MI.dump());
- DEBUG(dbgs() << "are merged into sign-extending load\n");
+ LLVM_DEBUG(dbgs() << "Zero-extending load\n");
+ LLVM_DEBUG(SrcMI->dump());
+ LLVM_DEBUG(dbgs() << "and sign-extension\n");
+ LLVM_DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "are merged into sign-extending load\n");
SrcMI->setDesc(TII->get(Opc));
SrcMI->getOperand(0).setReg(MI.getOperand(0).getReg());
ToErase = &MI;
@@ -610,7 +608,7 @@ bool PPCMIPeephole::simplifyCode(void) {
TII->isSignExtended(*SrcMI)) {
// We can eliminate EXTSW if the input is known to be already
// sign-extended.
- DEBUG(dbgs() << "Removing redundant sign-extension\n");
+ LLVM_DEBUG(dbgs() << "Removing redundant sign-extension\n");
unsigned TmpReg =
MF->getRegInfo().createVirtualRegister(&PPC::G8RCRegClass);
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::IMPLICIT_DEF),
@@ -661,7 +659,7 @@ bool PPCMIPeephole::simplifyCode(void) {
unsigned KnownZeroCount = getKnownLeadingZeroCount(SrcMI, TII);
if (MI.getOperand(3).getImm() <= KnownZeroCount) {
- DEBUG(dbgs() << "Removing redundant zero-extension\n");
+ LLVM_DEBUG(dbgs() << "Removing redundant zero-extension\n");
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
MI.getOperand(0).getReg())
.addReg(SrcReg);
@@ -727,8 +725,8 @@ bool PPCMIPeephole::simplifyCode(void) {
MachineInstr *DefPhiMI = getVRegDefOrNull(&Op1, MRI);
for (unsigned i = 1; i < DefPhiMI->getNumOperands(); i += 2) {
MachineInstr *LiMI = getVRegDefOrNull(&DefPhiMI->getOperand(i), MRI);
- DEBUG(dbgs() << "Optimizing LI to ADDI: ");
- DEBUG(LiMI->dump());
+ LLVM_DEBUG(dbgs() << "Optimizing LI to ADDI: ");
+ LLVM_DEBUG(LiMI->dump());
// There could be repeated registers in the PHI, e.g: %1 =
// PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've
@@ -746,12 +744,12 @@ bool PPCMIPeephole::simplifyCode(void) {
MachineInstrBuilder(*LiMI->getParent()->getParent(), *LiMI)
.addReg(DominatorReg)
.addImm(LiImm); // restore the imm of LI
- DEBUG(LiMI->dump());
+ LLVM_DEBUG(LiMI->dump());
}
// Replace ADD with COPY
- DEBUG(dbgs() << "Optimizing ADD to COPY: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Optimizing ADD to COPY: ");
+ LLVM_DEBUG(MI.dump());
BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
MI.getOperand(0).getReg())
.add(Op1);
@@ -1197,11 +1195,11 @@ bool PPCMIPeephole::eliminateRedundantCompare(void) {
continue;
}
- DEBUG(dbgs() << "Optimize two pairs of compare and branch:\n");
- DEBUG(CMPI1->dump());
- DEBUG(BI1->dump());
- DEBUG(CMPI2->dump());
- DEBUG(BI2->dump());
+ LLVM_DEBUG(dbgs() << "Optimize two pairs of compare and branch:\n");
+ LLVM_DEBUG(CMPI1->dump());
+ LLVM_DEBUG(BI1->dump());
+ LLVM_DEBUG(CMPI2->dump());
+ LLVM_DEBUG(BI2->dump());
// We adjust opcode, predicates and immediate as we determined above.
if (NewOpCode != 0 && NewOpCode != CMPI1->getOpcode()) {
@@ -1260,15 +1258,15 @@ bool PPCMIPeephole::eliminateRedundantCompare(void) {
BI2->getOperand(1).setIsKill(true);
BI1->getOperand(1).setIsKill(false);
- DEBUG(dbgs() << "into a compare and two branches:\n");
- DEBUG(CMPI1->dump());
- DEBUG(BI1->dump());
- DEBUG(BI2->dump());
+ LLVM_DEBUG(dbgs() << "into a compare and two branches:\n");
+ LLVM_DEBUG(CMPI1->dump());
+ LLVM_DEBUG(BI1->dump());
+ LLVM_DEBUG(BI2->dump());
if (IsPartiallyRedundant) {
- DEBUG(dbgs() << "The following compare is moved into "
- << printMBBReference(*MBBtoMoveCmp)
- << " to handle partial redundancy.\n");
- DEBUG(CMPI2->dump());
+ LLVM_DEBUG(dbgs() << "The following compare is moved into "
+ << printMBBReference(*MBBtoMoveCmp)
+ << " to handle partial redundancy.\n");
+ LLVM_DEBUG(CMPI2->dump());
}
Simplified = true;
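
Most rewrites in the PPCMIPeephole hunks above share one shape: emit a COPY that reuses the original destination register, then remember the old instruction so it can be erased once the walk over the block is done. A condensed sketch of that shape, assuming a generic machine-function pass; names and header paths are approximate rather than lifted from this file:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Rewrite MI into "COPY dest, src" in place. The caller erases MI afterwards;
// deferring the erase keeps the iterator over the basic block valid.
static void rewriteToCopy(MachineBasicBlock &MBB, MachineInstr &MI,
                          const TargetInstrInfo *TII) {
  BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(TargetOpcode::COPY),
          MI.getOperand(0).getReg())
      .add(MI.getOperand(1)); // .add() preserves flags such as kill on the source
}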
diff --git a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
index d524c354ed35..1892d1e3dc26 100644
--- a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
+++ b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
@@ -67,8 +67,8 @@ namespace {
if (TII->convertToImmediateForm(MI, &DefMIToErase)) {
Changed = true;
NumRRConvertedInPreEmit++;
- DEBUG(dbgs() << "Converted instruction to imm form: ");
- DEBUG(MI.dump());
+ LLVM_DEBUG(dbgs() << "Converted instruction to imm form: ");
+ LLVM_DEBUG(MI.dump());
if (DefMIToErase) {
InstrsToErase.push_back(DefMIToErase);
}
@@ -76,8 +76,8 @@ namespace {
}
}
for (MachineInstr *MI : InstrsToErase) {
- DEBUG(dbgs() << "PPC pre-emit peephole: erasing instruction: ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << "PPC pre-emit peephole: erasing instruction: ");
+ LLVM_DEBUG(MI->dump());
MI->eraseFromParent();
NumRemovedInPreEmit++;
}
diff --git a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
index 76e25af94492..d6d948e9e1ee 100644
--- a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
+++ b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
@@ -150,8 +150,9 @@ static bool splitMBB(BlockSplitInfo &BSI) {
MachineRegisterInfo *MRI = &MF->getRegInfo();
assert(MRI->isSSA() && "Can only do this while the function is in SSA form.");
if (ThisMBB->succ_size() != 2) {
- DEBUG(dbgs() << "Don't know how to handle blocks that don't have exactly"
- << " two succesors.\n");
+ LLVM_DEBUG(
+ dbgs() << "Don't know how to handle blocks that don't have exactly"
+ << " two succesors.\n");
return false;
}
@@ -218,9 +219,9 @@ static bool splitMBB(BlockSplitInfo &BSI) {
}
addIncomingValuesToPHIs(NewBRTarget, ThisMBB, NewMBB, MRI);
- DEBUG(dbgs() << "After splitting, ThisMBB:\n"; ThisMBB->dump());
- DEBUG(dbgs() << "NewMBB:\n"; NewMBB->dump());
- DEBUG(dbgs() << "New branch-to block:\n"; NewBRTarget->dump());
+ LLVM_DEBUG(dbgs() << "After splitting, ThisMBB:\n"; ThisMBB->dump());
+ LLVM_DEBUG(dbgs() << "NewMBB:\n"; NewMBB->dump());
+ LLVM_DEBUG(dbgs() << "New branch-to block:\n"; NewBRTarget->dump());
return true;
}
@@ -491,7 +492,7 @@ PPCReduceCRLogicals::createCRLogicalOpInfo(MachineInstr &MIParam) {
Ret.ContainedInBlock &=
(MIParam.getParent() == Ret.TrueDefs.second->getParent());
}
- DEBUG(Ret.dump());
+ LLVM_DEBUG(Ret.dump());
if (Ret.IsBinary && Ret.ContainedInBlock && Ret.SingleUse) {
NumContainedSingleUseBinOps++;
if (Ret.FeedsBR && Ret.DefsSingleUse)
@@ -585,14 +586,15 @@ bool PPCReduceCRLogicals::handleCROp(CRLogicalOpInfo &CRI) {
/// BC %vr9<kill>, <BB#2>; CRBITRC:%vr9
bool PPCReduceCRLogicals::splitBlockOnBinaryCROp(CRLogicalOpInfo &CRI) {
if (CRI.CopyDefs.first == CRI.CopyDefs.second) {
- DEBUG(dbgs() << "Unable to split as the two operands are the same\n");
+ LLVM_DEBUG(dbgs() << "Unable to split as the two operands are the same\n");
NumNotSplitIdenticalOperands++;
return false;
}
if (CRI.TrueDefs.first->isCopy() || CRI.TrueDefs.second->isCopy() ||
CRI.TrueDefs.first->isPHI() || CRI.TrueDefs.second->isPHI()) {
- DEBUG(dbgs() << "Unable to split because one of the operands is a PHI or "
- "chain of copies.\n");
+ LLVM_DEBUG(
+ dbgs() << "Unable to split because one of the operands is a PHI or "
+ "chain of copies.\n");
NumNotSplitChainCopies++;
return false;
}
@@ -603,11 +605,11 @@ bool PPCReduceCRLogicals::splitBlockOnBinaryCROp(CRLogicalOpInfo &CRI) {
CRI.MI->getOpcode() != PPC::CRNAND &&
CRI.MI->getOpcode() != PPC::CRORC &&
CRI.MI->getOpcode() != PPC::CRANDC) {
- DEBUG(dbgs() << "Unable to split blocks on this opcode.\n");
+ LLVM_DEBUG(dbgs() << "Unable to split blocks on this opcode.\n");
NumNotSplitWrongOpcode++;
return false;
}
- DEBUG(dbgs() << "Splitting the following CR op:\n"; CRI.dump());
+ LLVM_DEBUG(dbgs() << "Splitting the following CR op:\n"; CRI.dump());
MachineBasicBlock::iterator Def1It = CRI.TrueDefs.first;
MachineBasicBlock::iterator Def2It = CRI.TrueDefs.second;
@@ -621,9 +623,9 @@ bool PPCReduceCRLogicals::splitBlockOnBinaryCROp(CRLogicalOpInfo &CRI) {
}
}
- DEBUG(dbgs() << "We will split the following block:\n";);
- DEBUG(CRI.MI->getParent()->dump());
- DEBUG(dbgs() << "Before instruction:\n"; SplitBefore->dump());
+ LLVM_DEBUG(dbgs() << "We will split the following block:\n";);
+ LLVM_DEBUG(CRI.MI->getParent()->dump());
+ LLVM_DEBUG(dbgs() << "Before instruction:\n"; SplitBefore->dump());
// Get the branch instruction.
MachineInstr *Branch =
@@ -656,10 +658,11 @@ bool PPCReduceCRLogicals::splitBlockOnBinaryCROp(CRLogicalOpInfo &CRI) {
TargetIsFallThrough);
MachineInstr *SplitCond =
UsingDef1 ? CRI.CopyDefs.second : CRI.CopyDefs.first;
- DEBUG(dbgs() << "We will " << (InvertNewBranch ? "invert" : "copy"));
- DEBUG(dbgs() << " the original branch and the target is the " <<
- (TargetIsFallThrough ? "fallthrough block\n" : "orig. target block\n"));
- DEBUG(dbgs() << "Original branch instruction: "; Branch->dump());
+ LLVM_DEBUG(dbgs() << "We will " << (InvertNewBranch ? "invert" : "copy"));
+ LLVM_DEBUG(dbgs() << " the original branch and the target is the "
+ << (TargetIsFallThrough ? "fallthrough block\n"
+ : "orig. target block\n"));
+ LLVM_DEBUG(dbgs() << "Original branch instruction: "; Branch->dump());
BlockSplitInfo BSI { Branch, SplitBefore, SplitCond, InvertNewBranch,
InvertOrigBranch, TargetIsFallThrough, MBPI, CRI.MI,
UsingDef1 ? CRI.CopyDefs.first : CRI.CopyDefs.second };
diff --git a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
index 49f2699ab082..903d4e97701b 100644
--- a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
+++ b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
@@ -77,7 +77,7 @@ protected:
continue;
}
- DEBUG(dbgs() << "TLS Dynamic Call Fixup:\n " << MI);
+ LLVM_DEBUG(dbgs() << "TLS Dynamic Call Fixup:\n " << MI);
unsigned OutReg = MI.getOperand(0).getReg();
unsigned InReg = MI.getOperand(1).getReg();
diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index f15af790de8f..6586f503a7b8 100644
--- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -241,7 +241,7 @@ protected:
assert(OldFMAReg == AddendMI->getOperand(0).getReg() &&
"Addend copy not tied to old FMA output!");
- DEBUG(dbgs() << "VSX FMA Mutation:\n " << MI);
+ LLVM_DEBUG(dbgs() << "VSX FMA Mutation:\n " << MI);
MI.getOperand(0).setReg(KilledProdReg);
MI.getOperand(1).setReg(KilledProdReg);
@@ -273,7 +273,7 @@ protected:
MI.getOperand(2).setIsUndef(OtherProdRegUndef);
}
- DEBUG(dbgs() << " -> " << MI);
+ LLVM_DEBUG(dbgs() << " -> " << MI);
// The killed product operand was killed here, so we can reuse it now
// for the result of the fma.
@@ -310,7 +310,7 @@ protected:
NewFMAInt.addSegment(LiveInterval::Segment(AI->start, AI->end,
NewFMAValNo));
}
- DEBUG(dbgs() << " extended: " << NewFMAInt << '\n');
+ LLVM_DEBUG(dbgs() << " extended: " << NewFMAInt << '\n');
// Extend the live interval of the addend source (it might end at the
// copy to be removed, or somewhere in between there and here). This
@@ -323,15 +323,15 @@ protected:
LiveRange &AddendSrcRange = LIS->getRegUnit(Unit);
AddendSrcRange.extendInBlock(LIS->getMBBStartIdx(&MBB),
FMAIdx.getRegSlot());
- DEBUG(dbgs() << " extended: " << AddendSrcRange << '\n');
+ LLVM_DEBUG(dbgs() << " extended: " << AddendSrcRange << '\n');
}
FMAInt.removeValNo(FMAValNo);
- DEBUG(dbgs() << " trimmed: " << FMAInt << '\n');
+ LLVM_DEBUG(dbgs() << " trimmed: " << FMAInt << '\n');
// Remove the (now unused) copy.
- DEBUG(dbgs() << " removing: " << *AddendMI << '\n');
+ LLVM_DEBUG(dbgs() << " removing: " << *AddendMI << '\n');
LIS->RemoveMachineInstrFromMaps(*AddendMI);
AddendMI->eraseFromParent();
diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index 83c9dd67c33c..2fe54a88b65c 100644
--- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -528,8 +528,8 @@ bool PPCVSXSwapRemoval::gatherVectorInstructions() {
}
if (RelevantFunction) {
- DEBUG(dbgs() << "Swap vector when first built\n\n");
- DEBUG(dumpSwapVector());
+ LLVM_DEBUG(dbgs() << "Swap vector when first built\n\n");
+ LLVM_DEBUG(dumpSwapVector());
}
return RelevantFunction;
@@ -588,14 +588,14 @@ unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg,
// as such so their containing webs will not be optimized.
void PPCVSXSwapRemoval::formWebs() {
- DEBUG(dbgs() << "\n*** Forming webs for swap removal ***\n\n");
+ LLVM_DEBUG(dbgs() << "\n*** Forming webs for swap removal ***\n\n");
for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
- DEBUG(dbgs() << "\n" << SwapVector[EntryIdx].VSEId << " ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << "\n" << SwapVector[EntryIdx].VSEId << " ");
+ LLVM_DEBUG(MI->dump());
// It's sufficient to walk vector uses and join them to their unique
// definitions. In addition, check full vector register operands
@@ -625,10 +625,11 @@ void PPCVSXSwapRemoval::formWebs() {
(void)EC->unionSets(SwapVector[DefIdx].VSEId,
SwapVector[EntryIdx].VSEId);
- DEBUG(dbgs() << format("Unioning %d with %d\n", SwapVector[DefIdx].VSEId,
- SwapVector[EntryIdx].VSEId));
- DEBUG(dbgs() << " Def: ");
- DEBUG(DefMI->dump());
+ LLVM_DEBUG(dbgs() << format("Unioning %d with %d\n",
+ SwapVector[DefIdx].VSEId,
+ SwapVector[EntryIdx].VSEId));
+ LLVM_DEBUG(dbgs() << " Def: ");
+ LLVM_DEBUG(DefMI->dump());
}
}
}
@@ -639,7 +640,7 @@ void PPCVSXSwapRemoval::formWebs() {
// as rejected.
void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
- DEBUG(dbgs() << "\n*** Rejecting webs for swap removal ***\n\n");
+ LLVM_DEBUG(dbgs() << "\n*** Rejecting webs for swap removal ***\n\n");
for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
@@ -657,12 +658,13 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
SwapVector[Repr].WebRejected = 1;
- DEBUG(dbgs() <<
- format("Web %d rejected for physreg, partial reg, or not "
- "swap[pable]\n", Repr));
- DEBUG(dbgs() << " in " << EntryIdx << ": ");
- DEBUG(SwapVector[EntryIdx].VSEMI->dump());
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(
+ dbgs() << format("Web %d rejected for physreg, partial reg, or not "
+ "swap[pable]\n",
+ Repr));
+ LLVM_DEBUG(dbgs() << " in " << EntryIdx << ": ");
+ LLVM_DEBUG(SwapVector[EntryIdx].VSEMI->dump());
+ LLVM_DEBUG(dbgs() << "\n");
}
// Reject webs than contain swapping loads that feed something other
@@ -683,13 +685,13 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
SwapVector[Repr].WebRejected = 1;
- DEBUG(dbgs() <<
- format("Web %d rejected for load not feeding swap\n", Repr));
- DEBUG(dbgs() << " def " << EntryIdx << ": ");
- DEBUG(MI->dump());
- DEBUG(dbgs() << " use " << UseIdx << ": ");
- DEBUG(UseMI.dump());
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << format(
+ "Web %d rejected for load not feeding swap\n", Repr));
+ LLVM_DEBUG(dbgs() << " def " << EntryIdx << ": ");
+ LLVM_DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << " use " << UseIdx << ": ");
+ LLVM_DEBUG(UseMI.dump());
+ LLVM_DEBUG(dbgs() << "\n");
}
}
@@ -707,13 +709,13 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
SwapVector[Repr].WebRejected = 1;
- DEBUG(dbgs() <<
- format("Web %d rejected for store not fed by swap\n", Repr));
- DEBUG(dbgs() << " def " << DefIdx << ": ");
- DEBUG(DefMI->dump());
- DEBUG(dbgs() << " use " << EntryIdx << ": ");
- DEBUG(MI->dump());
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << format(
+ "Web %d rejected for store not fed by swap\n", Repr));
+ LLVM_DEBUG(dbgs() << " def " << DefIdx << ": ");
+ LLVM_DEBUG(DefMI->dump());
+ LLVM_DEBUG(dbgs() << " use " << EntryIdx << ": ");
+ LLVM_DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << "\n");
}
// Ensure all uses of the register defined by DefMI feed store
@@ -724,21 +726,22 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
if (SwapVector[UseIdx].VSEMI->getOpcode() != MI->getOpcode()) {
SwapVector[Repr].WebRejected = 1;
- DEBUG(dbgs() <<
- format("Web %d rejected for swap not feeding only stores\n",
- Repr));
- DEBUG(dbgs() << " def " << " : ");
- DEBUG(DefMI->dump());
- DEBUG(dbgs() << " use " << UseIdx << ": ");
- DEBUG(SwapVector[UseIdx].VSEMI->dump());
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(
+ dbgs() << format(
+ "Web %d rejected for swap not feeding only stores\n", Repr));
+ LLVM_DEBUG(dbgs() << " def "
+ << " : ");
+ LLVM_DEBUG(DefMI->dump());
+ LLVM_DEBUG(dbgs() << " use " << UseIdx << ": ");
+ LLVM_DEBUG(SwapVector[UseIdx].VSEMI->dump());
+ LLVM_DEBUG(dbgs() << "\n");
}
}
}
}
- DEBUG(dbgs() << "Swap vector after web analysis:\n\n");
- DEBUG(dumpSwapVector());
+ LLVM_DEBUG(dbgs() << "Swap vector after web analysis:\n\n");
+ LLVM_DEBUG(dumpSwapVector());
}
// Walk the swap vector entries looking for swaps fed by permuting loads
@@ -748,7 +751,7 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
// such that multiple loads feed the same swap, etc.)
void PPCVSXSwapRemoval::markSwapsForRemoval() {
- DEBUG(dbgs() << "\n*** Marking swaps for removal ***\n\n");
+ LLVM_DEBUG(dbgs() << "\n*** Marking swaps for removal ***\n\n");
for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
@@ -763,8 +766,8 @@ void PPCVSXSwapRemoval::markSwapsForRemoval() {
int UseIdx = SwapMap[&UseMI];
SwapVector[UseIdx].WillRemove = 1;
- DEBUG(dbgs() << "Marking swap fed by load for removal: ");
- DEBUG(UseMI.dump());
+ LLVM_DEBUG(dbgs() << "Marking swap fed by load for removal: ");
+ LLVM_DEBUG(UseMI.dump());
}
}
@@ -778,8 +781,8 @@ void PPCVSXSwapRemoval::markSwapsForRemoval() {
int DefIdx = SwapMap[DefMI];
SwapVector[DefIdx].WillRemove = 1;
- DEBUG(dbgs() << "Marking swap feeding store for removal: ");
- DEBUG(DefMI->dump());
+ LLVM_DEBUG(dbgs() << "Marking swap feeding store for removal: ");
+ LLVM_DEBUG(DefMI->dump());
}
} else if (SwapVector[EntryIdx].IsSwappable &&
@@ -824,8 +827,8 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
unsigned NElts;
- DEBUG(dbgs() << "Changing splat: ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << "Changing splat: ");
+ LLVM_DEBUG(MI->dump());
switch (MI->getOpcode()) {
default:
@@ -848,8 +851,8 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
else
MI->getOperand(1).setImm(EltNo);
- DEBUG(dbgs() << " Into: ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << " Into: ");
+ LLVM_DEBUG(MI->dump());
break;
}
@@ -862,8 +865,8 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
case SHValues::SH_XXPERMDI: {
MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
- DEBUG(dbgs() << "Changing XXPERMDI: ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << "Changing XXPERMDI: ");
+ LLVM_DEBUG(MI->dump());
unsigned Selector = MI->getOperand(3).getImm();
if (Selector == 0 || Selector == 3)
@@ -875,8 +878,8 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
MI->getOperand(1).setReg(Reg2);
MI->getOperand(2).setReg(Reg1);
- DEBUG(dbgs() << " Into: ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << " Into: ");
+ LLVM_DEBUG(MI->dump());
break;
}
@@ -886,16 +889,16 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
case SHValues::SH_COPYWIDEN: {
MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
- DEBUG(dbgs() << "Changing SUBREG_TO_REG: ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << "Changing SUBREG_TO_REG: ");
+ LLVM_DEBUG(MI->dump());
unsigned DstReg = MI->getOperand(0).getReg();
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
MI->getOperand(0).setReg(NewVReg);
- DEBUG(dbgs() << " Into: ");
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << " Into: ");
+ LLVM_DEBUG(MI->dump());
auto InsertPoint = ++MachineBasicBlock::iterator(MI);
@@ -911,19 +914,19 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
TII->get(PPC::COPY), VSRCTmp1)
.addReg(NewVReg);
- DEBUG(std::prev(InsertPoint)->dump());
+ LLVM_DEBUG(std::prev(InsertPoint)->dump());
insertSwap(MI, InsertPoint, VSRCTmp2, VSRCTmp1);
- DEBUG(std::prev(InsertPoint)->dump());
+ LLVM_DEBUG(std::prev(InsertPoint)->dump());
BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
TII->get(PPC::COPY), DstReg)
.addReg(VSRCTmp2);
- DEBUG(std::prev(InsertPoint)->dump());
+ LLVM_DEBUG(std::prev(InsertPoint)->dump());
} else {
insertSwap(MI, InsertPoint, DstReg, NewVReg);
- DEBUG(std::prev(InsertPoint)->dump());
+ LLVM_DEBUG(std::prev(InsertPoint)->dump());
}
break;
}
@@ -934,7 +937,7 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
// a copy operation.
bool PPCVSXSwapRemoval::removeSwaps() {
- DEBUG(dbgs() << "\n*** Removing swaps ***\n\n");
+ LLVM_DEBUG(dbgs() << "\n*** Removing swaps ***\n\n");
bool Changed = false;
@@ -947,9 +950,9 @@ bool PPCVSXSwapRemoval::removeSwaps() {
MI->getOperand(0).getReg())
.add(MI->getOperand(1));
- DEBUG(dbgs() << format("Replaced %d with copy: ",
- SwapVector[EntryIdx].VSEId));
- DEBUG(MI->dump());
+ LLVM_DEBUG(dbgs() << format("Replaced %d with copy: ",
+ SwapVector[EntryIdx].VSEId));
+ LLVM_DEBUG(MI->dump());
MI->eraseFromParent();
}
diff --git a/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 91c72aa191d1..7bbb371a757f 100644
--- a/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -258,14 +258,15 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// It's a 32-bit instruction if bits 0 and 1 are 1.
if ((Bytes[0] & 0x3) == 0x3) {
Insn = support::endian::read32le(Bytes.data());
- DEBUG(dbgs() << "Trying RISCV32 table :\n");
+ LLVM_DEBUG(dbgs() << "Trying RISCV32 table :\n");
Result = decodeInstruction(DecoderTable32, MI, Insn, Address, this, STI);
Size = 4;
} else {
Insn = support::endian::read16le(Bytes.data());
if (!STI.getFeatureBits()[RISCV::Feature64Bit]) {
- DEBUG(dbgs() << "Trying RISCV32Only_16 table (16-bit Instruction):\n");
+ LLVM_DEBUG(
+ dbgs() << "Trying RISCV32Only_16 table (16-bit Instruction):\n");
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableRISCV32Only_16, MI, Insn, Address,
this, STI);
@@ -275,7 +276,7 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
}
- DEBUG(dbgs() << "Trying RISCV_C table (16-bit Instruction):\n");
+ LLVM_DEBUG(dbgs() << "Trying RISCV_C table (16-bit Instruction):\n");
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTable16, MI, Insn, Address, this, STI);
Size = 2;
diff --git a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index b7d06879cadb..04441b9a9b15 100644
--- a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -71,7 +71,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we have already selected
if (Node->isMachineOpcode()) {
- DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
Node->setNodeId(-1);
return;
}
@@ -196,11 +196,11 @@ void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
continue;
}
- DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
- DEBUG(Base->dump(CurDAG));
- DEBUG(dbgs() << "\nN: ");
- DEBUG(N->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
+ LLVM_DEBUG(Base->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nN: ");
+ LLVM_DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
// Modify the offset operand of the load/store.
if (BaseOpIdx == 0) // Load
@@ -237,13 +237,14 @@ void RISCVDAGToDAGISel::doPeepholeBuildPairF64SplitF64() {
SDValue F64Val = N->getOperand(0);
if (F64Val.isMachineOpcode() &&
F64Val.getMachineOpcode() == RISCV::BuildPairF64Pseudo) {
- DEBUG(dbgs() << "Removing redundant SplitF64Pseudo and replacing uses "
- "with BuildPairF64Pseudo operands:\n");
- DEBUG(dbgs() << "N: ");
- DEBUG(N->dump(CurDAG));
- DEBUG(dbgs() << "F64Val: ");
- DEBUG(F64Val->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Removing redundant SplitF64Pseudo and replacing uses "
+ "with BuildPairF64Pseudo operands:\n");
+ LLVM_DEBUG(dbgs() << "N: ");
+ LLVM_DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "F64Val: ");
+ LLVM_DEBUG(F64Val->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
SDValue From[] = {SDValue(N, 0), SDValue(N, 1)};
SDValue To[] = {F64Val.getOperand(0), F64Val.getOperand(1)};
CurDAG->ReplaceAllUsesOfValuesWith(From, To, 2);
diff --git a/lib/Target/RISCV/RISCVISelLowering.cpp b/lib/Target/RISCV/RISCVISelLowering.cpp
index ead9f0bdc632..52c027ba3140 100644
--- a/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -824,8 +824,8 @@ void RISCVTargetLowering::analyzeInputArgs(
if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo, /*IsRet=*/true, IsRet, ArgTy)) {
- DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
- << EVT(ArgVT).getEVTString() << '\n');
+ LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
+ << EVT(ArgVT).getEVTString() << '\n');
llvm_unreachable(nullptr);
}
}
@@ -844,8 +844,8 @@ void RISCVTargetLowering::analyzeOutputArgs(
if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
- DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
- << EVT(ArgVT).getEVTString() << "\n");
+ LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
+ << EVT(ArgVT).getEVTString() << "\n");
llvm_unreachable(nullptr);
}
}
diff --git a/lib/Target/SystemZ/SystemZHazardRecognizer.cpp b/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
index a643b0565282..9e8acdaabd97 100644
--- a/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
+++ b/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
@@ -85,7 +85,7 @@ void SystemZHazardRecognizer::Reset() {
GrpCount = 0;
LastFPdOpCycleIdx = UINT_MAX;
LastEmittedMI = nullptr;
- DEBUG(CurGroupDbg = "";);
+ LLVM_DEBUG(CurGroupDbg = "";);
}
bool
@@ -112,8 +112,8 @@ void SystemZHazardRecognizer::nextGroup() {
if (CurrGroupSize == 0)
return;
- DEBUG(dumpCurrGroup("Completed decode group"));
- DEBUG(CurGroupDbg = "";);
+ LLVM_DEBUG(dumpCurrGroup("Completed decode group"));
+ LLVM_DEBUG(CurGroupDbg = "";);
GrpCount++;
@@ -131,7 +131,7 @@ void SystemZHazardRecognizer::nextGroup() {
ProcResCostLim))
CriticalResourceIdx = UINT_MAX;
- DEBUG(dumpState(););
+ LLVM_DEBUG(dumpState(););
}
#ifndef NDEBUG // Debug output
@@ -234,25 +234,23 @@ static inline bool isBranchRetTrap(MachineInstr *MI) {
void SystemZHazardRecognizer::
EmitInstruction(SUnit *SU) {
const MCSchedClassDesc *SC = getSchedClass(SU);
- DEBUG(dbgs() << "++ HazardRecognizer emitting "; dumpSU(SU, dbgs());
- dbgs() << "\n";);
- DEBUG(dumpCurrGroup("Decode group before emission"););
+ LLVM_DEBUG(dbgs() << "++ HazardRecognizer emitting "; dumpSU(SU, dbgs());
+ dbgs() << "\n";);
+ LLVM_DEBUG(dumpCurrGroup("Decode group before emission"););
// If scheduling an SU that must begin a new decoder group, move on
// to the next group.
if (!fitsIntoCurrentGroup(SU))
nextGroup();
- DEBUG(raw_string_ostream cgd(CurGroupDbg);
- if (CurGroupDbg.length())
- cgd << ", ";
- dumpSU(SU, cgd););
+ LLVM_DEBUG(raw_string_ostream cgd(CurGroupDbg);
+ if (CurGroupDbg.length()) cgd << ", "; dumpSU(SU, cgd););
LastEmittedMI = SU->getInstr();
// After returning from a call, we don't know much about the state.
if (SU->isCall) {
- DEBUG(dbgs() << "++ Clearing state after call.\n";);
+ LLVM_DEBUG(dbgs() << "++ Clearing state after call.\n";);
Reset();
LastEmittedMI = SU->getInstr();
return;
@@ -274,9 +272,10 @@ EmitInstruction(SUnit *SU) {
(PI->ProcResourceIdx != CriticalResourceIdx &&
CurrCounter >
ProcResourceCounters[CriticalResourceIdx]))) {
- DEBUG(dbgs() << "++ New critical resource: "
- << SchedModel->getProcResource(PI->ProcResourceIdx)->Name
- << "\n";);
+ LLVM_DEBUG(
+ dbgs() << "++ New critical resource: "
+ << SchedModel->getProcResource(PI->ProcResourceIdx)->Name
+ << "\n";);
CriticalResourceIdx = PI->ProcResourceIdx;
}
}
@@ -284,8 +283,8 @@ EmitInstruction(SUnit *SU) {
// Make note of an instruction that uses a blocking resource (FPd).
if (SU->isUnbuffered) {
LastFPdOpCycleIdx = getCurrCycleIdx(SU);
- DEBUG(dbgs() << "++ Last FPd cycle index: "
- << LastFPdOpCycleIdx << "\n";);
+ LLVM_DEBUG(dbgs() << "++ Last FPd cycle index: " << LastFPdOpCycleIdx
+ << "\n";);
}
// Insert SU into current group by increasing number of slots used
@@ -409,7 +408,7 @@ void SystemZHazardRecognizer::
copyState(SystemZHazardRecognizer *Incoming) {
// Current decoder group
CurrGroupSize = Incoming->CurrGroupSize;
- DEBUG(CurGroupDbg = Incoming->CurGroupDbg;);
+ LLVM_DEBUG(CurGroupDbg = Incoming->CurGroupDbg;);
// Processor resources
ProcResourceCounters = Incoming->ProcResourceCounters;
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 3e13cf249a56..99f17639d2ea 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -589,7 +589,7 @@ bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
return false;
- DEBUG(AM.dump());
+ LLVM_DEBUG(AM.dump());
return true;
}
@@ -1427,7 +1427,7 @@ bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
void SystemZDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
return;
}
@@ -1827,11 +1827,11 @@ void SystemZDAGToDAGISel::PreprocessISelDAG() {
}
if (Res) {
- DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld: ");
- DEBUG(N->dump(CurDAG));
- DEBUG(dbgs() << "\nNew: ");
- DEBUG(Res.getNode()->dump(CurDAG));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld: ");
+ LLVM_DEBUG(N->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\nNew: ");
+ LLVM_DEBUG(Res.getNode()->dump(CurDAG));
+ LLVM_DEBUG(dbgs() << "\n");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
MadeChange = true;
diff --git a/lib/Target/SystemZ/SystemZMachineScheduler.cpp b/lib/Target/SystemZ/SystemZMachineScheduler.cpp
index 1de11a1166df..fcbf4c4b5fe4 100644
--- a/lib/Target/SystemZ/SystemZMachineScheduler.cpp
+++ b/lib/Target/SystemZ/SystemZMachineScheduler.cpp
@@ -72,23 +72,22 @@ advanceTo(MachineBasicBlock::iterator NextBegin) {
}
void SystemZPostRASchedStrategy::initialize(ScheduleDAGMI *dag) {
- DEBUG(HazardRec->dumpState(););
+ LLVM_DEBUG(HazardRec->dumpState(););
}
void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) {
assert ((SchedStates.find(NextMBB) == SchedStates.end()) &&
"Entering MBB twice?");
- DEBUG(dbgs() << "** Entering " << printMBBReference(*NextMBB));
+ LLVM_DEBUG(dbgs() << "** Entering " << printMBBReference(*NextMBB));
MBB = NextMBB;
/// Create a HazardRec for MBB, save it in SchedStates and set HazardRec to
/// point to it.
HazardRec = SchedStates[MBB] = new SystemZHazardRecognizer(TII, &SchedModel);
- DEBUG(const MachineLoop *Loop = MLI->getLoopFor(MBB);
- if(Loop && Loop->getHeader() == MBB)
- dbgs() << " (Loop header)";
- dbgs() << ":\n";);
+ LLVM_DEBUG(const MachineLoop *Loop = MLI->getLoopFor(MBB);
+ if (Loop && Loop->getHeader() == MBB) dbgs() << " (Loop header)";
+ dbgs() << ":\n";);
// Try to take over the state from a single predecessor, if it has been
// scheduled. If this is not possible, we are done.
@@ -98,17 +97,17 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) {
SchedStates.find(SinglePredMBB) == SchedStates.end())
return;
- DEBUG(dbgs() << "** Continued scheduling from "
- << printMBBReference(*SinglePredMBB) << "\n";);
+ LLVM_DEBUG(dbgs() << "** Continued scheduling from "
+ << printMBBReference(*SinglePredMBB) << "\n";);
HazardRec->copyState(SchedStates[SinglePredMBB]);
- DEBUG(HazardRec->dumpState(););
+ LLVM_DEBUG(HazardRec->dumpState(););
// Emit incoming terminator(s). Be optimistic and assume that branch
// prediction will generally do "the right thing".
for (MachineBasicBlock::iterator I = SinglePredMBB->getFirstTerminator();
I != SinglePredMBB->end(); I++) {
- DEBUG(dbgs() << "** Emitting incoming branch: "; I->dump(););
+ LLVM_DEBUG(dbgs() << "** Emitting incoming branch: "; I->dump(););
bool TakenBranch = (I->isBranch() &&
(TII->getBranchInfo(*I).Target->isReg() || // Relative branch
TII->getBranchInfo(*I).Target->getMBB() == MBB));
@@ -119,7 +118,7 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) {
}
void SystemZPostRASchedStrategy::leaveMBB() {
- DEBUG(dbgs() << "** Leaving " << printMBBReference(*MBB) << "\n";);
+ LLVM_DEBUG(dbgs() << "** Leaving " << printMBBReference(*MBB) << "\n";);
// Advance to first terminator. The successor block will handle terminators
// dependent on CFG layout (T/NT branch etc).
@@ -165,14 +164,14 @@ SUnit *SystemZPostRASchedStrategy::pickNode(bool &IsTopNode) {
// If only one choice, return it.
if (Available.size() == 1) {
- DEBUG(dbgs() << "** Only one: ";
- HazardRec->dumpSU(*Available.begin(), dbgs()); dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "** Only one: ";
+ HazardRec->dumpSU(*Available.begin(), dbgs()); dbgs() << "\n";);
return *Available.begin();
}
// All nodes that are possible to schedule are stored in the
// Available set.
- DEBUG(dbgs() << "** Available: "; Available.dump(*HazardRec););
+ LLVM_DEBUG(dbgs() << "** Available: "; Available.dump(*HazardRec););
Candidate Best;
for (auto *SU : Available) {
@@ -183,13 +182,11 @@ SUnit *SystemZPostRASchedStrategy::pickNode(bool &IsTopNode) {
// Remember which SU is the best candidate.
if (Best.SU == nullptr || c < Best) {
Best = c;
- DEBUG(dbgs() << "** Best so far: ";);
+ LLVM_DEBUG(dbgs() << "** Best so far: ";);
} else
- DEBUG(dbgs() << "** Tried : ";);
- DEBUG(HazardRec->dumpSU(c.SU, dbgs());
- c.dumpCosts();
- dbgs() << " Height:" << c.SU->getHeight();
- dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "** Tried : ";);
+ LLVM_DEBUG(HazardRec->dumpSU(c.SU, dbgs()); c.dumpCosts();
+ dbgs() << " Height:" << c.SU->getHeight(); dbgs() << "\n";);
// Once we know we have seen all SUs that affect grouping or use unbuffered
// resources, we can stop iterating if Best looks good.
@@ -243,12 +240,9 @@ operator<(const Candidate &other) {
}
void SystemZPostRASchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
- DEBUG(dbgs() << "** Scheduling SU(" << SU->NodeNum << ") ";
- if (Available.size() == 1)
- dbgs() << "(only one) ";
- Candidate c(SU, *HazardRec);
- c.dumpCosts();
- dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "** Scheduling SU(" << SU->NodeNum << ") ";
+ if (Available.size() == 1) dbgs() << "(only one) ";
+ Candidate c(SU, *HazardRec); c.dumpCosts(); dbgs() << "\n";);
// Remove SU from Available set and update HazardRec.
Available.erase(SU);
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
index 2efac06e5be5..d58c83d14ae2 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -88,7 +88,8 @@ void WebAssemblyMCCodeEmitter::encodeInstruction(
assert(Desc.TSFlags == 0 &&
"WebAssembly non-variable_ops don't use TSFlags");
const MCOperandInfo &Info = Desc.OpInfo[i];
- DEBUG(dbgs() << "Encoding immediate: type=" << int(Info.OperandType) << "\n");
+ LLVM_DEBUG(dbgs() << "Encoding immediate: type="
+ << int(Info.OperandType) << "\n");
if (Info.OperandType == WebAssembly::OPERAND_I32IMM) {
encodeSLEB128(int32_t(MO.getImm()), OS);
} else if (Info.OperandType == WebAssembly::OPERAND_OFFSET32) {
diff --git a/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp b/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
index 3c17b2ec0f6e..7c8a631cde8a 100644
--- a/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
@@ -68,7 +68,7 @@ FunctionPass *llvm::createWebAssemblyArgumentMove() {
}
bool WebAssemblyArgumentMove::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "********** Argument Move **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
diff --git a/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
index a224ae7316af..c85050ce2e5c 100644
--- a/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -54,7 +54,7 @@ MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const {
MVT::v4i32, MVT::v4f32})
if (TRI->isTypeLegalForClass(*TRC, T))
return T;
- DEBUG(errs() << "Unknown type for register number: " << RegNo);
+ LLVM_DEBUG(errs() << "Unknown type for register number: " << RegNo);
llvm_unreachable("Unknown register type");
return MVT::Other;
}
@@ -191,7 +191,7 @@ void WebAssemblyAsmPrinter::EmitFunctionBodyEnd() {
}
void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- DEBUG(dbgs() << "EmitInstruction: " << *MI << '\n');
+ LLVM_DEBUG(dbgs() << "EmitInstruction: " << *MI << '\n');
switch (MI->getOpcode()) {
case WebAssembly::ARGUMENT_I32:
diff --git a/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
index b2607f271425..bb1b82549845 100644
--- a/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
@@ -264,9 +264,9 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
}
bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** CFG Sorting **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** CFG Sorting **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
const auto &MLI = getAnalysis<MachineLoopInfo>();
auto &MDT = getAnalysis<MachineDominatorTree>();
diff --git a/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
index 3429c4fc6e2f..1245ac037274 100644
--- a/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
@@ -366,9 +366,9 @@ static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI,
}
bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** CFG Stackifying **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** CFG Stackifying **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
const auto &MLI = getAnalysis<MachineLoopInfo>();
auto &MDT = getAnalysis<MachineDominatorTree>();
diff --git a/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp b/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
index 03bfe24e30b5..c1820bf66bc0 100644
--- a/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
@@ -83,8 +83,8 @@ static bool IsPseudoCallIndirect(const MachineInstr &MI) {
}
bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Fixing up CALL_INDIRECTs **********\n"
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Fixing up CALL_INDIRECTs **********\n"
+ << MF.getName() << '\n');
bool Changed = false;
const WebAssemblyInstrInfo *TII =
@@ -93,7 +93,7 @@ bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
if (IsPseudoCallIndirect(MI)) {
- DEBUG(dbgs() << "Found call_indirect: " << MI << '\n');
+ LLVM_DEBUG(dbgs() << "Found call_indirect: " << MI << '\n');
// Rewrite pseudo to non-pseudo
const MCInstrDesc &Desc = TII->get(GetNonPseudoCallIndirectOpcode(MI));
@@ -123,13 +123,13 @@ bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) {
for (const MachineOperand &MO : Ops)
MI.addOperand(MO);
- DEBUG(dbgs() << " After transform: " << MI);
+ LLVM_DEBUG(dbgs() << " After transform: " << MI);
Changed = true;
}
}
}
- DEBUG(dbgs() << "\nDone fixing up CALL_INDIRECTs\n\n");
+ LLVM_DEBUG(dbgs() << "\nDone fixing up CALL_INDIRECTs\n\n");
return Changed;
}
diff --git a/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index caf12a610be6..8639cb9fc862 100644
--- a/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -181,9 +181,9 @@ static MachineInstr *FindStartOfTree(MachineOperand &MO,
}
bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Make Locals Explicit **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Make Locals Explicit **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
// Disable this pass if directed to do so.
if (DisableWebAssemblyExplicitLocals)
diff --git a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
index c710c6972a64..bea027be7711 100644
--- a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
@@ -177,7 +177,7 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF,
if (LLVM_LIKELY(RewriteSuccs.empty()))
return false;
- DEBUG(dbgs() << "Irreducible control flow detected!\n");
+ LLVM_DEBUG(dbgs() << "Irreducible control flow detected!\n");
// Ok. We have irreducible control flow! Create a dispatch block which will
// contain a jump table to any block in the problematic set of blocks.
@@ -208,7 +208,8 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF,
continue;
unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1;
- DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index << "\n");
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index
+ << "\n");
Pair.first->second = Index;
for (auto Pred : MBB->predecessors())
@@ -267,9 +268,9 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF,
bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(
MachineFunction &MF) {
- DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
bool Changed = false;
auto &MLI = getAnalysis<MachineLoopInfo>();
@@ -287,7 +288,7 @@ bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(
// If we made any changes, completely recompute everything.
if (LLVM_UNLIKELY(Changed)) {
- DEBUG(dbgs() << "Recomputing dominators and loops.\n");
+ LLVM_DEBUG(dbgs() << "Recomputing dominators and loops.\n");
MF.getRegInfo().invalidateLiveness();
MF.RenumberBlocks();
getAnalysis<MachineDominatorTree>().runOnMachineFunction(MF);
diff --git a/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
index d22dda7546e9..5e00755a81fb 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
@@ -70,7 +70,7 @@ private:
void WebAssemblyDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
return;
}
diff --git a/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
index adb8f6c7c73d..5fb97e38939a 100644
--- a/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
@@ -55,9 +55,9 @@ FunctionPass *llvm::createWebAssemblyLowerBrUnless() {
}
bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Lowering br_unless **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Lowering br_unless **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
diff --git a/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
index 53e7688e2653..1c65478fe14c 100644
--- a/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -66,9 +66,9 @@ FunctionPass *llvm::createWebAssemblyOptimizeLiveIntervals() {
}
bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Optimize LiveIntervals **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Optimize LiveIntervals **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
MachineRegisterInfo &MRI = MF.getRegInfo();
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
diff --git a/lib/Target/WebAssembly/WebAssemblyPeephole.cpp b/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
index aa70c91cb4bd..520f94cf06b4 100644
--- a/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
@@ -116,7 +116,7 @@ static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
}
bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "********** Peephole **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
diff --git a/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
index f61d65b65300..e44e7057e233 100644
--- a/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
@@ -71,7 +71,7 @@ static bool HasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) {
}
bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "********** Prepare For LiveIntervals **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
diff --git a/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
index 494259b17b00..d69a27937105 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -74,7 +74,7 @@ static float computeWeight(const MachineRegisterInfo *MRI,
}
bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "********** Register Coloring **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
@@ -97,7 +97,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
SmallVector<LiveInterval *, 0> SortedIntervals;
SortedIntervals.reserve(NumVRegs);
- DEBUG(dbgs() << "Interesting register intervals:\n");
+ LLVM_DEBUG(dbgs() << "Interesting register intervals:\n");
for (unsigned i = 0; i < NumVRegs; ++i) {
unsigned VReg = TargetRegisterInfo::index2VirtReg(i);
if (MFI.isVRegStackified(VReg))
@@ -109,10 +109,10 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
LiveInterval *LI = &Liveness->getInterval(VReg);
assert(LI->weight == 0.0f);
LI->weight = computeWeight(MRI, MBFI, VReg);
- DEBUG(LI->dump());
+ LLVM_DEBUG(LI->dump());
SortedIntervals.push_back(LI);
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
// Sort them to put arguments first (since we don't want to rename live-in
// registers), by weight next, and then by position.
@@ -129,7 +129,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
return *LHS < *RHS;
});
- DEBUG(dbgs() << "Coloring register intervals:\n");
+ LLVM_DEBUG(dbgs() << "Coloring register intervals:\n");
SmallVector<unsigned, 16> SlotMapping(SortedIntervals.size(), -1u);
SmallVector<SmallVector<LiveInterval *, 4>, 16> Assignments(
SortedIntervals.size());
@@ -159,9 +159,9 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
Changed |= Old != New;
UsedColors.set(Color);
Assignments[Color].push_back(LI);
- DEBUG(dbgs() << "Assigning vreg"
- << TargetRegisterInfo::virtReg2Index(LI->reg) << " to vreg"
- << TargetRegisterInfo::virtReg2Index(New) << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Assigning vreg" << TargetRegisterInfo::virtReg2Index(LI->reg)
+ << " to vreg" << TargetRegisterInfo::virtReg2Index(New) << "\n");
}
if (!Changed)
return false;
diff --git a/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
index 7f518ac98688..1e2a248f097e 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
@@ -60,9 +60,9 @@ FunctionPass *llvm::createWebAssemblyRegNumbering() {
}
bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Register Numbering **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Register Numbering **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -77,8 +77,8 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
break;
int64_t Imm = MI.getOperand(1).getImm();
- DEBUG(dbgs() << "Arg VReg " << MI.getOperand(0).getReg() << " -> WAReg "
- << Imm << "\n");
+ LLVM_DEBUG(dbgs() << "Arg VReg " << MI.getOperand(0).getReg()
+ << " -> WAReg " << Imm << "\n");
MFI.setWAReg(MI.getOperand(0).getReg(), Imm);
}
@@ -96,13 +96,13 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
continue;
// Handle stackified registers.
if (MFI.isVRegStackified(VReg)) {
- DEBUG(dbgs() << "VReg " << VReg << " -> WAReg "
- << (INT32_MIN | NumStackRegs) << "\n");
+ LLVM_DEBUG(dbgs() << "VReg " << VReg << " -> WAReg "
+ << (INT32_MIN | NumStackRegs) << "\n");
MFI.setWAReg(VReg, INT32_MIN | NumStackRegs++);
continue;
}
if (MFI.getWAReg(VReg) == WebAssemblyFunctionInfo::UnusedReg) {
- DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " << CurReg << "\n");
+ LLVM_DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " << CurReg << "\n");
MFI.setWAReg(VReg, CurReg++);
}
}
diff --git a/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
index c326bdb8a7bc..0b71edae3ceb 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -473,7 +473,7 @@ static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand& Op,
MachineInstr *Insert, LiveIntervals &LIS,
WebAssemblyFunctionInfo &MFI,
MachineRegisterInfo &MRI) {
- DEBUG(dbgs() << "Move for single use: "; Def->dump());
+ LLVM_DEBUG(dbgs() << "Move for single use: "; Def->dump());
MBB.splice(Insert, &MBB, Def);
LIS.handleMove(*Def);
@@ -500,7 +500,7 @@ static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand& Op,
MFI.stackifyVReg(NewReg);
- DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
}
ImposeStackOrdering(Def);
@@ -514,8 +514,8 @@ static MachineInstr *RematerializeCheapDef(
MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS,
WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI,
const WebAssemblyInstrInfo *TII, const WebAssemblyRegisterInfo *TRI) {
- DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
- DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
+ LLVM_DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
+ LLVM_DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
TII->reMaterialize(MBB, Insert, NewReg, 0, Def, *TRI);
@@ -526,7 +526,7 @@ static MachineInstr *RematerializeCheapDef(
MFI.stackifyVReg(NewReg);
ImposeStackOrdering(Clone);
- DEBUG(dbgs() << " - Cloned to "; Clone->dump());
+ LLVM_DEBUG(dbgs() << " - Cloned to "; Clone->dump());
// Shrink the interval.
bool IsDead = MRI.use_empty(Reg);
@@ -538,7 +538,7 @@ static MachineInstr *RematerializeCheapDef(
// If that was the last use of the original, delete the original.
if (IsDead) {
- DEBUG(dbgs() << " - Deleting original\n");
+ LLVM_DEBUG(dbgs() << " - Deleting original\n");
SlotIndex Idx = LIS.getInstructionIndex(Def).getRegSlot();
LIS.removePhysRegDefAt(WebAssembly::ARGUMENTS, Idx);
LIS.removeInterval(Reg);
@@ -573,7 +573,7 @@ static MachineInstr *MoveAndTeeForMultiUse(
unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB,
MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI,
MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) {
- DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump());
+ LLVM_DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump());
// Move Def into place.
MBB.splice(Insert, &MBB, Def);
@@ -609,8 +609,8 @@ static MachineInstr *MoveAndTeeForMultiUse(
ImposeStackOrdering(Def);
ImposeStackOrdering(Tee);
- DEBUG(dbgs() << " - Replaced register: "; Def->dump());
- DEBUG(dbgs() << " - Tee instruction: "; Tee->dump());
+ LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ LLVM_DEBUG(dbgs() << " - Tee instruction: "; Tee->dump());
return Def;
}
@@ -737,9 +737,9 @@ public:
} // end anonymous namespace
bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Register Stackifying **********\n"
- "********** Function: "
- << MF.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Register Stackifying **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
bool Changed = false;
MachineRegisterInfo &MRI = MF.getRegInfo();
diff --git a/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
index f66081174e90..f432b367d156 100644
--- a/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
@@ -62,7 +62,7 @@ FunctionPass *llvm::createWebAssemblyReplacePhysRegs() {
}
bool WebAssemblyReplacePhysRegs::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "********** Replace Physical Registers **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
diff --git a/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
index ac53151047f0..93c109f93901 100644
--- a/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
+++ b/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
@@ -76,7 +76,7 @@ static void RewriteP2Align(MachineInstr &MI, unsigned OperandNo) {
}
bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "********** Set p2align Operands **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
diff --git a/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp b/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
index f1c700f9e8f8..893e8484c4c6 100644
--- a/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
@@ -111,8 +111,8 @@ static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
continue;
Changed = true;
- DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
- << MI << "\n");
+ LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
+ << MI << "\n");
O.setReg(ToReg);
// If the store's def was previously dead, it is no longer.
@@ -170,7 +170,7 @@ static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
}
bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "********** Store Results **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
@@ -189,7 +189,7 @@ bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) {
assert(MRI.tracksLiveness() && "StoreResults expects liveness tracking");
for (auto &MBB : MF) {
- DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n');
for (auto &MI : MBB)
switch (MI.getOpcode()) {
default:
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp
index b3c491b3de5e..bac9d02d69c3 100644
--- a/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -103,7 +103,7 @@ StringRef llvm::X86Disassembler::GetInstrName(unsigned Opcode,
return MII->getName(Opcode);
}
-#define debug(s) DEBUG(Debug(__FILE__, __LINE__, s));
+#define debug(s) LLVM_DEBUG(Debug(__FILE__, __LINE__, s));
namespace llvm {
diff --git a/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index e89dd4972596..53fc97545908 100644
--- a/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -407,7 +407,7 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
if (LoadBase.isReg())
getBaseOperand(NewLoad).setIsKill(false);
- DEBUG(NewLoad->dump());
+ LLVM_DEBUG(NewLoad->dump());
// If the load and store are consecutive, use the loadInst location to
// reduce register pressure.
MachineInstr *StInst = StoreInst;
@@ -428,7 +428,7 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
assert(StoreSrcVReg.isReg() && "Expected virtual register");
NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
- DEBUG(NewStore->dump());
+ LLVM_DEBUG(NewStore->dump());
}
void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst,
@@ -674,7 +674,7 @@ bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
+ LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
// Look for a load then a store to XMM/YMM which look like a memcpy
findPotentiallylBlockedCopies(MF);
@@ -711,10 +711,10 @@ bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
// into smaller copies such that each smaller store that was causing
// a store block would now be copied separately.
MachineInstr *StoreInst = LoadStoreInstPair.second;
- DEBUG(dbgs() << "Blocked load and store instructions: \n");
- DEBUG(LoadInst->dump());
- DEBUG(StoreInst->dump());
- DEBUG(dbgs() << "Replaced with:\n");
+ LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
+ LLVM_DEBUG(LoadInst->dump());
+ LLVM_DEBUG(StoreInst->dump());
+ LLVM_DEBUG(dbgs() << "Replaced with:\n");
removeRedundantBlockingStores(BlockingStoresDispSizeMap);
breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
updateKillStatus(LoadInst, StoreInst);
@@ -726,7 +726,7 @@ bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
}
ForRemoval.clear();
BlockedLoadsStoresPairs.clear();
- DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);
+ LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);
return Changed;
}
diff --git a/lib/Target/X86/X86CmovConversion.cpp b/lib/Target/X86/X86CmovConversion.cpp
index 38baa90f4797..f73455cc31b8 100644
--- a/lib/Target/X86/X86CmovConversion.cpp
+++ b/lib/Target/X86/X86CmovConversion.cpp
@@ -169,8 +169,8 @@ bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
if (!EnableCmovConverter)
return false;
- DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
- << "**********\n");
+ LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
+ << "**********\n");
bool Changed = false;
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
@@ -776,7 +776,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
auto *NewCMOV = NewMIs.pop_back_val();
assert(X86::getCondFromCMovOpc(NewCMOV->getOpcode()) == OppCC &&
"Last new instruction isn't the expected CMOV!");
- DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
+ LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
if (&*MIItBegin == &MI)
MIItBegin = MachineBasicBlock::iterator(NewCMOV);
@@ -784,7 +784,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
// Sink whatever instructions were needed to produce the unfolded operand
// into the false block.
for (auto *NewMI : NewMIs) {
- DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
+ LLVM_DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
FalseMBB->insert(FalseInsertionPoint, NewMI);
// Re-map any operands that are from other cmovs to the inputs for this block.
for (auto &MOp : NewMI->uses()) {
@@ -846,8 +846,8 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
.addReg(Op2Reg)
.addMBB(MBB);
(void)MIB;
- DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
- DEBUG(dbgs() << "\tTo: "; MIB->dump());
+ LLVM_DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
+ LLVM_DEBUG(dbgs() << "\tTo: "; MIB->dump());
// Add this PHI to the rewrite table.
RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
diff --git a/lib/Target/X86/X86DomainReassignment.cpp b/lib/Target/X86/X86DomainReassignment.cpp
index b41640f7bd75..ef854dcc2a70 100644
--- a/lib/Target/X86/X86DomainReassignment.cpp
+++ b/lib/Target/X86/X86DomainReassignment.cpp
@@ -701,8 +701,9 @@ bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
if (DisableX86DomainReassignment)
return false;
- DEBUG(dbgs() << "***** Machine Function before Domain Reassignment *****\n");
- DEBUG(MF.print(dbgs()));
+ LLVM_DEBUG(
+ dbgs() << "***** Machine Function before Domain Reassignment *****\n");
+ LLVM_DEBUG(MF.print(dbgs()));
STI = &MF.getSubtarget<X86Subtarget>();
// GPR->K is the only transformation currently supported, bail out early if no
@@ -752,8 +753,9 @@ bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
DeleteContainerSeconds(Converters);
- DEBUG(dbgs() << "***** Machine Function after Domain Reassignment *****\n");
- DEBUG(MF.print(dbgs()));
+ LLVM_DEBUG(
+ dbgs() << "***** Machine Function after Domain Reassignment *****\n");
+ LLVM_DEBUG(MF.print(dbgs()));
return Changed;
}
diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp
index 46f13821bae7..d9bf60c2c9fb 100644
--- a/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/lib/Target/X86/X86FixupBWInsts.cpp
@@ -155,13 +155,13 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
MLI = &getAnalysis<MachineLoopInfo>();
LiveRegs.init(TII->getRegisterInfo());
- DEBUG(dbgs() << "Start X86FixupBWInsts\n";);
+ LLVM_DEBUG(dbgs() << "Start X86FixupBWInsts\n";);
// Process all basic blocks.
for (auto &MBB : MF)
processBasicBlock(MF, MBB);
- DEBUG(dbgs() << "End X86FixupBWInsts\n";);
+ LLVM_DEBUG(dbgs() << "End X86FixupBWInsts\n";);
return true;
}
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index df8c8340a61f..157b07d819b3 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -206,11 +206,11 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
TSM.init(&Func.getSubtarget());
TII = ST.getInstrInfo();
- DEBUG(dbgs() << "Start X86FixupLEAs\n";);
+ LLVM_DEBUG(dbgs() << "Start X86FixupLEAs\n";);
// Process all basic blocks.
for (MachineFunction::iterator I = Func.begin(), E = Func.end(); I != E; ++I)
processBasicBlock(Func, I);
- DEBUG(dbgs() << "End X86FixupLEAs\n";);
+ LLVM_DEBUG(dbgs() << "End X86FixupLEAs\n";);
return true;
}
@@ -408,9 +408,9 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
MachineInstr *NewMI = postRAConvertToLEA(MFI, MBI);
if (NewMI) {
++NumLEAs;
- DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MBI->dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MBI->dump(););
// now to replace with an equivalent LEA...
- DEBUG(dbgs() << "FixLEA: Replaced by: "; NewMI->dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: "; NewMI->dump(););
MFI->erase(MBI);
MachineBasicBlock::iterator J =
static_cast<MachineBasicBlock::iterator>(NewMI);
@@ -435,8 +435,8 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
return;
if (MI.getOperand(2).getImm() > 1)
return;
- DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
- DEBUG(dbgs() << "FixLEA: Replaced by: ";);
+ LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);
MachineInstr *NewMI = nullptr;
// Make ADD instruction for two registers writing to LEA's destination
if (SrcR1 != 0 && SrcR2 != 0) {
@@ -444,7 +444,7 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
const MachineOperand &Src = MI.getOperand(SrcR1 == DstR ? 3 : 1);
NewMI =
BuildMI(*MFI, I, MI.getDebugLoc(), ADDrr, DstR).addReg(DstR).add(Src);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
// Make ADD instruction for immediate
if (MI.getOperand(4).getImm() != 0) {
@@ -454,7 +454,7 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
NewMI = BuildMI(*MFI, I, MI.getDebugLoc(), ADDri, DstR)
.add(SrcR)
.addImm(MI.getOperand(4).getImm());
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
if (NewMI) {
MFI->erase(I);
@@ -504,8 +504,8 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(LEAOpcode));
const MCInstrDesc &ADDri = TII->get(getADDriFromLEA(LEAOpcode, Offset));
- DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MI.dump(););
- DEBUG(dbgs() << "FixLEA: Replaced by: ";);
+ LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MI.dump(););
+ LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);
// First try to replace LEA with one or two (for the 3-op LEA case)
// add instructions:
@@ -515,11 +515,11 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
const MachineOperand &Src = DstR == BaseR ? Index : Base;
MachineInstr *NewMI =
BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Src);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
// Create ADD instruction for the Offset in case of 3-Ops LEA.
if (hasLEAOffset(Offset)) {
NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
return NewMI;
}
@@ -535,11 +535,11 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
.add(IsInefficientBase ? Base : Index)
.addImm(0)
.add(Segment);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
// Create ADD instruction for the Offset in case of 3-Ops LEA.
if (hasLEAOffset(Offset)) {
NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
}
return NewMI;
}
@@ -551,11 +551,11 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
if (IsScale1 && !hasLEAOffset(Offset)) {
bool BIK = Base.isKill() && BaseR != IndexR;
TII->copyPhysReg(*MFI, MI, DL, DstR, BaseR, BIK);
- DEBUG(MI.getPrevNode()->dump(););
+ LLVM_DEBUG(MI.getPrevNode()->dump(););
MachineInstr *NewMI =
BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Index);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
return NewMI;
}
// lea offset(%base,%index,scale), %dst =>
@@ -567,10 +567,10 @@ FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
.add(Index)
.add(Offset)
.add(Segment);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
NewMI = BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Base);
- DEBUG(NewMI->dump(););
+ LLVM_DEBUG(NewMI->dump(););
return NewMI;
}
diff --git a/lib/Target/X86/X86FlagsCopyLowering.cpp b/lib/Target/X86/X86FlagsCopyLowering.cpp
index e31a47c52242..c18739254b0d 100644
--- a/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -338,8 +338,8 @@ static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
}
bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
- << " **********\n");
+ LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
+ << " **********\n");
auto &Subtarget = MF.getSubtarget<X86Subtarget>();
MRI = &MF.getRegInfo();
@@ -381,8 +381,9 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
// instructions. Until we have a motivating test case and fail to avoid
// it by changing other parts of LLVM's lowering, we refuse to handle
// this complex case here.
- DEBUG(dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
- CopyDefI.dump());
+ LLVM_DEBUG(
+ dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
+ CopyDefI.dump());
report_fatal_error(
"Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
}
@@ -406,7 +407,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
auto TestPos = CopyDefI.getIterator();
DebugLoc TestLoc = CopyDefI.getDebugLoc();
- DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());
+ LLVM_DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());
// Scan for usage of newly set EFLAGS so we can rewrite them. We just buffer
// jumps because their usage is very constrained.
@@ -443,7 +444,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
// other lowering transformation could induce this to happen, we do
// a hard check even in non-debug builds here.
if (&TestMBB != &UseMBB && !MDT->dominates(&TestMBB, &UseMBB)) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "ERROR: Encountered use that is not dominated by our test "
"basic block! Rewriting this would require inserting PHI "
"nodes to track the flag state across the CFG.\n\nTest "
@@ -477,7 +478,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
continue;
}
- DEBUG(dbgs() << " Rewriting use: "; MI.dump());
+ LLVM_DEBUG(dbgs() << " Rewriting use: "; MI.dump());
// Check the kill flag before we rewrite as that may change it.
if (FlagUse->isKill())
@@ -570,7 +571,8 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
if (MI.getOpcode() == TargetOpcode::COPY &&
(MI.getOperand(0).getReg() == X86::EFLAGS ||
MI.getOperand(1).getReg() == X86::EFLAGS)) {
- DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: "; MI.dump());
+ LLVM_DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: ";
+ MI.dump());
llvm_unreachable("Unlowered EFLAGS copy!");
}
#endif
@@ -608,7 +610,7 @@ unsigned X86FlagsCopyLoweringPass::promoteCondToReg(
auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
TII->get(X86::getSETFromCond(Cond)), Reg);
(void)SetI;
- DEBUG(dbgs() << " save cond: "; SetI->dump());
+ LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump());
++NumSetCCsInserted;
return Reg;
}
@@ -633,7 +635,7 @@ void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
auto TestI =
BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
(void)TestI;
- DEBUG(dbgs() << " test cond: "; TestI->dump());
+ LLVM_DEBUG(dbgs() << " test cond: "; TestI->dump());
++NumTestsInserted;
}
@@ -685,7 +687,7 @@ void X86FlagsCopyLoweringPass::rewriteArithmetic(
.addReg(CondReg)
.addImm(Addend);
(void)AddI;
- DEBUG(dbgs() << " add cond: "; AddI->dump());
+ LLVM_DEBUG(dbgs() << " add cond: "; AddI->dump());
++NumAddsInserted;
FlagUse.setIsKill(true);
}
@@ -715,7 +717,7 @@ void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
Inverted ? X86::COND_E : X86::COND_NE, TRI->getRegSizeInBits(CMovRC) / 8,
!CMovI.memoperands_empty())));
FlagUse.setIsKill(true);
- DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
+ LLVM_DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
}
void X86FlagsCopyLoweringPass::rewriteCondJmp(
@@ -739,7 +741,7 @@ void X86FlagsCopyLoweringPass::rewriteCondJmp(
X86::GetCondBranchFromCond(Inverted ? X86::COND_E : X86::COND_NE)));
const int ImplicitEFLAGSOpIdx = 1;
JmpI.getOperand(ImplicitEFLAGSOpIdx).setIsKill(true);
- DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
+ LLVM_DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
}
void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index ffdb949918ff..ae748901164a 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -435,7 +435,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
PrevMI = &*std::prev(I);
++NumFP; // Keep track of # of pseudo instrs
- DEBUG(dbgs() << "\nFPInst:\t" << MI);
+ LLVM_DEBUG(dbgs() << "\nFPInst:\t" << MI);
// Get dead variables list now because the MI pointer may be deleted as part
// of processing!
@@ -465,13 +465,13 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
// is in the clobber list and marked dead might not be live on the stack.
static_assert(X86::FP7 - X86::FP0 == 7, "sequential FP regnumbers");
if (Reg >= X86::FP0 && Reg <= X86::FP6 && isLive(Reg-X86::FP0)) {
- DEBUG(dbgs() << "Register FP#" << Reg-X86::FP0 << " is dead!\n");
+ LLVM_DEBUG(dbgs() << "Register FP#" << Reg - X86::FP0 << " is dead!\n");
freeStackSlotAfter(I, Reg-X86::FP0);
}
}
// Print out all of the instructions this expanded to, if -debug
- DEBUG({
+ LLVM_DEBUG({
MachineBasicBlock::iterator PrevI = PrevMI;
if (I == PrevI) {
dbgs() << "Just deleted pseudo instruction\n";
@@ -500,15 +500,15 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
/// setupBlockStack - Use the live bundles to set up our model of the stack
/// to match predecessors' live out stack.
void FPS::setupBlockStack() {
- DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB)
- << " derived from " << MBB->getName() << ".\n");
+ LLVM_DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB)
+ << " derived from " << MBB->getName() << ".\n");
StackTop = 0;
// Get the live-in bundle for MBB.
const LiveBundle &Bundle =
LiveBundles[Bundles->getBundle(MBB->getNumber(), false)];
if (!Bundle.Mask) {
- DEBUG(dbgs() << "Block has no FP live-ins.\n");
+ LLVM_DEBUG(dbgs() << "Block has no FP live-ins.\n");
return;
}
@@ -517,8 +517,8 @@ void FPS::setupBlockStack() {
// Push the fixed live-in registers.
for (unsigned i = Bundle.FixCount; i > 0; --i) {
- DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %fp"
- << unsigned(Bundle.FixStack[i-1]) << '\n');
+ LLVM_DEBUG(dbgs() << "Live-in st(" << (i - 1) << "): %fp"
+ << unsigned(Bundle.FixStack[i - 1]) << '\n');
pushReg(Bundle.FixStack[i-1]);
}
@@ -527,7 +527,7 @@ void FPS::setupBlockStack() {
// to be revived at the end of a short block. It might save a few instrs.
unsigned Mask = calcLiveInMask(MBB, /*RemoveFPs=*/true);
adjustLiveRegs(Mask, MBB->begin());
- DEBUG(MBB->dump());
+ LLVM_DEBUG(MBB->dump());
}
/// finishBlockStack - Revive live-outs that are implicitly defined out of
@@ -539,8 +539,8 @@ void FPS::finishBlockStack() {
if (MBB->succ_empty())
return;
- DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB)
- << " derived from " << MBB->getName() << ".\n");
+ LLVM_DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB)
+ << " derived from " << MBB->getName() << ".\n");
// Get MBB's live-out bundle.
unsigned BundleIdx = Bundles->getBundle(MBB->getNumber(), true);
@@ -552,18 +552,18 @@ void FPS::finishBlockStack() {
adjustLiveRegs(Bundle.Mask, Term);
if (!Bundle.Mask) {
- DEBUG(dbgs() << "No live-outs.\n");
+ LLVM_DEBUG(dbgs() << "No live-outs.\n");
return;
}
// Has the stack order been fixed yet?
- DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
+ LLVM_DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
if (Bundle.isFixed()) {
- DEBUG(dbgs() << "Shuffling stack to match.\n");
+ LLVM_DEBUG(dbgs() << "Shuffling stack to match.\n");
shuffleStackTop(Bundle.FixStack, Bundle.FixCount, Term);
} else {
// Not fixed yet, we get to choose.
- DEBUG(dbgs() << "Fixing stack order now.\n");
+ LLVM_DEBUG(dbgs() << "Fixing stack order now.\n");
Bundle.FixCount = StackTop;
for (unsigned i = 0; i < StackTop; ++i)
Bundle.FixStack[i] = getStackEntry(i);
@@ -895,7 +895,8 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
while (Kills && Defs) {
unsigned KReg = countTrailingZeros(Kills);
unsigned DReg = countTrailingZeros(Defs);
- DEBUG(dbgs() << "Renaming %fp" << KReg << " as imp %fp" << DReg << "\n");
+ LLVM_DEBUG(dbgs() << "Renaming %fp" << KReg << " as imp %fp" << DReg
+ << "\n");
std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
std::swap(RegMap[KReg], RegMap[DReg]);
Kills &= ~(1 << KReg);
@@ -909,7 +910,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
unsigned KReg = getStackEntry(0);
if (!(Kills & (1 << KReg)))
break;
- DEBUG(dbgs() << "Popping %fp" << KReg << "\n");
+ LLVM_DEBUG(dbgs() << "Popping %fp" << KReg << "\n");
popStackAfter(I2);
Kills &= ~(1 << KReg);
}
@@ -918,7 +919,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Manually kill the rest.
while (Kills) {
unsigned KReg = countTrailingZeros(Kills);
- DEBUG(dbgs() << "Killing %fp" << KReg << "\n");
+ LLVM_DEBUG(dbgs() << "Killing %fp" << KReg << "\n");
freeStackSlotBefore(I, KReg);
Kills &= ~(1 << KReg);
}
@@ -926,14 +927,14 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
// Load zeros for all the imp-defs.
while(Defs) {
unsigned DReg = countTrailingZeros(Defs);
- DEBUG(dbgs() << "Defining %fp" << DReg << " as 0\n");
+ LLVM_DEBUG(dbgs() << "Defining %fp" << DReg << " as 0\n");
BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
pushReg(DReg);
Defs &= ~(1 << DReg);
}
// Now we should have the correct registers live.
- DEBUG(dumpStack());
+ LLVM_DEBUG(dumpStack());
assert(StackTop == countPopulation(Mask) && "Live count mismatch");
}
@@ -956,7 +957,7 @@ void FPS::shuffleStackTop(const unsigned char *FixStack,
if (FixCount > 0)
moveToTop(OldReg, I);
}
- DEBUG(dumpStack());
+ LLVM_DEBUG(dumpStack());
}
@@ -1468,7 +1469,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
case TargetOpcode::IMPLICIT_DEF: {
// All FP registers must be explicitly defined, so load a 0 instead.
unsigned Reg = MI.getOperand(0).getReg() - X86::FP0;
- DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
+ LLVM_DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
BuildMI(*MBB, Inst, MI.getDebugLoc(), TII->get(X86::LD_F0));
pushReg(Reg);
break;
@@ -1573,8 +1574,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
MI.emitError("implicitly popped regs must be last on the x87 stack");
unsigned NumSTPopped = countTrailingOnes(STPopped);
- DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
- << NumSTPopped << ", and defines " << NumSTDefs << " regs.\n");
+ LLVM_DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
+ << NumSTPopped << ", and defines " << NumSTDefs
+ << " regs.\n");
#ifndef NDEBUG
// If any input operand uses constraint "f", all output register
@@ -1612,7 +1614,10 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
STUsesArray[I] = I;
shuffleStackTop(STUsesArray, NumSTUses, Inst);
- DEBUG({dbgs() << "Before asm: "; dumpStack();});
+ LLVM_DEBUG({
+ dbgs() << "Before asm: ";
+ dumpStack();
+ });
// With the stack layout fixed, rewrite the FP registers.
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -1660,7 +1665,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// We want to leave I pointing to the previous instruction, but what if we
// just erased the first instruction?
if (Inst == MBB->begin()) {
- DEBUG(dbgs() << "Inserting dummy KILL\n");
+ LLVM_DEBUG(dbgs() << "Inserting dummy KILL\n");
Inst = BuildMI(*MBB, Inst, DebugLoc(), TII->get(TargetOpcode::KILL));
} else
--Inst;
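
The X86FloatingPoint.cpp hunks above exercise both shapes the renamed macro accepts: a single streamed expression and a braced statement block. A minimal illustrative sketch of the two forms follows; the DEBUG_TYPE value here is a hypothetical stand-in, while KReg and dumpStack() mirror identifiers visible in the hunks above.

    #define DEBUG_TYPE "x87-fp"   // hypothetical tag, used only for -debug-only filtering

    // Expression form: one streamed statement.
    LLVM_DEBUG(dbgs() << "Popping %fp" << KReg << '\n');

    // Statement-block form: several statements wrapped in braces.
    LLVM_DEBUG({
      dbgs() << "Before asm: ";
      dumpStack();
    });
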
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 73455912d2ca..c041d0212b06 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1308,10 +1308,10 @@ static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned Depth) {
SDLoc dl(N);
- DEBUG({
- dbgs() << "MatchAddress: ";
- AM.dump(CurDAG);
- });
+ LLVM_DEBUG({
+ dbgs() << "MatchAddress: ";
+ AM.dump(CurDAG);
+ });
// Limit recursion.
if (Depth > 5)
return matchAddressBase(N, AM);
@@ -2744,7 +2744,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
SDLoc dl(Node);
if (Node->isMachineOpcode()) {
- DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
Node->setNodeId(-1);
return; // Already selected.
}
@@ -3025,7 +3025,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
InFlag = ResLo.getValue(2);
}
ReplaceUses(SDValue(Node, 0), ResLo);
- DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
// Copy the high half of the result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
@@ -3036,7 +3037,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
InFlag = ResHi.getValue(2);
}
ReplaceUses(SDValue(Node, 1), ResHi);
- DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
CurDAG->RemoveDeadNode(Node);
@@ -3198,7 +3200,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
}
ReplaceUses(SDValue(Node, 1), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
// Copy the division (low) result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
@@ -3206,7 +3209,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
LoReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 0), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
// Copy the remainder (high) result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
@@ -3214,7 +3218,8 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
HiReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 1), Result);
- DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
+ dbgs() << '\n');
}
CurDAG->RemoveDeadNode(Node);
return;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 31ce4bdc767b..fe86cf0e8ffd 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6847,8 +6847,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
report_fatal_error("Unable to copy EFLAGS physical register!");
}
- DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
- << " to " << RI.getName(DestReg) << '\n');
+ LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
+ << RI.getName(DestReg) << '\n');
llvm_unreachable("Cannot emit physreg copy instruction");
}
diff --git a/lib/Target/X86/X86InstructionSelector.cpp b/lib/Target/X86/X86InstructionSelector.cpp
index 4996288a193d..36d36cb11d72 100644
--- a/lib/Target/X86/X86InstructionSelector.cpp
+++ b/lib/Target/X86/X86InstructionSelector.cpp
@@ -296,8 +296,8 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
}
@@ -333,7 +333,7 @@ bool X86InstructionSelector::select(MachineInstr &I,
if (selectImpl(I, CoverageInfo))
return true;
- DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
+ LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
// TODO: This should be implemented by tblgen.
switch (I.getOpcode()) {
@@ -503,7 +503,7 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
- DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
return false;
}
@@ -675,8 +675,8 @@ bool X86InstructionSelector::selectTurnIntoCOPY(
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
I.setDesc(TII.get(X86::COPY));
@@ -700,8 +700,8 @@ bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
if (DstRB.getID() != SrcRB.getID()) {
- DEBUG(dbgs() << TII.getName(I.getOpcode())
- << " input/output on different banks\n");
+ LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
+ << " input/output on different banks\n");
return false;
}
@@ -738,8 +738,8 @@ bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << "\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << "\n");
return false;
}
@@ -792,8 +792,8 @@ bool X86InstructionSelector::selectZext(MachineInstr &I,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
@@ -894,8 +894,8 @@ bool X86InstructionSelector::selectAnyext(MachineInstr &I,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
@@ -1111,7 +1111,7 @@ bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
return false;
}
@@ -1148,7 +1148,7 @@ bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
return false;
}
@@ -1392,8 +1392,8 @@ bool X86InstructionSelector::selectImplicitDefOrPHI(
const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
}
@@ -1544,8 +1544,8 @@ bool X86InstructionSelector::selectSDiv(MachineInstr &I,
if (!RBI.constrainGenericRegister(DividentReg, *RegRC, MRI) ||
!RBI.constrainGenericRegister(DiviserReg, *RegRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
- DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
- << " operand\n");
+ LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
return false;
}
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 6329375720b4..42db51b3cf01 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -541,7 +541,7 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
MRI->clearKillFlags(DefMI->getOperand(0).getReg());
++NumSubstLEAs;
- DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););
+ LLVM_DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););
// Change instruction operands.
MI.getOperand(MemOpNo + X86::AddrBaseReg)
@@ -553,7 +553,7 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
MI.getOperand(MemOpNo + X86::AddrSegmentReg)
.ChangeToRegister(X86::NoRegister, false);
- DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););
+ LLVM_DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););
Changed = true;
}
@@ -649,7 +649,8 @@ bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
MRI->clearKillFlags(FirstVReg);
++NumRedundantLEAs;
- DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: "; Last.dump(););
+ LLVM_DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: ";
+ Last.dump(););
// By this moment, all of the Last LEA's uses must be replaced. So we
// can freely remove it.
diff --git a/lib/Target/X86/X86RetpolineThunks.cpp b/lib/Target/X86/X86RetpolineThunks.cpp
index f37c3fbc3994..250deb3523b4 100644
--- a/lib/Target/X86/X86RetpolineThunks.cpp
+++ b/lib/Target/X86/X86RetpolineThunks.cpp
@@ -91,7 +91,7 @@ bool X86RetpolineThunks::doInitialization(Module &M) {
}
bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << getPassName() << '\n');
+ LLVM_DEBUG(dbgs() << getPassName() << '\n');
TM = &MF.getTarget();;
STI = &MF.getSubtarget<X86Subtarget>();
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index a137cb4ed388..963016548f2f 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -231,9 +231,9 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
else
llvm_unreachable("Not 16-bit, 32-bit or 64-bit mode!");
- DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
- << ", 3DNowLevel " << X863DNowLevel
- << ", 64bit " << HasX86_64 << "\n");
+ LLVM_DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
+ << ", 3DNowLevel " << X863DNowLevel << ", 64bit "
+ << HasX86_64 << "\n");
assert((!In64BitMode || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 224262830b12..f882b760927c 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -264,8 +264,8 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
}
}
- DEBUG(dbgs() << "MBB #" << MBB.getNumber() << " exit state: "
- << getBlockExitStateName(CurState) << '\n');
+ LLVM_DEBUG(dbgs() << "MBB #" << MBB.getNumber() << " exit state: "
+ << getBlockExitStateName(CurState) << '\n');
if (CurState == EXITS_DIRTY)
for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
@@ -341,8 +341,8 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
// successors need to be added to the worklist (if they haven't been
// already).
if (BBState.ExitState == PASS_THROUGH) {
- DEBUG(dbgs() << "MBB #" << MBB.getNumber()
- << " was Pass-through, is now Dirty-out.\n");
+ LLVM_DEBUG(dbgs() << "MBB #" << MBB.getNumber()
+ << " was Pass-through, is now Dirty-out.\n");
for (MachineBasicBlock *Succ : MBB.successors())
addDirtySuccessor(*Succ);
}
diff --git a/lib/Target/X86/X86WinEHState.cpp b/lib/Target/X86/X86WinEHState.cpp
index 6d6dedc60736..dde9c734f492 100644
--- a/lib/Target/X86/X86WinEHState.cpp
+++ b/lib/Target/X86/X86WinEHState.cpp
@@ -695,10 +695,10 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
Worklist.push_back(BB);
continue;
}
- DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
- << " InitialState=" << InitialState << '\n');
- DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
- << " FinalState=" << FinalState << '\n');
+ LLVM_DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " InitialState=" << InitialState << '\n');
+ LLVM_DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " FinalState=" << FinalState << '\n');
InitialStates.insert({BB, InitialState});
FinalStates.insert({BB, FinalState});
}
@@ -743,8 +743,8 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
continue;
int PrevState = getPredState(FinalStates, F, ParentBaseState, BB);
- DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
- << " PrevState=" << PrevState << '\n');
+ LLVM_DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " PrevState=" << PrevState << '\n');
for (Instruction &I : *BB) {
CallSite CS(&I);
diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp
index 70376d40a37f..1915aaedc35d 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -274,14 +274,13 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int StackSize = MF.getFrameInfo().getStackSize();
#ifndef NDEBUG
- DEBUG(errs() << "\nFunction : "
- << MF.getName() << "\n");
- DEBUG(errs() << "<--------->\n");
- DEBUG(MI.print(errs()));
- DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n");
- DEBUG(errs() << "FrameOffset : " << Offset << "\n");
- DEBUG(errs() << "StackSize : " << StackSize << "\n");
- #endif
+ LLVM_DEBUG(errs() << "\nFunction : " << MF.getName() << "\n");
+ LLVM_DEBUG(errs() << "<--------->\n");
+ LLVM_DEBUG(MI.print(errs()));
+ LLVM_DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n");
+ LLVM_DEBUG(errs() << "FrameOffset : " << Offset << "\n");
+ LLVM_DEBUG(errs() << "StackSize : " << StackSize << "\n");
+#endif
Offset += StackSize;
@@ -299,7 +298,8 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
assert(Offset%4 == 0 && "Misaligned stack offset");
- DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
+ LLVM_DEBUG(errs() << "Offset : " << Offset << "\n"
+ << "<--------->\n");
Offset/=4;
unsigned Reg = MI.getOperand(0).getReg();
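
The XCoreRegisterInfo.cpp hunk above keeps its debug prints inside an explicit #ifndef NDEBUG guard even after the rename. That guard is belt-and-braces: the macro already compiles to nothing in release builds. A rough sketch of how llvm/Support/Debug.h defines it (the exact spelling in the header may differ slightly):

    #ifndef NDEBUG
    #define LLVM_DEBUG(X)                                                      \
      do {                                                                     \
        if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(DEBUG_TYPE)) {     \
          X;                                                                   \
        }                                                                      \
      } while (false)
    #else
    #define LLVM_DEBUG(X) do { } while (false)
    #endif
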
diff --git a/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp b/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
index c72df2d70a46..8289b2d68f8a 100644
--- a/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
+++ b/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
@@ -405,9 +405,10 @@ bool TruncInstCombine::run(Function &F) {
CurrentTruncInst = Worklist.pop_back_val();
if (Type *NewDstSclTy = getBestTruncatedType()) {
- DEBUG(dbgs() << "ICE: TruncInstCombine reducing type of expression dag "
- "dominated by: "
- << CurrentTruncInst << '\n');
+ LLVM_DEBUG(
+ dbgs() << "ICE: TruncInstCombine reducing type of expression dag "
+ "dominated by: "
+ << CurrentTruncInst << '\n');
ReduceExpressionDag(NewDstSclTy);
MadeIRChange = true;
}
diff --git a/lib/Transforms/Coroutines/CoroFrame.cpp b/lib/Transforms/Coroutines/CoroFrame.cpp
index 1676e7a57360..c4172c035dc3 100644
--- a/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -106,8 +106,8 @@ struct SuspendCrossingInfo {
assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
bool const Result = Block[UseIndex].Kills[DefIndex];
- DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
- << " answer is " << Result << "\n");
+ LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
+ << " answer is " << Result << "\n");
return Result;
}
@@ -195,8 +195,8 @@ SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
bool Changed;
do {
- DEBUG(dbgs() << "iteration " << ++Iteration);
- DEBUG(dbgs() << "==============\n");
+ LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
+ LLVM_DEBUG(dbgs() << "==============\n");
Changed = false;
for (size_t I = 0; I < N; ++I) {
@@ -240,20 +240,20 @@ SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
if (S.Kills != SavedKills) {
- DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
- << "\n");
- DEBUG(dump("S.Kills", S.Kills));
- DEBUG(dump("SavedKills", SavedKills));
+ LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
+ << "\n");
+ LLVM_DEBUG(dump("S.Kills", S.Kills));
+ LLVM_DEBUG(dump("SavedKills", SavedKills));
}
if (S.Consumes != SavedConsumes) {
- DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
- DEBUG(dump("S.Consume", S.Consumes));
- DEBUG(dump("SavedCons", SavedConsumes));
+ LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
+ LLVM_DEBUG(dump("S.Consume", S.Consumes));
+ LLVM_DEBUG(dump("SavedCons", SavedConsumes));
}
}
}
} while (Changed);
- DEBUG(dump());
+ LLVM_DEBUG(dump());
}
#undef DEBUG_TYPE // "coro-suspend-crossing"
@@ -821,7 +821,7 @@ static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
for (User *U : CurrentValue->users()) {
Instruction *I = cast<Instruction>(U);
if (!DT.dominates(CoroBegin, I)) {
- DEBUG(dbgs() << "will move: " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "will move: " << *I << "\n");
// TODO: Make this more robust. Currently if we run into a situation
// where simple instruction move won't work we panic and
@@ -906,7 +906,7 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
break;
// Rewrite materializable instructions to be materialized at the use point.
- DEBUG(dump("Materializations", Spills));
+ LLVM_DEBUG(dump("Materializations", Spills));
rewriteMaterializableInstructions(Builder, Spills);
Spills.clear();
}
@@ -936,7 +936,7 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
Spills.emplace_back(&I, U);
}
}
- DEBUG(dump("Spills", Spills));
+ LLVM_DEBUG(dump("Spills", Spills));
moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
Shape.FrameTy = buildFrameType(F, Shape, Spills);
Shape.FramePtr = insertSpills(Spills, Shape);
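
CoroFrame.cpp above also shows the per-region DEBUG_TYPE pattern: the SuspendCrossingInfo code runs under its own tag and #undefs it afterwards (the "coro-suspend-crossing" comment in the hunk), so its output can be selected independently. A short sketch of the pattern, with the tag taken from the comment above and the rest illustrative:

    #define DEBUG_TYPE "coro-suspend-crossing"
    // ... LLVM_DEBUG(...) calls attributed to this tag ...
    #undef DEBUG_TYPE  // subsequent code defines the file's main tag again

In an assertions-enabled build the output is then enabled selectively with something like -debug-only=coro-suspend-crossing on the opt command line, or unconditionally with -debug.
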
diff --git a/lib/Transforms/Coroutines/CoroSplit.cpp b/lib/Transforms/Coroutines/CoroSplit.cpp
index 6ab56f25e8b1..e764edf7bc57 100644
--- a/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -654,7 +654,7 @@ getNotRelocatableInstructions(CoroBeginInst *CoroBegin,
// set.
do {
Instruction *Current = Work.pop_back_val();
- DEBUG(dbgs() << "CoroSplit: Will not relocate: " << *Current << "\n");
+ LLVM_DEBUG(dbgs() << "CoroSplit: Will not relocate: " << *Current << "\n");
DoNotRelocate.insert(Current);
for (Value *U : Current->operands()) {
auto *I = dyn_cast<Instruction>(U);
@@ -850,8 +850,8 @@ struct CoroSplit : public CallGraphSCCPass {
for (Function *F : Coroutines) {
Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
StringRef Value = Attr.getValueAsString();
- DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
- << "' state: " << Value << "\n");
+ LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
+ << "' state: " << Value << "\n");
if (Value == UNPREPARED_FOR_SPLIT) {
prepareForSplit(*F, CG);
continue;
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index e159920116ca..f2c2b55b1c5b 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -220,8 +220,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
NF->setSubprogram(F->getSubprogram());
F->setSubprogram(nullptr);
- DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
- << "From: " << *F);
+ LLVM_DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
+ << "From: " << *F);
// Recompute the parameter attributes list based on the new arguments for
// the function.
@@ -426,8 +426,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
I2->setName(I->getName() + ".val");
LI->replaceAllUsesWith(&*I2);
LI->eraseFromParent();
- DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
- << "' in function '" << F->getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
+ << "' in function '" << F->getName() << "'\n");
} else {
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
IndicesVector Operands;
@@ -453,8 +453,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
NewName += ".val";
TheArg->setName(NewName);
- DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
- << "' of function '" << NF->getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
+ << "' of function '" << NF->getName() << "'\n");
// All of the uses must be load instructions. Replace them all with
// the argument specified by ArgNo.
@@ -688,11 +688,11 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByValOrInAlloca,
// to do.
if (ToPromote.find(Operands) == ToPromote.end()) {
if (MaxElements > 0 && ToPromote.size() == MaxElements) {
- DEBUG(dbgs() << "argpromotion not promoting argument '"
- << Arg->getName()
- << "' because it would require adding more "
- << "than " << MaxElements
- << " arguments to the function.\n");
+ LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '"
+ << Arg->getName()
+ << "' because it would require adding more "
+ << "than " << MaxElements
+ << " arguments to the function.\n");
// We limit aggregate promotion to only promoting up to a fixed number
// of elements of the aggregate.
return false;
@@ -901,11 +901,11 @@ promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
if (isSafeToPromote) {
if (StructType *STy = dyn_cast<StructType>(AgTy)) {
if (MaxElements > 0 && STy->getNumElements() > MaxElements) {
- DEBUG(dbgs() << "argpromotion disable promoting argument '"
- << PtrArg->getName()
- << "' because it would require adding more"
- << " than " << MaxElements
- << " arguments to the function.\n");
+ LLVM_DEBUG(dbgs() << "argpromotion disable promoting argument '"
+ << PtrArg->getName()
+ << "' because it would require adding more"
+ << " than " << MaxElements
+ << " arguments to the function.\n");
continue;
}
diff --git a/lib/Transforms/IPO/BlockExtractor.cpp b/lib/Transforms/IPO/BlockExtractor.cpp
index 9d30efdb7c8d..ff5ee817da49 100644
--- a/lib/Transforms/IPO/BlockExtractor.cpp
+++ b/lib/Transforms/IPO/BlockExtractor.cpp
@@ -147,8 +147,9 @@ bool BlockExtractor::runOnModule(Module &M) {
// Check if the module contains BB.
if (BB->getParent()->getParent() != &M)
report_fatal_error("Invalid basic block");
- DEBUG(dbgs() << "BlockExtractor: Extracting " << BB->getParent()->getName()
- << ":" << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "BlockExtractor: Extracting "
+ << BB->getParent()->getName() << ":" << BB->getName()
+ << "\n");
SmallVector<BasicBlock *, 2> BlocksToExtractVec;
BlocksToExtractVec.push_back(BB);
if (const InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
@@ -161,8 +162,8 @@ bool BlockExtractor::runOnModule(Module &M) {
// Erase the functions.
if (EraseFunctions || BlockExtractorEraseFuncs) {
for (Function *F : Functions) {
- DEBUG(dbgs() << "BlockExtractor: Trying to delete " << F->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "BlockExtractor: Trying to delete " << F->getName()
+ << "\n");
F->deleteBody();
}
// Set linkage as ExternalLinkage to avoid erasing unreachable functions.
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 4ef8e4b509a3..31e771da3bd3 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -529,8 +529,8 @@ void DeadArgumentEliminationPass::SurveyFunction(const Function &F) {
}
if (HasMustTailCalls) {
- DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName()
- << " has musttail calls\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName()
+ << " has musttail calls\n");
}
if (!F.hasLocalLinkage() && (!ShouldHackArguments || F.isIntrinsic())) {
@@ -538,8 +538,9 @@ void DeadArgumentEliminationPass::SurveyFunction(const Function &F) {
return;
}
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Inspecting callers for fn: "
- << F.getName() << "\n");
+ LLVM_DEBUG(
+ dbgs() << "DeadArgumentEliminationPass - Inspecting callers for fn: "
+ << F.getName() << "\n");
// Keep track of the number of live retvals, so we can skip checks once all
// of them turn out to be live.
unsigned NumLiveRetVals = 0;
@@ -606,16 +607,16 @@ void DeadArgumentEliminationPass::SurveyFunction(const Function &F) {
}
if (HasMustTailCallers) {
- DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName()
- << " has musttail callers\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - " << F.getName()
+ << " has musttail callers\n");
}
// Now we've inspected all callers, record the liveness of our return values.
for (unsigned i = 0; i != RetCount; ++i)
MarkValue(CreateRet(&F, i), RetValLiveness[i], MaybeLiveRetUses[i]);
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Inspecting args for fn: "
- << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Inspecting args for fn: "
+ << F.getName() << "\n");
// Now, check all of our arguments.
unsigned i = 0;
@@ -674,8 +675,8 @@ void DeadArgumentEliminationPass::MarkValue(const RetOrArg &RA, Liveness L,
/// mark any values that are used as this function's parameters or by its return
/// values (according to Uses) live as well.
void DeadArgumentEliminationPass::MarkLive(const Function &F) {
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Intrinsically live fn: "
- << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Intrinsically live fn: "
+ << F.getName() << "\n");
// Mark the function as live.
LiveFunctions.insert(&F);
// Mark all arguments as live.
@@ -696,8 +697,8 @@ void DeadArgumentEliminationPass::MarkLive(const RetOrArg &RA) {
if (!LiveValues.insert(RA).second)
return; // We were already marked Live.
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Marking "
- << RA.getDescription() << " live\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Marking "
+ << RA.getDescription() << " live\n");
PropagateLiveness(RA);
}
@@ -755,9 +756,9 @@ bool DeadArgumentEliminationPass::RemoveDeadStuffFromFunction(Function *F) {
HasLiveReturnedArg |= PAL.hasParamAttribute(i, Attribute::Returned);
} else {
++NumArgumentsEliminated;
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Removing argument " << i
- << " (" << I->getName() << ") from " << F->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Removing argument "
+ << i << " (" << I->getName() << ") from "
+ << F->getName() << "\n");
}
}
@@ -800,8 +801,9 @@ bool DeadArgumentEliminationPass::RemoveDeadStuffFromFunction(Function *F) {
NewRetIdxs[i] = RetTypes.size() - 1;
} else {
++NumRetValsEliminated;
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Removing return value "
- << i << " from " << F->getName() << "\n");
+ LLVM_DEBUG(
+ dbgs() << "DeadArgumentEliminationPass - Removing return value "
+ << i << " from " << F->getName() << "\n");
}
}
if (RetTypes.size() > 1) {
@@ -1084,7 +1086,7 @@ PreservedAnalyses DeadArgumentEliminationPass::run(Module &M,
// removed. We can do this if they never call va_start. This loop cannot be
// fused with the next loop, because deleting a function invalidates
// information computed while surveying other functions.
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Deleting dead varargs\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Deleting dead varargs\n");
for (Module::iterator I = M.begin(), E = M.end(); I != E; ) {
Function &F = *I++;
if (F.getFunctionType()->isVarArg())
@@ -1095,7 +1097,7 @@ PreservedAnalyses DeadArgumentEliminationPass::run(Module &M,
// We assume all arguments are dead unless proven otherwise (allowing us to
// determine that dead arguments passed into recursive functions are dead).
//
- DEBUG(dbgs() << "DeadArgumentEliminationPass - Determining liveness\n");
+ LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Determining liveness\n");
for (auto &F : M)
SurveyFunction(F);
diff --git a/lib/Transforms/IPO/ForceFunctionAttrs.cpp b/lib/Transforms/IPO/ForceFunctionAttrs.cpp
index 16d78e6b00cc..37273f975417 100644
--- a/lib/Transforms/IPO/ForceFunctionAttrs.cpp
+++ b/lib/Transforms/IPO/ForceFunctionAttrs.cpp
@@ -75,8 +75,8 @@ static void addForcedAttributes(Function &F) {
auto Kind = parseAttrKind(KV.second);
if (Kind == Attribute::None) {
- DEBUG(dbgs() << "ForcedAttribute: " << KV.second
- << " unknown or not handled!\n");
+ LLVM_DEBUG(dbgs() << "ForcedAttribute: " << KV.second
+ << " unknown or not handled!\n");
continue;
}
if (F.hasFnAttribute(Kind))
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index eef0738197be..91c855021fd0 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -1013,7 +1013,8 @@ static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
if (!Speculative) {
// Mark the function eagerly since we may discover a function
// which prevents us from speculating about the entire SCC
- DEBUG(dbgs() << "Eagerly marking " << F->getName() << " as nonnull\n");
+ LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
+ << " as nonnull\n");
F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
++NumNonNullReturn;
MadeChange = true;
@@ -1032,7 +1033,7 @@ static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
!F->getReturnType()->isPointerTy())
continue;
- DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
+ LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
++NumNonNullReturn;
MadeChange = true;
@@ -1218,8 +1219,8 @@ static bool inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes) {
return InstrBreaksNonConvergent(I, SCCNodes);
},
[](Function &F) {
- DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
+ << "\n");
F.setNotConvergent();
},
/* RequiresExactDefinition= */ false});
@@ -1239,7 +1240,8 @@ static bool inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes) {
return InstrBreaksNonThrowing(I, SCCNodes);
},
[](Function &F) {
- DEBUG(dbgs() << "Adding nounwind attr to fn " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Adding nounwind attr to fn " << F.getName() << "\n");
F.setDoesNotThrow();
++NumNoUnwind;
},
diff --git a/lib/Transforms/IPO/FunctionImport.cpp b/lib/Transforms/IPO/FunctionImport.cpp
index 246d75caefa2..2ac01095210d 100644
--- a/lib/Transforms/IPO/FunctionImport.cpp
+++ b/lib/Transforms/IPO/FunctionImport.cpp
@@ -136,7 +136,7 @@ static cl::opt<bool>
static std::unique_ptr<Module> loadFile(const std::string &FileName,
LLVMContext &Context) {
SMDiagnostic Err;
- DEBUG(dbgs() << "Loading '" << FileName << "'\n");
+ LLVM_DEBUG(dbgs() << "Loading '" << FileName << "'\n");
// Metadata isn't loaded until functions are imported, to minimize
// the memory overhead.
std::unique_ptr<Module> Result =
@@ -249,11 +249,12 @@ static void computeImportForReferencedGlobals(
StringMap<FunctionImporter::ExportSetTy> *ExportLists) {
for (auto &VI : Summary.refs()) {
if (DefinedGVSummaries.count(VI.getGUID())) {
- DEBUG(dbgs() << "Ref ignored! Target already in destination module.\n");
+ LLVM_DEBUG(
+ dbgs() << "Ref ignored! Target already in destination module.\n");
continue;
}
- DEBUG(dbgs() << " ref -> " << VI.getGUID() << "\n");
+ LLVM_DEBUG(dbgs() << " ref -> " << VI.getGUID() << "\n");
for (auto &RefSummary : VI.getSummaryList())
if (RefSummary->getSummaryKind() == GlobalValueSummary::GlobalVarKind &&
@@ -283,12 +284,12 @@ static void computeImportForFunction(
static int ImportCount = 0;
for (auto &Edge : Summary.calls()) {
ValueInfo VI = Edge.first;
- DEBUG(dbgs() << " edge -> " << VI.getGUID() << " Threshold:" << Threshold
- << "\n");
+ LLVM_DEBUG(dbgs() << " edge -> " << VI.getGUID()
+ << " Threshold:" << Threshold << "\n");
if (ImportCutoff >= 0 && ImportCount >= ImportCutoff) {
- DEBUG(dbgs() << "ignored! import-cutoff value of " << ImportCutoff
- << " reached.\n");
+ LLVM_DEBUG(dbgs() << "ignored! import-cutoff value of " << ImportCutoff
+ << " reached.\n");
continue;
}
@@ -297,7 +298,7 @@ static void computeImportForFunction(
continue;
if (DefinedGVSummaries.count(VI.getGUID())) {
- DEBUG(dbgs() << "ignored! Target already in destination module.\n");
+ LLVM_DEBUG(dbgs() << "ignored! Target already in destination module.\n");
continue;
}
@@ -317,7 +318,8 @@ static void computeImportForFunction(
auto *CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold,
Summary.modulePath());
if (!CalleeSummary) {
- DEBUG(dbgs() << "ignored! No qualifying callee with summary found.\n");
+ LLVM_DEBUG(
+ dbgs() << "ignored! No qualifying callee with summary found.\n");
continue;
}
@@ -346,8 +348,8 @@ static void computeImportForFunction(
/// a second time with a higher threshold. In this case, it is added back to
/// the worklist with the new threshold.
if (ProcessedThreshold && ProcessedThreshold >= AdjThreshold) {
- DEBUG(dbgs() << "ignored! Target was already seen with Threshold "
- << ProcessedThreshold << "\n");
+ LLVM_DEBUG(dbgs() << "ignored! Target was already seen with Threshold "
+ << ProcessedThreshold << "\n");
continue;
}
bool PreviouslyImported = ProcessedThreshold != 0;
@@ -398,7 +400,7 @@ static void ComputeImportForModule(
// module
for (auto &GVSummary : DefinedGVSummaries) {
if (!Index.isGlobalValueLive(GVSummary.second)) {
- DEBUG(dbgs() << "Ignores Dead GUID: " << GVSummary.first << "\n");
+ LLVM_DEBUG(dbgs() << "Ignores Dead GUID: " << GVSummary.first << "\n");
continue;
}
auto *FuncSummary =
@@ -406,7 +408,7 @@ static void ComputeImportForModule(
if (!FuncSummary)
// Skip import for global variables
continue;
- DEBUG(dbgs() << "Initialize import for " << GVSummary.first << "\n");
+ LLVM_DEBUG(dbgs() << "Initialize import for " << GVSummary.first << "\n");
computeImportForFunction(*FuncSummary, Index, ImportInstrLimit,
DefinedGVSummaries, Worklist, ImportList,
ExportLists);
@@ -469,8 +471,8 @@ void llvm::ComputeCrossModuleImport(
// For each module that has function defined, compute the import/export lists.
for (auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) {
auto &ImportList = ImportLists[DefinedGVSummaries.first()];
- DEBUG(dbgs() << "Computing import for Module '"
- << DefinedGVSummaries.first() << "'\n");
+ LLVM_DEBUG(dbgs() << "Computing import for Module '"
+ << DefinedGVSummaries.first() << "'\n");
ComputeImportForModule(DefinedGVSummaries.second, Index, ImportList,
&ExportLists);
}
@@ -492,23 +494,23 @@ void llvm::ComputeCrossModuleImport(
}
#ifndef NDEBUG
- DEBUG(dbgs() << "Import/Export lists for " << ImportLists.size()
- << " modules:\n");
+ LLVM_DEBUG(dbgs() << "Import/Export lists for " << ImportLists.size()
+ << " modules:\n");
for (auto &ModuleImports : ImportLists) {
auto ModName = ModuleImports.first();
auto &Exports = ExportLists[ModName];
unsigned NumGVS = numGlobalVarSummaries(Index, Exports);
- DEBUG(dbgs() << "* Module " << ModName << " exports "
- << Exports.size() - NumGVS << " functions and " << NumGVS
- << " vars. Imports from "
- << ModuleImports.second.size() << " modules.\n");
+ LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports "
+ << Exports.size() - NumGVS << " functions and " << NumGVS
+ << " vars. Imports from " << ModuleImports.second.size()
+ << " modules.\n");
for (auto &Src : ModuleImports.second) {
auto SrcModName = Src.first();
unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
- DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
- << " functions imported from " << SrcModName << "\n");
- DEBUG(dbgs() << " - " << NumGVSPerMod << " global vars imported from "
- << SrcModName << "\n");
+ LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
+ << " functions imported from " << SrcModName << "\n");
+ LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod
+ << " global vars imported from " << SrcModName << "\n");
}
}
#endif
@@ -518,15 +520,15 @@ void llvm::ComputeCrossModuleImport(
static void dumpImportListForModule(const ModuleSummaryIndex &Index,
StringRef ModulePath,
FunctionImporter::ImportMapTy &ImportList) {
- DEBUG(dbgs() << "* Module " << ModulePath << " imports from "
- << ImportList.size() << " modules.\n");
+ LLVM_DEBUG(dbgs() << "* Module " << ModulePath << " imports from "
+ << ImportList.size() << " modules.\n");
for (auto &Src : ImportList) {
auto SrcModName = Src.first();
unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
- DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
- << " functions imported from " << SrcModName << "\n");
- DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from "
- << SrcModName << "\n");
+ LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
+ << " functions imported from " << SrcModName << "\n");
+ LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from "
+ << SrcModName << "\n");
}
}
#endif
@@ -541,7 +543,7 @@ void llvm::ComputeCrossModuleImportForModule(
Index.collectDefinedFunctionsForModule(ModulePath, FunctionSummaryMap);
// Compute the import list for this module.
- DEBUG(dbgs() << "Computing import for Module '" << ModulePath << "'\n");
+ LLVM_DEBUG(dbgs() << "Computing import for Module '" << ModulePath << "'\n");
ComputeImportForModule(FunctionSummaryMap, Index, ImportList);
#ifndef NDEBUG
@@ -601,7 +603,7 @@ void llvm::computeDeadSymbols(
for (const auto &Entry : Index)
for (auto &S : Entry.second.SummaryList)
if (S->isLive()) {
- DEBUG(dbgs() << "Live root: " << Entry.first << "\n");
+ LLVM_DEBUG(dbgs() << "Live root: " << Entry.first << "\n");
Worklist.push_back(ValueInfo(/*IsAnalysis=*/false, &Entry));
++LiveSymbols;
break;
@@ -667,8 +669,8 @@ void llvm::computeDeadSymbols(
Index.setWithGlobalValueDeadStripping();
unsigned DeadSymbols = Index.size() - LiveSymbols;
- DEBUG(dbgs() << LiveSymbols << " symbols Live, and " << DeadSymbols
- << " symbols Dead \n");
+ LLVM_DEBUG(dbgs() << LiveSymbols << " symbols Live, and " << DeadSymbols
+ << " symbols Dead \n");
NumDeadSymbols += DeadSymbols;
NumLiveSymbols += LiveSymbols;
}
@@ -711,7 +713,8 @@ llvm::EmitImportsFiles(StringRef ModulePath, StringRef OutputFilename,
}
bool llvm::convertToDeclaration(GlobalValue &GV) {
- DEBUG(dbgs() << "Converting to a declaration: `" << GV.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Converting to a declaration: `" << GV.getName()
+ << "\n");
if (Function *F = dyn_cast<Function>(&GV)) {
F->deleteBody();
F->clearMetadata();
@@ -787,8 +790,9 @@ void llvm::thinLTOResolveWeakForLinkerModule(
NewLinkage == GlobalValue::WeakODRLinkage)
GV.setVisibility(GlobalValue::HiddenVisibility);
- DEBUG(dbgs() << "ODR fixing up linkage for `" << GV.getName() << "` from "
- << GV.getLinkage() << " to " << NewLinkage << "\n");
+ LLVM_DEBUG(dbgs() << "ODR fixing up linkage for `" << GV.getName()
+ << "` from " << GV.getLinkage() << " to " << NewLinkage
+ << "\n");
GV.setLinkage(NewLinkage);
}
// Remove declarations from comdats, including available_externally
@@ -865,8 +869,8 @@ static Function *replaceAliasWithAliasee(Module *SrcModule, GlobalAlias *GA) {
// index.
Expected<bool> FunctionImporter::importFunctions(
Module &DestModule, const FunctionImporter::ImportMapTy &ImportList) {
- DEBUG(dbgs() << "Starting import for Module "
- << DestModule.getModuleIdentifier() << "\n");
+ LLVM_DEBUG(dbgs() << "Starting import for Module "
+ << DestModule.getModuleIdentifier() << "\n");
unsigned ImportedCount = 0, ImportedGVCount = 0;
IRMover Mover(DestModule);
@@ -899,9 +903,9 @@ Expected<bool> FunctionImporter::importFunctions(
continue;
auto GUID = F.getGUID();
auto Import = ImportGUIDs.count(GUID);
- DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function " << GUID
- << " " << F.getName() << " from "
- << SrcModule->getSourceFileName() << "\n");
+ LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function "
+ << GUID << " " << F.getName() << " from "
+ << SrcModule->getSourceFileName() << "\n");
if (Import) {
if (Error Err = F.materialize())
return std::move(Err);
@@ -921,9 +925,9 @@ Expected<bool> FunctionImporter::importFunctions(
continue;
auto GUID = GV.getGUID();
auto Import = ImportGUIDs.count(GUID);
- DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global " << GUID
- << " " << GV.getName() << " from "
- << SrcModule->getSourceFileName() << "\n");
+ LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global "
+ << GUID << " " << GV.getName() << " from "
+ << SrcModule->getSourceFileName() << "\n");
if (Import) {
if (Error Err = GV.materialize())
return std::move(Err);
@@ -935,9 +939,9 @@ Expected<bool> FunctionImporter::importFunctions(
continue;
auto GUID = GA.getGUID();
auto Import = ImportGUIDs.count(GUID);
- DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias " << GUID
- << " " << GA.getName() << " from "
- << SrcModule->getSourceFileName() << "\n");
+ LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias "
+ << GUID << " " << GA.getName() << " from "
+ << SrcModule->getSourceFileName() << "\n");
if (Import) {
if (Error Err = GA.materialize())
return std::move(Err);
@@ -946,9 +950,9 @@ Expected<bool> FunctionImporter::importFunctions(
if (Error Err = Base->materialize())
return std::move(Err);
auto *Fn = replaceAliasWithAliasee(SrcModule.get(), &GA);
- DEBUG(dbgs() << "Is importing aliasee fn " << Base->getGUID()
- << " " << Base->getName() << " from "
- << SrcModule->getSourceFileName() << "\n");
+ LLVM_DEBUG(dbgs() << "Is importing aliasee fn " << Base->getGUID()
+ << " " << Base->getName() << " from "
+ << SrcModule->getSourceFileName() << "\n");
if (EnableImportMetadata) {
// Add 'thinlto_src_module' metadata for statistics and debugging.
Fn->setMetadata(
@@ -987,12 +991,12 @@ Expected<bool> FunctionImporter::importFunctions(
NumImportedFunctions += (ImportedCount - ImportedGVCount);
NumImportedGlobalVars += ImportedGVCount;
- DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount
- << " functions for Module " << DestModule.getModuleIdentifier()
- << "\n");
- DEBUG(dbgs() << "Imported " << ImportedGVCount
- << " global variables for Module "
- << DestModule.getModuleIdentifier() << "\n");
+ LLVM_DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount
+ << " functions for Module "
+ << DestModule.getModuleIdentifier() << "\n");
+ LLVM_DEBUG(dbgs() << "Imported " << ImportedGVCount
+ << " global variables for Module "
+ << DestModule.getModuleIdentifier() << "\n");
return ImportedCount;
}
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 1ae5dde5fc89..fa9689e79486 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -567,7 +567,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
if (NewGlobals.empty())
return nullptr;
- DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");
+ LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");
Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));
@@ -799,7 +799,8 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
}
if (Changed) {
- DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV << "\n");
+ LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
+ << "\n");
++NumGlobUses;
}
@@ -813,7 +814,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
}
if (GV->use_empty()) {
- DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
+ LLVM_DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
Changed = true;
GV->eraseFromParent();
++NumDeleted;
@@ -849,7 +850,8 @@ static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
ConstantInt *NElements, const DataLayout &DL,
TargetLibraryInfo *TLI) {
- DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
+ LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI
+ << '\n');
Type *GlobalType;
if (NElements->getZExtValue() == 1)
@@ -1285,7 +1287,8 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
Value *NElems, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
- DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
+ LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI
+ << '\n');
Type *MAT = getMallocAllocatedType(CI, TLI);
StructType *STy = cast<StructType>(MAT);
@@ -1624,7 +1627,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
return false;
- DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
+ LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
// Create the new global, initializing it to false.
GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
@@ -1763,7 +1766,7 @@ static bool deleteIfDead(GlobalValue &GV,
if (!Dead)
return false;
- DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
+ LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
GV.eraseFromParent();
++NumDeleted;
return true;
@@ -1929,7 +1932,7 @@ static bool processInternalGlobal(
LookupDomTree)) {
const DataLayout &DL = GV->getParent()->getDataLayout();
- DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
+ LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
->getEntryBlock().begin());
Type *ElemTy = GV->getValueType();
@@ -1950,7 +1953,7 @@ static bool processInternalGlobal(
// If the global is never loaded (but may be stored to), it is dead.
// Delete it now.
if (!GS.IsLoaded) {
- DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
+ LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
bool Changed;
if (isLeakCheckerRoot(GV)) {
@@ -1972,7 +1975,7 @@ static bool processInternalGlobal(
}
if (GS.StoredType <= GlobalStatus::InitializerStored) {
- DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
+ LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
GV->setConstant(true);
// Clean up any obviously simplifiable users now.
@@ -1980,8 +1983,8 @@ static bool processInternalGlobal(
// If the global is dead now, just nuke it.
if (GV->use_empty()) {
- DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
- << "all users and delete global!\n");
+ LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
+ << "all users and delete global!\n");
GV->eraseFromParent();
++NumDeleted;
return true;
@@ -2009,8 +2012,8 @@ static bool processInternalGlobal(
CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
if (GV->use_empty()) {
- DEBUG(dbgs() << " *** Substituting initializer allowed us to "
- << "simplify all users and delete global!\n");
+ LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to "
+ << "simplify all users and delete global!\n");
GV->eraseFromParent();
++NumDeleted;
}
@@ -2545,9 +2548,9 @@ static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
++NumCtorsEvaluated;
// We succeeded at evaluation: commit the result.
- DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
- << F->getName() << "' to " << Eval.getMutatedMemory().size()
- << " stores.\n");
+ LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
+ << F->getName() << "' to "
+ << Eval.getMutatedMemory().size() << " stores.\n");
BatchCommitValueTo(Eval.getMutatedMemory());
for (GlobalVariable *GV : Eval.getInvariants())
GV->setConstant(true);
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index 26b889e617ef..e4c7af31f67d 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -208,8 +208,8 @@ static void mergeInlinedArrayAllocas(
// Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare
// success!
- DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI
- << "\n\t\tINTO: " << *AvailableAlloca << '\n');
+ LLVM_DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI
+ << "\n\t\tINTO: " << *AvailableAlloca << '\n');
// Move affected dbg.declare calls immediately after the new alloca to
// avoid the situation when a dbg.declare precedes its alloca.
@@ -379,14 +379,14 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost,
Function *Caller = CS.getCaller();
if (IC.isAlways()) {
- DEBUG(dbgs() << " Inlining: cost=always"
- << ", Call: " << *CS.getInstruction() << "\n");
+ LLVM_DEBUG(dbgs() << " Inlining: cost=always"
+ << ", Call: " << *CS.getInstruction() << "\n");
return IC;
}
if (IC.isNever()) {
- DEBUG(dbgs() << " NOT Inlining: cost=never"
- << ", Call: " << *CS.getInstruction() << "\n");
+ LLVM_DEBUG(dbgs() << " NOT Inlining: cost=never"
+ << ", Call: " << *CS.getInstruction() << "\n");
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline", Call)
<< NV("Callee", Callee) << " not inlined into "
@@ -397,9 +397,9 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost,
}
if (!IC) {
- DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost()
- << ", thres=" << IC.getThreshold()
- << ", Call: " << *CS.getInstruction() << "\n");
+ LLVM_DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost()
+ << ", thres=" << IC.getThreshold()
+ << ", Call: " << *CS.getInstruction() << "\n");
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "TooCostly", Call)
<< NV("Callee", Callee) << " not inlined into "
@@ -412,9 +412,9 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost,
int TotalSecondaryCost = 0;
if (shouldBeDeferred(Caller, CS, IC, TotalSecondaryCost, GetInlineCost)) {
- DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction()
- << " Cost = " << IC.getCost()
- << ", outer Cost = " << TotalSecondaryCost << '\n');
+ LLVM_DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction()
+ << " Cost = " << IC.getCost()
+ << ", outer Cost = " << TotalSecondaryCost << '\n');
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "IncreaseCostInOtherContexts",
Call)
@@ -428,9 +428,9 @@ shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost,
return None;
}
- DEBUG(dbgs() << " Inlining: cost=" << IC.getCost()
- << ", thres=" << IC.getThreshold()
- << ", Call: " << *CS.getInstruction() << '\n');
+ LLVM_DEBUG(dbgs() << " Inlining: cost=" << IC.getCost()
+ << ", thres=" << IC.getThreshold()
+ << ", Call: " << *CS.getInstruction() << '\n');
return IC;
}
@@ -470,12 +470,12 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
function_ref<AAResults &(Function &)> AARGetter,
ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
SmallPtrSet<Function *, 8> SCCFunctions;
- DEBUG(dbgs() << "Inliner visiting SCC:");
+ LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
for (CallGraphNode *Node : SCC) {
Function *F = Node->getFunction();
if (F)
SCCFunctions.insert(F);
- DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
+ LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
}
// Scan through and identify all call sites ahead of time so that we only
@@ -524,7 +524,7 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
}
}
- DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");
+ LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");
// If there are no calls in this function, exit early.
if (CallSites.empty())
@@ -593,7 +593,7 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
// size. This happens because IPSCCP propagates the result out of the
// call and then we're left with the dead call.
if (IsTriviallyDead) {
- DEBUG(dbgs() << " -> Deleting dead call: " << *Instr << "\n");
+ LLVM_DEBUG(dbgs() << " -> Deleting dead call: " << *Instr << "\n");
// Update the call graph by deleting the edge from Callee to Caller.
CG[Caller]->removeCallEdgeFor(CS);
Instr->eraseFromParent();
@@ -657,8 +657,8 @@ inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
// callgraph references to the node, we cannot delete it yet, this
// could invalidate the CGSCC iterator.
CG[Callee]->getNumReferences() == 0) {
- DEBUG(dbgs() << " -> Deleting dead function: " << Callee->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << " -> Deleting dead function: "
+ << Callee->getName() << "\n");
CallGraphNode *CalleeNode = CG[Callee];
// Remove any call graph edges from the callee to its callees.
@@ -896,7 +896,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
if (F.hasFnAttribute(Attribute::OptimizeNone))
continue;
- DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n");
// Get a FunctionAnalysisManager via a proxy for this particular node. We
// do this each time we visit a node as the SCC may have changed and as
@@ -948,9 +948,9 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// and thus hidden from the full inline history.
if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
UR.InlinedInternalEdges.count({&N, C})) {
- DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
- "previously split out of this SCC by inlining: "
- << F.getName() << " -> " << Callee.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
+ "previously split out of this SCC by inlining: "
+ << F.getName() << " -> " << Callee.getName() << "\n");
continue;
}
@@ -1069,7 +1069,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
// change.
LazyCallGraph::SCC *OldC = C;
C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR);
- DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");
+ LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");
RC = &C->getOuterRefSCC();
// If this causes an SCC to split apart into multiple smaller SCCs, there
@@ -1087,8 +1087,8 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
if (C != OldC && llvm::any_of(InlinedCallees, [&](Function *Callee) {
return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
})) {
- DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
- "retaining this to avoid infinite inlining.\n");
+ LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
+ "retaining this to avoid infinite inlining.\n");
UR.InlinedInternalEdges.insert({&N, OldC});
}
InlinedCallees.clear();
diff --git a/lib/Transforms/IPO/Internalize.cpp b/lib/Transforms/IPO/Internalize.cpp
index 26db1465bb26..a6542d28dfd8 100644
--- a/lib/Transforms/IPO/Internalize.cpp
+++ b/lib/Transforms/IPO/Internalize.cpp
@@ -192,7 +192,7 @@ bool InternalizePass::internalizeModule(Module &M, CallGraph *CG) {
ExternalNode->removeOneAbstractEdgeTo((*CG)[&I]);
++NumFunctions;
- DEBUG(dbgs() << "Internalizing func " << I.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Internalizing func " << I.getName() << "\n");
}
// Never internalize the llvm.used symbol. It is used to implement
@@ -221,7 +221,7 @@ bool InternalizePass::internalizeModule(Module &M, CallGraph *CG) {
Changed = true;
++NumGlobals;
- DEBUG(dbgs() << "Internalized gvar " << GV.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Internalized gvar " << GV.getName() << "\n");
}
// Mark all aliases that are not in the api as internal as well.
@@ -231,7 +231,7 @@ bool InternalizePass::internalizeModule(Module &M, CallGraph *CG) {
Changed = true;
++NumAliases;
- DEBUG(dbgs() << "Internalized alias " << GA.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Internalized alias " << GA.getName() << "\n");
}
return Changed;
diff --git a/lib/Transforms/IPO/LowerTypeTests.cpp b/lib/Transforms/IPO/LowerTypeTests.cpp
index 1f19a3b97796..d2545af85028 100644
--- a/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1026,7 +1026,7 @@ void LowerTypeTestsModule::lowerTypeTestCalls(
for (Metadata *TypeId : TypeIds) {
// Build the bitset.
BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout);
- DEBUG({
+ LLVM_DEBUG({
if (auto MDS = dyn_cast<MDString>(TypeId))
dbgs() << MDS->getString() << ": ";
else
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 76b90391fbb1..080e2f634496 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -407,10 +407,10 @@ bool MergeFunctions::runOnModule(Module &M) {
std::vector<WeakTrackingVH> Worklist;
Deferred.swap(Worklist);
- DEBUG(doSanityCheck(Worklist));
+ LLVM_DEBUG(doSanityCheck(Worklist));
- DEBUG(dbgs() << "size of module: " << M.size() << '\n');
- DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');
+ LLVM_DEBUG(dbgs() << "size of module: " << M.size() << '\n');
+ LLVM_DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');
// Insert functions and merge them.
for (WeakTrackingVH &I : Worklist) {
@@ -421,7 +421,7 @@ bool MergeFunctions::runOnModule(Module &M) {
Changed |= insert(F);
}
}
- DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n');
+ LLVM_DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n');
} while (!Deferred.empty());
FnTree.clear();
@@ -498,19 +498,20 @@ static Value *createCast(IRBuilder<> &Builder, Value *V, Type *DestTy) {
// parameter debug info, from the entry block.
void MergeFunctions::eraseInstsUnrelatedToPDI(
std::vector<Instruction *> &PDIUnrelatedWL) {
- DEBUG(dbgs() << " Erasing instructions (in reverse order of appearance in "
- "entry block) unrelated to parameter debug info from entry "
- "block: {\n");
+ LLVM_DEBUG(
+ dbgs() << " Erasing instructions (in reverse order of appearance in "
+ "entry block) unrelated to parameter debug info from entry "
+ "block: {\n");
while (!PDIUnrelatedWL.empty()) {
Instruction *I = PDIUnrelatedWL.back();
- DEBUG(dbgs() << " Deleting Instruction: ");
- DEBUG(I->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Deleting Instruction: ");
+ LLVM_DEBUG(I->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
I->eraseFromParent();
PDIUnrelatedWL.pop_back();
}
- DEBUG(dbgs() << " } // Done erasing instructions unrelated to parameter "
- "debug info from entry block. \n");
+ LLVM_DEBUG(dbgs() << " } // Done erasing instructions unrelated to parameter "
+ "debug info from entry block. \n");
}
// Reduce G to its entry block.
@@ -543,99 +544,100 @@ void MergeFunctions::filterInstsUnrelatedToPDI(
for (BasicBlock::iterator BI = GEntryBlock->begin(), BIE = GEntryBlock->end();
BI != BIE; ++BI) {
if (auto *DVI = dyn_cast<DbgValueInst>(&*BI)) {
- DEBUG(dbgs() << " Deciding: ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Deciding: ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
DILocalVariable *DILocVar = DVI->getVariable();
if (DILocVar->isParameter()) {
- DEBUG(dbgs() << " Include (parameter): ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Include (parameter): ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
PDIRelated.insert(&*BI);
} else {
- DEBUG(dbgs() << " Delete (!parameter): ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Delete (!parameter): ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
} else if (auto *DDI = dyn_cast<DbgDeclareInst>(&*BI)) {
- DEBUG(dbgs() << " Deciding: ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Deciding: ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
DILocalVariable *DILocVar = DDI->getVariable();
if (DILocVar->isParameter()) {
- DEBUG(dbgs() << " Parameter: ");
- DEBUG(DILocVar->print(dbgs()));
+ LLVM_DEBUG(dbgs() << " Parameter: ");
+ LLVM_DEBUG(DILocVar->print(dbgs()));
AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
if (AI) {
- DEBUG(dbgs() << " Processing alloca users: ");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Processing alloca users: ");
+ LLVM_DEBUG(dbgs() << "\n");
for (User *U : AI->users()) {
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (Value *Arg = SI->getValueOperand()) {
if (dyn_cast<Argument>(Arg)) {
- DEBUG(dbgs() << " Include: ");
- DEBUG(AI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Include: ");
+ LLVM_DEBUG(AI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
PDIRelated.insert(AI);
- DEBUG(dbgs() << " Include (parameter): ");
- DEBUG(SI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Include (parameter): ");
+ LLVM_DEBUG(SI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
PDIRelated.insert(SI);
- DEBUG(dbgs() << " Include: ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Include: ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
PDIRelated.insert(&*BI);
} else {
- DEBUG(dbgs() << " Delete (!parameter): ");
- DEBUG(SI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Delete (!parameter): ");
+ LLVM_DEBUG(SI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
}
} else {
- DEBUG(dbgs() << " Defer: ");
- DEBUG(U->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Defer: ");
+ LLVM_DEBUG(U->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
}
} else {
- DEBUG(dbgs() << " Delete (alloca NULL): ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Delete (alloca NULL): ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
} else {
- DEBUG(dbgs() << " Delete (!parameter): ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Delete (!parameter): ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
} else if (dyn_cast<TerminatorInst>(BI) == GEntryBlock->getTerminator()) {
- DEBUG(dbgs() << " Will Include Terminator: ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Will Include Terminator: ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
PDIRelated.insert(&*BI);
} else {
- DEBUG(dbgs() << " Defer: ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Defer: ");
+ LLVM_DEBUG(BI->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
}
- DEBUG(dbgs()
- << " Report parameter debug info related/related instructions: {\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Report parameter debug info related/related instructions: {\n");
for (BasicBlock::iterator BI = GEntryBlock->begin(), BE = GEntryBlock->end();
BI != BE; ++BI) {
Instruction *I = &*BI;
if (PDIRelated.find(I) == PDIRelated.end()) {
- DEBUG(dbgs() << " !PDIRelated: ");
- DEBUG(I->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " !PDIRelated: ");
+ LLVM_DEBUG(I->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
PDIUnrelatedWL.push_back(I);
} else {
- DEBUG(dbgs() << " PDIRelated: ");
- DEBUG(I->print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " PDIRelated: ");
+ LLVM_DEBUG(I->print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
}
- DEBUG(dbgs() << " }\n");
+ LLVM_DEBUG(dbgs() << " }\n");
}
// Replace G with a simple tail call to bitcast(F). Also (unless
@@ -674,8 +676,8 @@ void MergeFunctions::writeThunk(Function *F, Function *G) {
// making the function larger.
if (F->size() == 1) {
if (F->front().size() <= 2) {
- DEBUG(dbgs() << "writeThunk: " << F->getName()
- << " is too small to bother creating a thunk for\n");
+ LLVM_DEBUG(dbgs() << "writeThunk: " << F->getName()
+ << " is too small to bother creating a thunk for\n");
return;
}
}
@@ -685,13 +687,14 @@ void MergeFunctions::writeThunk(Function *F, Function *G) {
BasicBlock *BB = nullptr;
Function *NewG = nullptr;
if (MergeFunctionsPDI) {
- DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) Do not create a new "
- "function as thunk; retain original: "
- << G->getName() << "()\n");
+ LLVM_DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) Do not create a new "
+ "function as thunk; retain original: "
+ << G->getName() << "()\n");
GEntryBlock = &G->getEntryBlock();
- DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) filter parameter related "
- "debug info for "
- << G->getName() << "() {\n");
+ LLVM_DEBUG(
+ dbgs() << "writeThunk: (MergeFunctionsPDI) filter parameter related "
+ "debug info for "
+ << G->getName() << "() {\n");
filterInstsUnrelatedToPDI(GEntryBlock, PDIUnrelatedWL);
GEntryBlock->getTerminator()->eraseFromParent();
BB = GEntryBlock;
@@ -730,13 +733,15 @@ void MergeFunctions::writeThunk(Function *F, Function *G) {
CI->setDebugLoc(CIDbgLoc);
RI->setDebugLoc(RIDbgLoc);
} else {
- DEBUG(dbgs() << "writeThunk: (MergeFunctionsPDI) No DISubprogram for "
- << G->getName() << "()\n");
+ LLVM_DEBUG(
+ dbgs() << "writeThunk: (MergeFunctionsPDI) No DISubprogram for "
+ << G->getName() << "()\n");
}
eraseTail(G);
eraseInstsUnrelatedToPDI(PDIUnrelatedWL);
- DEBUG(dbgs() << "} // End of parameter related debug info filtering for: "
- << G->getName() << "()\n");
+ LLVM_DEBUG(
+ dbgs() << "} // End of parameter related debug info filtering for: "
+ << G->getName() << "()\n");
} else {
NewG->copyAttributesFrom(G);
NewG->takeName(G);
@@ -745,7 +750,7 @@ void MergeFunctions::writeThunk(Function *F, Function *G) {
G->eraseFromParent();
}
- DEBUG(dbgs() << "writeThunk: " << H->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "writeThunk: " << H->getName() << '\n');
++NumThunksWritten;
}
@@ -806,7 +811,8 @@ bool MergeFunctions::insert(Function *NewFunction) {
if (Result.second) {
assert(FNodesInTree.count(NewFunction) == 0);
FNodesInTree.insert({NewFunction, Result.first});
- DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName()
+ << '\n');
return false;
}
@@ -827,8 +833,8 @@ bool MergeFunctions::insert(Function *NewFunction) {
assert(OldF.getFunc() != F && "Must have swapped the functions.");
}
- DEBUG(dbgs() << " " << OldF.getFunc()->getName()
- << " == " << NewFunction->getName() << '\n');
+ LLVM_DEBUG(dbgs() << " " << OldF.getFunc()->getName()
+ << " == " << NewFunction->getName() << '\n');
Function *DeleteF = NewFunction;
mergeTwoFunctions(OldF.getFunc(), DeleteF);
@@ -840,7 +846,7 @@ bool MergeFunctions::insert(Function *NewFunction) {
void MergeFunctions::remove(Function *F) {
auto I = FNodesInTree.find(F);
if (I != FNodesInTree.end()) {
- DEBUG(dbgs() << "Deferred " << F->getName()<< ".\n");
+ LLVM_DEBUG(dbgs() << "Deferred " << F->getName() << ".\n");
FnTree.erase(I->second);
// I->second has been invalidated, remove it from the FNodesInTree map to
// preserve the invariant.
diff --git a/lib/Transforms/IPO/SampleProfile.cpp b/lib/Transforms/IPO/SampleProfile.cpp
index 0222d249a85f..945c660aa781 100644
--- a/lib/Transforms/IPO/SampleProfile.cpp
+++ b/lib/Transforms/IPO/SampleProfile.cpp
@@ -557,11 +557,11 @@ ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
return Remark;
});
}
- DEBUG(dbgs() << " " << DLoc.getLine() << "."
- << DIL->getBaseDiscriminator() << ":" << Inst
- << " (line offset: " << LineOffset << "."
- << DIL->getBaseDiscriminator() << " - weight: " << R.get()
- << ")\n");
+ LLVM_DEBUG(dbgs() << " " << DLoc.getLine() << "."
+ << DIL->getBaseDiscriminator() << ":" << Inst
+ << " (line offset: " << LineOffset << "."
+ << DIL->getBaseDiscriminator() << " - weight: " << R.get()
+ << ")\n");
}
return R;
}
@@ -595,7 +595,7 @@ ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) {
/// \param F The function to query.
bool SampleProfileLoader::computeBlockWeights(Function &F) {
bool Changed = false;
- DEBUG(dbgs() << "Block weights\n");
+ LLVM_DEBUG(dbgs() << "Block weights\n");
for (const auto &BB : F) {
ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
if (Weight) {
@@ -603,7 +603,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) {
VisitedBlocks.insert(&BB);
Changed = true;
}
- DEBUG(printBlockWeight(dbgs(), &BB));
+ LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
}
return Changed;
@@ -811,9 +811,9 @@ bool SampleProfileLoader::inlineHotFunctions(
inlineCallInstruction(DI))
LocalChanged = true;
} else {
- DEBUG(dbgs()
- << "\nFailed to promote indirect call to "
- << CalleeFunctionName << " because " << Reason << "\n");
+ LLVM_DEBUG(dbgs()
+ << "\nFailed to promote indirect call to "
+ << CalleeFunctionName << " because " << Reason << "\n");
}
}
} else if (CalledFunction && CalledFunction->getSubprogram() &&
@@ -902,14 +902,14 @@ void SampleProfileLoader::findEquivalencesFor(
/// \param F The function to query.
void SampleProfileLoader::findEquivalenceClasses(Function &F) {
SmallVector<BasicBlock *, 8> DominatedBBs;
- DEBUG(dbgs() << "\nBlock equivalence classes\n");
+ LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
// Find equivalence sets based on dominance and post-dominance information.
for (auto &BB : F) {
BasicBlock *BB1 = &BB;
// Compute BB1's equivalence class once.
if (EquivalenceClass.count(BB1)) {
- DEBUG(printBlockEquivalence(dbgs(), BB1));
+ LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
continue;
}
@@ -930,7 +930,7 @@ void SampleProfileLoader::findEquivalenceClasses(Function &F) {
DT->getDescendants(BB1, DominatedBBs);
findEquivalencesFor(BB1, DominatedBBs, PDT.get());
- DEBUG(printBlockEquivalence(dbgs(), BB1));
+ LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
}
// Assign weights to equivalence classes.
@@ -939,13 +939,14 @@ void SampleProfileLoader::findEquivalenceClasses(Function &F) {
// the same number of times. Since we know that the head block in
// each equivalence class has the largest weight, assign that weight
// to all the blocks in that equivalence class.
- DEBUG(dbgs() << "\nAssign the same weight to all blocks in the same class\n");
+ LLVM_DEBUG(
+ dbgs() << "\nAssign the same weight to all blocks in the same class\n");
for (auto &BI : F) {
const BasicBlock *BB = &BI;
const BasicBlock *EquivBB = EquivalenceClass[BB];
if (BB != EquivBB)
BlockWeights[BB] = BlockWeights[EquivBB];
- DEBUG(printBlockWeight(dbgs(), BB));
+ LLVM_DEBUG(printBlockWeight(dbgs(), BB));
}
}
@@ -986,7 +987,7 @@ uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges,
bool SampleProfileLoader::propagateThroughEdges(Function &F,
bool UpdateBlockCount) {
bool Changed = false;
- DEBUG(dbgs() << "\nPropagation through edges\n");
+ LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
for (const auto &BI : F) {
const BasicBlock *BB = &BI;
const BasicBlock *EC = EquivalenceClass[BB];
@@ -1058,9 +1059,9 @@ bool SampleProfileLoader::propagateThroughEdges(Function &F,
if (TotalWeight > BBWeight) {
BBWeight = TotalWeight;
Changed = true;
- DEBUG(dbgs() << "All edge weights for " << BB->getName()
- << " known. Set weight for block: ";
- printBlockWeight(dbgs(), BB););
+ LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
+ << " known. Set weight for block: ";
+ printBlockWeight(dbgs(), BB););
}
} else if (NumTotalEdges == 1 &&
EdgeWeights[SingleEdge] < BlockWeights[EC]) {
@@ -1087,8 +1088,8 @@ bool SampleProfileLoader::propagateThroughEdges(Function &F,
EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
VisitedEdges.insert(UnknownEdge);
Changed = true;
- DEBUG(dbgs() << "Set weight for edge: ";
- printEdgeWeight(dbgs(), UnknownEdge));
+ LLVM_DEBUG(dbgs() << "Set weight for edge: ";
+ printEdgeWeight(dbgs(), UnknownEdge));
}
} else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
// If a block Weights 0, all its in/out edges should weight 0.
@@ -1114,8 +1115,8 @@ bool SampleProfileLoader::propagateThroughEdges(Function &F,
EdgeWeights[SelfReferentialEdge] = 0;
VisitedEdges.insert(SelfReferentialEdge);
Changed = true;
- DEBUG(dbgs() << "Set self-referential edge weight to: ";
- printEdgeWeight(dbgs(), SelfReferentialEdge));
+ LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
+ printEdgeWeight(dbgs(), SelfReferentialEdge));
}
if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
BlockWeights[EC] = TotalWeight;
@@ -1239,7 +1240,7 @@ void SampleProfileLoader::propagateWeights(Function &F) {
// Generate MD_prof metadata for every branch instruction using the
// edge weights computed during propagation.
- DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
+ LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
LLVMContext &Ctx = F.getContext();
MDBuilder MDB(Ctx);
for (auto &BI : F) {
@@ -1285,10 +1286,10 @@ void SampleProfileLoader::propagateWeights(Function &F) {
continue;
DebugLoc BranchLoc = TI->getDebugLoc();
- DEBUG(dbgs() << "\nGetting weights for branch at line "
- << ((BranchLoc) ? Twine(BranchLoc.getLine())
- : Twine("<UNKNOWN LOCATION>"))
- << ".\n");
+ LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
+ << ((BranchLoc) ? Twine(BranchLoc.getLine())
+ : Twine("<UNKNOWN LOCATION>"))
+ << ".\n");
SmallVector<uint32_t, 4> Weights;
uint32_t MaxWeight = 0;
Instruction *MaxDestInst;
@@ -1296,12 +1297,12 @@ void SampleProfileLoader::propagateWeights(Function &F) {
BasicBlock *Succ = TI->getSuccessor(I);
Edge E = std::make_pair(BB, Succ);
uint64_t Weight = EdgeWeights[E];
- DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
+ LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
// Use uint32_t saturated arithmetic to adjust the incoming weights,
// if needed. Sample counts in profiles are 64-bit unsigned values,
// but internally branch weights are expressed as 32-bit values.
if (Weight > std::numeric_limits<uint32_t>::max()) {
- DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
+ LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
Weight = std::numeric_limits<uint32_t>::max();
}
// Weight is added by one to avoid propagation errors introduced by
@@ -1322,7 +1323,7 @@ void SampleProfileLoader::propagateWeights(Function &F) {
// annotation is done twice. If the first annotation already set the
// weights, the second pass does not need to set it.
if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
- DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
+ LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
TI->setMetadata(LLVMContext::MD_prof,
MDB.createBranchWeights(Weights));
ORE->emit([&]() {
@@ -1331,7 +1332,7 @@ void SampleProfileLoader::propagateWeights(Function &F) {
<< ore::NV("CondBranchesLoc", BranchLoc);
});
} else {
- DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
+ LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
}
}
}
@@ -1426,8 +1427,8 @@ bool SampleProfileLoader::emitAnnotations(Function &F) {
if (getFunctionLoc(F) == 0)
return false;
- DEBUG(dbgs() << "Line number for the first instruction in " << F.getName()
- << ": " << getFunctionLoc(F) << "\n");
+ LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
+ << F.getName() << ": " << getFunctionLoc(F) << "\n");
DenseSet<GlobalValue::GUID> InlinedGUIDs;
Changed |= inlineHotFunctions(F, InlinedGUIDs);
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 1300a92a24c6..4a01c1106d30 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3780,8 +3780,8 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// Remove the convergent attr on calls when the callee is not convergent.
if (CS.isConvergent() && !CalleeF->isConvergent() &&
!CalleeF->isIntrinsic()) {
- DEBUG(dbgs() << "Removing convergent attr from instr "
- << CS.getInstruction() << "\n");
+ LLVM_DEBUG(dbgs() << "Removing convergent attr from instr "
+ << CS.getInstruction() << "\n");
CS.setNotConvergent();
return CS.getInstruction();
}
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 16136777731e..f01bfca5abe8 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -697,8 +697,10 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// If this cast is a truncate, evaluting in a different type always
// eliminates the cast, so it is always a win.
- DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
- " to avoid cast: " << CI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "ICE: EvaluateInDifferentType converting expression type"
+ " to avoid cast: "
+ << CI << '\n');
Value *Res = EvaluateInDifferentType(Src, DestTy, false);
assert(Res->getType() == DestTy);
return replaceInstUsesWith(CI, Res);
@@ -1070,8 +1072,10 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
"Can't clear more bits than in SrcTy");
// Okay, we can transform this! Insert the new expression now.
- DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
- " to avoid zero extend: " << CI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "ICE: EvaluateInDifferentType converting expression type"
+ " to avoid zero extend: "
+ << CI << '\n');
Value *Res = EvaluateInDifferentType(Src, DestTy, false);
assert(Res->getType() == DestTy);
@@ -1344,8 +1348,10 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
canEvaluateSExtd(Src, DestTy)) {
// Okay, we can transform this! Insert the new expression now.
- DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
- " to avoid sign extend: " << CI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "ICE: EvaluateInDifferentType converting expression type"
+ " to avoid sign extend: "
+ << CI << '\n');
Value *Res = EvaluateInDifferentType(Src, DestTy, true);
assert(Res->getType() == DestTy);
diff --git a/lib/Transforms/InstCombine/InstCombineInternal.h b/lib/Transforms/InstCombine/InstCombineInternal.h
index a3a485c21117..38dc75335e2d 100644
--- a/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -536,8 +536,8 @@ public:
if (&I == V)
V = UndefValue::get(I.getType());
- DEBUG(dbgs() << "IC: Replacing " << I << "\n"
- << " with " << *V << '\n');
+ LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
+ << " with " << *V << '\n');
I.replaceAllUsesWith(V);
return &I;
@@ -559,7 +559,7 @@ public:
/// value, we can't rely on DCE to delete the instruction. Instead, visit
/// methods should return the value returned by this function.
Instruction *eraseInstFromFunction(Instruction &I) {
- DEBUG(dbgs() << "IC: ERASE " << I << '\n');
+ LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
assert(I.use_empty() && "Cannot erase instruction that is used!");
salvageDebugInfo(I);
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index b78de0fa691e..41cbd647ca72 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -270,7 +270,7 @@ void PointerReplacer::findLoadAndReplace(Instruction &I) {
auto *Inst = dyn_cast<Instruction>(&*U);
if (!Inst)
return;
- DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
+ LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
if (isa<LoadInst>(Inst)) {
for (auto P : Path)
replace(P);
@@ -405,8 +405,8 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
if (AI.getAlignment() <= SourceAlign &&
isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
- DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
- DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
+ LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
+ LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
eraseInstFromFunction(*ToDelete[i]);
Constant *TheSrc = cast<Constant>(Copy->getSource());
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 0827a34006ca..91850cee77f2 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -1008,10 +1008,9 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// extracted out of it. First, sort the users by their offset and size.
array_pod_sort(PHIUsers.begin(), PHIUsers.end());
- DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
- for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
- dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';
- );
+ LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
+ for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i) dbgs()
+ << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';);
// PredValues - This is a temporary used when rewriting PHI nodes. It is
// hoisted out here to avoid construction/destruction thrashing.
@@ -1092,8 +1091,8 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
}
PredValues.clear();
- DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
- << *EltPHI << '\n');
+ LLVM_DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
+ << *EltPHI << '\n');
ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
}
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 4b5c96ccbf12..167828063978 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -356,8 +356,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
// cast of lshr(shl(x,c1),c2) as well as other more complex cases.
if (I.getOpcode() != Instruction::AShr &&
canEvaluateShifted(Op0, Op1C->getZExtValue(), isLeftShift, *this, &I)) {
- DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"
- " to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n");
+ LLVM_DEBUG(
+ dbgs() << "ICE: GetShiftedValue propagating shift through expression"
+ " to eliminate shift:\n IN: "
+ << *Op0 << "\n SH: " << I << "\n");
return replaceInstUsesWith(
I, getShiftedValue(Op0, Op1C->getZExtValue(), isLeftShift, *this, DL));
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index ac4e568d5297..2549257bc975 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2939,7 +2939,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
for (auto *DII : DbgUsers) {
if (DII->getParent() == SrcBlock) {
DII->moveBefore(&*InsertPos);
- DEBUG(dbgs() << "SINK: " << *DII << '\n');
+ LLVM_DEBUG(dbgs() << "SINK: " << *DII << '\n');
}
}
return true;
@@ -2952,7 +2952,7 @@ bool InstCombiner::run() {
// Check to see if we can DCE the instruction.
if (isInstructionTriviallyDead(I, &TLI)) {
- DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
eraseInstFromFunction(*I);
++NumDeadInst;
MadeIRChange = true;
@@ -2966,7 +2966,8 @@ bool InstCombiner::run() {
if (!I->use_empty() &&
(I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
- DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I
+ << '\n');
// Add operands to the worklist.
replaceInstUsesWith(*I, C);
@@ -2985,8 +2986,8 @@ bool InstCombiner::run() {
KnownBits Known = computeKnownBits(I, /*Depth*/0, I);
if (Known.isConstant()) {
Constant *C = ConstantInt::get(Ty, Known.getConstant());
- DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
- " from: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C
+ << " from: " << *I << '\n');
// Add operands to the worklist.
replaceInstUsesWith(*I, C);
@@ -3025,7 +3026,7 @@ bool InstCombiner::run() {
if (UserIsSuccessor && UserParent->getUniquePredecessor()) {
// Okay, the CFG is simple enough, try to sink this instruction.
if (TryToSinkInstruction(I, UserParent)) {
- DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
MadeIRChange = true;
// We'll add uses of the sunk instruction below, but since sinking
// can expose opportunities for it's *operands* add them to the
@@ -3045,15 +3046,15 @@ bool InstCombiner::run() {
#ifndef NDEBUG
std::string OrigI;
#endif
- DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
- DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
+ LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
+ LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
if (Instruction *Result = visit(*I)) {
++NumCombined;
// Should we replace the old instruction with a new one?
if (Result != I) {
- DEBUG(dbgs() << "IC: Old = " << *I << '\n'
- << " New = " << *Result << '\n');
+ LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
+ << " New = " << *Result << '\n');
if (I->getDebugLoc())
Result->setDebugLoc(I->getDebugLoc());
@@ -3080,8 +3081,8 @@ bool InstCombiner::run() {
eraseInstFromFunction(*I);
} else {
- DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
- << " New = " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
+ << " New = " << *I << '\n');
// If the instruction was modified, it's possible that it is now dead.
// if so, remove it.
@@ -3132,7 +3133,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
// DCE instruction if trivially dead.
if (isInstructionTriviallyDead(Inst, TLI)) {
++NumDeadInst;
- DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
salvageDebugInfo(*Inst);
Inst->eraseFromParent();
MadeIRChange = true;
@@ -3143,8 +3144,8 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
if (!Inst->use_empty() &&
(Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0))))
if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
- DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
- << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *Inst
+ << '\n');
Inst->replaceAllUsesWith(C);
++NumConstProp;
if (isInstructionTriviallyDead(Inst, TLI))
@@ -3166,9 +3167,9 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
FoldRes = C;
if (FoldRes != C) {
- DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst
- << "\n Old = " << *C
- << "\n New = " << *FoldRes << '\n');
+ LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst
+ << "\n Old = " << *C
+ << "\n New = " << *FoldRes << '\n');
U = FoldRes;
MadeIRChange = true;
}
@@ -3271,8 +3272,8 @@ static bool combineInstructionsOverFunction(
int Iteration = 0;
while (true) {
++Iteration;
- DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
- << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
+ << F.getName() << "\n");
MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 5715c6067e94..eadc9ce50a83 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -877,7 +877,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
processStaticAllocas();
if (ClDebugStack) {
- DEBUG(dbgs() << F);
+ LLVM_DEBUG(dbgs() << F);
}
return true;
}
@@ -1617,7 +1617,7 @@ void AddressSanitizerModule::createInitializerPoisonCalls(
bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
Type *Ty = G->getValueType();
- DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
+ LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
if (GlobalsMD.get(G).IsBlacklisted) return false;
if (!Ty->isSized()) return false;
@@ -1659,7 +1659,8 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// See https://github.com/google/sanitizers/issues/305
// and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
if (Section.startswith(".CRT")) {
- DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
+ LLVM_DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G
+ << "\n");
return false;
}
@@ -1676,7 +1677,7 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// them.
if (ParsedSegment == "__OBJC" ||
(ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
- DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
+ LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
return false;
}
// See https://github.com/google/sanitizers/issues/32
@@ -1688,13 +1689,13 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// Therefore there's no point in placing redzones into __DATA,__cfstring.
// Moreover, it causes the linker to crash on OS X 10.7
if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
- DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
+ LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
return false;
}
// The linker merges the contents of cstring_literals and removes the
// trailing zeroes.
if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
- DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
+ LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
return false;
}
}
@@ -2161,7 +2162,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool
if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
- DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
+ LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
Initializers[i] = Initializer;
}
@@ -2195,7 +2196,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool
if (HasDynamicallyInitializedGlobals)
createInitializerPoisonCalls(M, ModuleName);
- DEBUG(dbgs() << M);
+ LLVM_DEBUG(dbgs() << M);
return true;
}
@@ -2436,7 +2437,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// Leave if the function doesn't need instrumentation.
if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
- DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
+ LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
initializeCallbacks(*F.getParent());
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
@@ -2549,8 +2550,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
if (NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty())
FunctionModified = true;
- DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
- << F << "\n");
+ LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
+ << F << "\n");
return FunctionModified;
}
@@ -2866,7 +2867,7 @@ void FunctionStackPoisoner::processStaticAllocas() {
}
auto DescriptionString = ComputeASanStackFrameDescription(SVD);
- DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
+ LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
uint64_t LocalStackSize = L.FrameSize;
bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
LocalStackSize <= kMaxStackMallocSize;
@@ -3101,7 +3102,8 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
} else if (GetElementPtrInst *EP = dyn_cast<GetElementPtrInst>(V)) {
Res = findAllocaForValue(EP->getPointerOperand());
} else {
- DEBUG(dbgs() << "Alloca search canceled on unknown instruction: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "Alloca search canceled on unknown instruction: " << *V
+ << "\n");
}
if (Res) AllocaForValue[V] = Res;
return Res;
diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp
index be9a22a8681b..87b3019c9108 100644
--- a/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -62,8 +62,8 @@ static bool instrumentMemAccess(Value *Ptr, Value *InstVal,
BuilderTy &IRB,
GetTrapBBT GetTrapBB) {
uint64_t NeededSize = DL.getTypeStoreSize(InstVal->getType());
- DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
- << " bytes\n");
+ LLVM_DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
+ << " bytes\n");
SizeOffsetEvalType SizeOffset = ObjSizeEval.compute(Ptr);
diff --git a/lib/Transforms/Instrumentation/CFGMST.h b/lib/Transforms/Instrumentation/CFGMST.h
index 54a36eb716ae..cc9b149d0b6a 100644
--- a/lib/Transforms/Instrumentation/CFGMST.h
+++ b/lib/Transforms/Instrumentation/CFGMST.h
@@ -97,7 +97,7 @@ public:
// Edges with large weight will be put into MST first so they are less likely
// to be instrumented.
void buildEdges() {
- DEBUG(dbgs() << "Build Edge on " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Build Edge on " << F.getName() << "\n");
const BasicBlock *Entry = &(F.getEntryBlock());
uint64_t EntryWeight = (BFI != nullptr ? BFI->getEntryFreq() : 2);
@@ -107,8 +107,8 @@ public:
// Add a fake edge to the entry.
EntryIncoming = &addEdge(nullptr, Entry, EntryWeight);
- DEBUG(dbgs() << " Edge: from fake node to " << Entry->getName()
- << " w = " << EntryWeight << "\n");
+ LLVM_DEBUG(dbgs() << " Edge: from fake node to " << Entry->getName()
+ << " w = " << EntryWeight << "\n");
// Special handling for single BB functions.
if (succ_empty(Entry)) {
@@ -138,8 +138,8 @@ public:
Weight = BPI->getEdgeProbability(&*BB, TargetBB).scale(scaleFactor);
auto *E = &addEdge(&*BB, TargetBB, Weight);
E->IsCritical = Critical;
- DEBUG(dbgs() << " Edge: from " << BB->getName() << " to "
- << TargetBB->getName() << " w=" << Weight << "\n");
+ LLVM_DEBUG(dbgs() << " Edge: from " << BB->getName() << " to "
+ << TargetBB->getName() << " w=" << Weight << "\n");
// Keep track of entry/exit edges:
if (&*BB == Entry) {
@@ -164,8 +164,8 @@ public:
MaxExitOutWeight = BBWeight;
ExitOutgoing = ExitO;
}
- DEBUG(dbgs() << " Edge: from " << BB->getName() << " to fake exit"
- << " w = " << BBWeight << "\n");
+ LLVM_DEBUG(dbgs() << " Edge: from " << BB->getName() << " to fake exit"
+ << " w = " << BBWeight << "\n");
}
}
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 49f7c330be88..c8eb680bbdf6 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -316,7 +316,7 @@ namespace {
ReturnBlock(1, os) {
this->os = os;
- DEBUG(dbgs() << "Function: " << getFunctionName(SP) << "\n");
+ LLVM_DEBUG(dbgs() << "Function: " << getFunctionName(SP) << "\n");
uint32_t i = 0;
for (auto &BB : *F) {
@@ -384,7 +384,7 @@ namespace {
for (int i = 0, e = Blocks.size() + 1; i != e; ++i) {
write(0); // No flags on our blocks.
}
- DEBUG(dbgs() << Blocks.size() << " blocks.\n");
+ LLVM_DEBUG(dbgs() << Blocks.size() << " blocks.\n");
// Emit edges between blocks.
if (Blocks.empty()) return;
@@ -397,8 +397,8 @@ namespace {
write(Block.OutEdges.size() * 2 + 1);
write(Block.Number);
for (int i = 0, e = Block.OutEdges.size(); i != e; ++i) {
- DEBUG(dbgs() << Block.Number << " -> " << Block.OutEdges[i]->Number
- << "\n");
+ LLVM_DEBUG(dbgs() << Block.Number << " -> "
+ << Block.OutEdges[i]->Number << "\n");
write(Block.OutEdges[i]->Number);
write(0); // no flags
}
diff --git a/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 75061749fbbb..a1205d81c91d 100644
--- a/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -227,7 +227,7 @@ FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
///
/// inserts a call to __hwasan_init to the module's constructor list.
bool HWAddressSanitizer::doInitialization(Module &M) {
- DEBUG(dbgs() << "Init " << M.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
auto &DL = M.getDataLayout();
TargetTriple = Triple(M.getTargetTriple());
@@ -457,7 +457,7 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
}
bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
- DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
bool IsWrite = false;
unsigned Alignment = 0;
uint64_t TypeSize = 0;
@@ -684,7 +684,7 @@ bool HWAddressSanitizer::runOnFunction(Function &F) {
if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
return false;
- DEBUG(dbgs() << "Function: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
initializeCallbacks(*F.getParent());
diff --git a/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index ec035c8474ba..27fb0e4393af 100644
--- a/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -223,12 +223,12 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite(
uint64_t TotalCount, uint32_t NumCandidates) {
std::vector<PromotionCandidate> Ret;
- DEBUG(dbgs() << " \nWork on callsite #" << NumOfPGOICallsites << *Inst
- << " Num_targets: " << ValueDataRef.size()
- << " Num_candidates: " << NumCandidates << "\n");
+ LLVM_DEBUG(dbgs() << " \nWork on callsite #" << NumOfPGOICallsites << *Inst
+ << " Num_targets: " << ValueDataRef.size()
+ << " Num_candidates: " << NumCandidates << "\n");
NumOfPGOICallsites++;
if (ICPCSSkip != 0 && NumOfPGOICallsites <= ICPCSSkip) {
- DEBUG(dbgs() << " Skip: User options.\n");
+ LLVM_DEBUG(dbgs() << " Skip: User options.\n");
return Ret;
}
@@ -236,11 +236,11 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite(
uint64_t Count = ValueDataRef[I].Count;
assert(Count <= TotalCount);
uint64_t Target = ValueDataRef[I].Value;
- DEBUG(dbgs() << " Candidate " << I << " Count=" << Count
- << " Target_func: " << Target << "\n");
+ LLVM_DEBUG(dbgs() << " Candidate " << I << " Count=" << Count
+ << " Target_func: " << Target << "\n");
if (ICPInvokeOnly && dyn_cast<CallInst>(Inst)) {
- DEBUG(dbgs() << " Not promote: User options.\n");
+ LLVM_DEBUG(dbgs() << " Not promote: User options.\n");
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst)
<< " Not promote: User options";
@@ -248,7 +248,7 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite(
break;
}
if (ICPCallOnly && dyn_cast<InvokeInst>(Inst)) {
- DEBUG(dbgs() << " Not promote: User option.\n");
+ LLVM_DEBUG(dbgs() << " Not promote: User option.\n");
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst)
<< " Not promote: User options";
@@ -256,7 +256,7 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite(
break;
}
if (ICPCutOff != 0 && NumOfPGOICallPromotion >= ICPCutOff) {
- DEBUG(dbgs() << " Not promote: Cutoff reached.\n");
+ LLVM_DEBUG(dbgs() << " Not promote: Cutoff reached.\n");
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "CutOffReached", Inst)
<< " Not promote: Cutoff reached";
@@ -266,7 +266,7 @@ ICallPromotionFunc::getPromotionCandidatesForCallSite(
Function *TargetFunction = Symtab->getFunction(Target);
if (TargetFunction == nullptr) {
- DEBUG(dbgs() << " Not promote: Cannot find the target\n");
+ LLVM_DEBUG(dbgs() << " Not promote: Cannot find the target\n");
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UnableToFindTarget", Inst)
<< "Cannot promote indirect call: target not found";
@@ -387,7 +387,7 @@ static bool promoteIndirectCalls(Module &M, ProfileSummaryInfo *PSI,
InstrProfSymtab Symtab;
if (Error E = Symtab.create(M, InLTO)) {
std::string SymtabFailure = toString(std::move(E));
- DEBUG(dbgs() << "Failed to create symtab: " << SymtabFailure << "\n");
+ LLVM_DEBUG(dbgs() << "Failed to create symtab: " << SymtabFailure << "\n");
(void)SymtabFailure;
return false;
}
@@ -412,12 +412,12 @@ static bool promoteIndirectCalls(Module &M, ProfileSummaryInfo *PSI,
ICallPromotionFunc ICallPromotion(F, &M, &Symtab, SamplePGO, *ORE);
bool FuncChanged = ICallPromotion.processFunction(PSI);
if (ICPDUMPAFTER && FuncChanged) {
- DEBUG(dbgs() << "\n== IR Dump After =="; F.print(dbgs()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n== IR Dump After =="; F.print(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n");
}
Changed |= FuncChanged;
if (ICPCutOff != 0 && NumOfPGOICallPromotion >= ICPCutOff) {
- DEBUG(dbgs() << " Stop: Cutoff reached.\n");
+ LLVM_DEBUG(dbgs() << " Stop: Cutoff reached.\n");
break;
}
}
diff --git a/lib/Transforms/Instrumentation/InstrProfiling.cpp b/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 0aaf87499f63..8be1638ce403 100644
--- a/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -271,8 +271,8 @@ public:
break;
}
- DEBUG(dbgs() << Promoted << " counters promoted for loop (depth="
- << L.getLoopDepth() << ")\n");
+ LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth="
+ << L.getLoopDepth() << ")\n");
return Promoted != 0;
}
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index e18ea717ee34..2fad41a10210 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -796,9 +796,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MS.initializeCallbacks(*F.getParent());
ActualFnStart = &F.getEntryBlock();
- DEBUG(if (!InsertChecks)
- dbgs() << "MemorySanitizer is not inserting checks into '"
- << F.getName() << "'\n");
+ LLVM_DEBUG(if (!InsertChecks) dbgs()
+ << "MemorySanitizer is not inserting checks into '"
+ << F.getName() << "'\n");
}
Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
@@ -901,7 +901,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
- DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
+ LLVM_DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
if (ClCheckAccessAddress)
insertShadowCheck(Addr, NewSI);
@@ -932,9 +932,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
bool AsCall) {
IRBuilder<> IRB(OrigIns);
- DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
+ LLVM_DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
- DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
+ LLVM_DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
if (ConstantShadow) {
@@ -964,7 +964,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.SetInsertPoint(CheckTerm);
insertWarningFn(IRB, Origin);
- DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
+ LLVM_DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
}
}
@@ -975,7 +975,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Origin = ShadowData.Origin;
materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
}
- DEBUG(dbgs() << "DONE:\n" << F);
+ LLVM_DEBUG(dbgs() << "DONE:\n" << F);
}
/// Add MemorySanitizer instrumentation to a function.
@@ -1048,7 +1048,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
Elements.push_back(getShadowTy(ST->getElementType(i)));
StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
- DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
+ LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
return Res;
}
uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
@@ -1182,7 +1182,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void setOrigin(Value *V, Value *Origin) {
if (!MS.TrackOrigins) return;
assert(!OriginMap.count(V) && "Values may only have one origin");
- DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
+ LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
OriginMap[V] = Origin;
}
@@ -1245,7 +1245,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// For instructions the shadow is already stored in the map.
Value *Shadow = ShadowMap[V];
if (!Shadow) {
- DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
+ LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
(void)I;
assert(Shadow && "No shadow for a value");
}
@@ -1253,7 +1253,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
if (UndefValue *U = dyn_cast<UndefValue>(V)) {
Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
- DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
+ LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
(void)U;
return AllOnes;
}
@@ -1268,7 +1268,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
const DataLayout &DL = F->getParent()->getDataLayout();
for (auto &FArg : F->args()) {
if (!FArg.getType()->isSized()) {
- DEBUG(dbgs() << "Arg is not sized\n");
+ LLVM_DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
unsigned Size =
@@ -1300,7 +1300,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
CopyAlign, Size);
- DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
+ LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
(void)Cpy;
}
*ShadowPtr = getCleanShadow(V);
@@ -1313,8 +1313,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
}
}
- DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
- **ShadowPtr << "\n");
+ LLVM_DEBUG(dbgs()
+ << " ARG: " << FArg << " ==> " << **ShadowPtr << "\n");
if (MS.TrackOrigins && !Overflow) {
Value *OriginPtr =
getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
@@ -2790,13 +2790,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
unsigned ArgOffset = 0;
- DEBUG(dbgs() << " CallSite: " << I << "\n");
+ LLVM_DEBUG(dbgs() << " CallSite: " << I << "\n");
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
unsigned i = ArgIt - CS.arg_begin();
if (!A->getType()->isSized()) {
- DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
continue;
}
unsigned Size = 0;
@@ -2806,8 +2806,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// __msan_param_tls.
Value *ArgShadow = getShadow(A);
Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
- DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
- " Shadow: " << *ArgShadow << "\n");
+ LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A
+ << " Shadow: " << *ArgShadow << "\n");
bool ArgIsInitialized = false;
const DataLayout &DL = F.getParent()->getDataLayout();
if (CS.paramHasAttr(i, Attribute::ByVal)) {
@@ -2836,10 +2836,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
getOriginPtrForArgument(A, IRB, ArgOffset));
(void)Store;
assert(Size != 0 && Store != nullptr);
- DEBUG(dbgs() << " Param:" << *Store << "\n");
+ LLVM_DEBUG(dbgs() << " Param:" << *Store << "\n");
ArgOffset += alignTo(Size, 8);
}
- DEBUG(dbgs() << " done with call args\n");
+ LLVM_DEBUG(dbgs() << " done with call args\n");
FunctionType *FT =
cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
@@ -3046,24 +3046,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitExtractValueInst(ExtractValueInst &I) {
IRBuilder<> IRB(&I);
Value *Agg = I.getAggregateOperand();
- DEBUG(dbgs() << "ExtractValue: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "ExtractValue: " << I << "\n");
Value *AggShadow = getShadow(Agg);
- DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
+ LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
- DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
+ LLVM_DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
setShadow(&I, ResShadow);
setOriginForNaryOp(I);
}
void visitInsertValueInst(InsertValueInst &I) {
IRBuilder<> IRB(&I);
- DEBUG(dbgs() << "InsertValue: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "InsertValue: " << I << "\n");
Value *AggShadow = getShadow(I.getAggregateOperand());
Value *InsShadow = getShadow(I.getInsertedValueOperand());
- DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
- DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
+ LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
+ LLVM_DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
- DEBUG(dbgs() << " Res: " << *Res << "\n");
+ LLVM_DEBUG(dbgs() << " Res: " << *Res << "\n");
setShadow(&I, Res);
setOriginForNaryOp(I);
}
@@ -3078,17 +3078,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void visitResumeInst(ResumeInst &I) {
- DEBUG(dbgs() << "Resume: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
// Nothing to do here.
}
void visitCleanupReturnInst(CleanupReturnInst &CRI) {
- DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
+ LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
// Nothing to do here.
}
void visitCatchReturnInst(CatchReturnInst &CRI) {
- DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
+ LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
// Nothing to do here.
}
@@ -3129,7 +3129,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Everything else: stop propagating and check for poisoned shadow.
if (ClDumpStrictInstructions)
dumpInst(I);
- DEBUG(dbgs() << "DEFAULT: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
Value *Operand = I.getOperand(i);
if (Operand->getType()->isSized())
diff --git a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 3121d102c6d9..307b7eaa2196 100644
--- a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -546,7 +546,7 @@ public:
computeCFGHash();
if (!ComdatMembers.empty())
renameComdatFunction();
- DEBUG(dumpInfo("after CFGMST"));
+ LLVM_DEBUG(dumpInfo("after CFGMST"));
NumOfPGOBB += MST.BBInfos.size();
for (auto &E : MST.AllEdges) {
@@ -596,12 +596,12 @@ void FuncPGOInstrumentation<Edge, BBInfo>::computeCFGHash() {
FunctionHash = (uint64_t)SIVisitor.getNumOfSelectInsts() << 56 |
(uint64_t)ValueSites[IPVK_IndirectCallTarget].size() << 48 |
(uint64_t)MST.AllEdges.size() << 32 | JC.getCRC();
- DEBUG(dbgs() << "Function Hash Computation for " << F.getName() << ":\n"
- << " CRC = " << JC.getCRC()
- << ", Selects = " << SIVisitor.getNumOfSelectInsts()
- << ", Edges = " << MST.AllEdges.size()
- << ", ICSites = " << ValueSites[IPVK_IndirectCallTarget].size()
- << ", Hash = " << FunctionHash << "\n";);
+ LLVM_DEBUG(dbgs() << "Function Hash Computation for " << F.getName() << ":\n"
+ << " CRC = " << JC.getCRC()
+ << ", Selects = " << SIVisitor.getNumOfSelectInsts()
+ << ", Edges = " << MST.AllEdges.size() << ", ICSites = "
+ << ValueSites[IPVK_IndirectCallTarget].size()
+ << ", Hash = " << FunctionHash << "\n";);
}
// Check if we can safely rename this Comdat function.
@@ -702,8 +702,8 @@ BasicBlock *FuncPGOInstrumentation<Edge, BBInfo>::getInstrBB(Edge *E) {
// For a critical edge, we have to split. Instrument the newly
// created BB.
NumOfPGOSplit++;
- DEBUG(dbgs() << "Split critical edge: " << getBBInfo(SrcBB).Index << " --> "
- << getBBInfo(DestBB).Index << "\n");
+ LLVM_DEBUG(dbgs() << "Split critical edge: " << getBBInfo(SrcBB).Index
+ << " --> " << getBBInfo(DestBB).Index << "\n");
unsigned SuccNum = GetSuccessorNumber(SrcBB, DestBB);
BasicBlock *InstrBB = SplitCriticalEdge(TI, SuccNum);
assert(InstrBB && "Critical edge is not split");
@@ -753,8 +753,8 @@ static void instrumentOneFunc(
for (auto &I : FuncInfo.ValueSites[IPVK_IndirectCallTarget]) {
CallSite CS(I);
Value *Callee = CS.getCalledValue();
- DEBUG(dbgs() << "Instrument one indirect call: CallSite Index = "
- << NumIndirectCallSites << "\n");
+ LLVM_DEBUG(dbgs() << "Instrument one indirect call: CallSite Index = "
+ << NumIndirectCallSites << "\n");
IRBuilder<> Builder(I);
assert(Builder.GetInsertPoint() != I->getParent()->end() &&
"Cannot get the Instrumentation point");
@@ -1042,14 +1042,14 @@ bool PGOUseFunc::readCounters(IndexedInstrProfReader *PGOReader) {
std::vector<uint64_t> &CountFromProfile = ProfileRecord.Counts;
NumOfPGOFunc++;
- DEBUG(dbgs() << CountFromProfile.size() << " counts\n");
+ LLVM_DEBUG(dbgs() << CountFromProfile.size() << " counts\n");
uint64_t ValueSum = 0;
for (unsigned I = 0, S = CountFromProfile.size(); I < S; I++) {
- DEBUG(dbgs() << " " << I << ": " << CountFromProfile[I] << "\n");
+ LLVM_DEBUG(dbgs() << " " << I << ": " << CountFromProfile[I] << "\n");
ValueSum += CountFromProfile[I];
}
- DEBUG(dbgs() << "SUM = " << ValueSum << "\n");
+ LLVM_DEBUG(dbgs() << "SUM = " << ValueSum << "\n");
getBBInfo(nullptr).UnknownCountOutEdge = 2;
getBBInfo(nullptr).UnknownCountInEdge = 2;
@@ -1129,7 +1129,7 @@ void PGOUseFunc::populateCounters() {
}
}
- DEBUG(dbgs() << "Populate counts in " << NumPasses << " passes.\n");
+ LLVM_DEBUG(dbgs() << "Populate counts in " << NumPasses << " passes.\n");
#ifndef NDEBUG
// Assert every BB has a valid counter.
for (auto &BB : F) {
@@ -1154,13 +1154,13 @@ void PGOUseFunc::populateCounters() {
FuncInfo.SIVisitor.annotateSelects(F, this, &CountPosition);
assert(CountPosition == ProfileCountSize);
- DEBUG(FuncInfo.dumpInfo("after reading profile."));
+ LLVM_DEBUG(FuncInfo.dumpInfo("after reading profile."));
}
// Assign the scaled count values to the BB with multiple out edges.
void PGOUseFunc::setBranchWeights() {
// Generate MD_prof metadata for every branch instruction.
- DEBUG(dbgs() << "\nSetting branch weights.\n");
+ LLVM_DEBUG(dbgs() << "\nSetting branch weights.\n");
for (auto &BB : F) {
TerminatorInst *TI = BB.getTerminator();
if (TI->getNumSuccessors() < 2)
@@ -1201,7 +1201,7 @@ static bool isIndirectBrTarget(BasicBlock *BB) {
}
void PGOUseFunc::annotateIrrLoopHeaderWeights() {
- DEBUG(dbgs() << "\nAnnotating irreducible loop header weights.\n");
+ LLVM_DEBUG(dbgs() << "\nAnnotating irreducible loop header weights.\n");
// Find irr loop headers
for (auto &BB : F) {
// As a heuristic also annotate indrectbr targets as they have a high chance
@@ -1334,9 +1334,9 @@ void PGOUseFunc::annotateValueSites(uint32_t Kind) {
}
for (auto &I : ValueSites) {
- DEBUG(dbgs() << "Read one value site profile (kind = " << Kind
- << "): Index = " << ValueSiteIndex << " out of "
- << NumValueSites << "\n");
+ LLVM_DEBUG(dbgs() << "Read one value site profile (kind = " << Kind
+ << "): Index = " << ValueSiteIndex << " out of "
+ << NumValueSites << "\n");
annotateValueSite(*M, *I, ProfileRecord,
static_cast<InstrProfValueKind>(Kind), ValueSiteIndex,
Kind == IPVK_MemOPSize ? MaxNumMemOPAnnotations
@@ -1432,7 +1432,7 @@ static bool annotateAllFunctions(
Module &M, StringRef ProfileFileName,
function_ref<BranchProbabilityInfo *(Function &)> LookupBPI,
function_ref<BlockFrequencyInfo *(Function &)> LookupBFI) {
- DEBUG(dbgs() << "Read in profile counters: ");
+ LLVM_DEBUG(dbgs() << "Read in profile counters: ");
auto &Ctx = M.getContext();
// Read the counter array from file.
auto ReaderOrErr = IndexedInstrProfReader::create(ProfileFileName);
@@ -1518,12 +1518,13 @@ static bool annotateAllFunctions(
// inconsistent MST between prof-gen and prof-use.
for (auto &F : HotFunctions) {
F->addFnAttr(Attribute::InlineHint);
- DEBUG(dbgs() << "Set inline attribute to function: " << F->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "Set inline attribute to function: " << F->getName()
+ << "\n");
}
for (auto &F : ColdFunctions) {
F->addFnAttr(Attribute::Cold);
- DEBUG(dbgs() << "Set cold attribute to function: " << F->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Set cold attribute to function: " << F->getName()
+ << "\n");
}
return true;
}
@@ -1586,9 +1587,10 @@ void llvm::setProfMetadata(Module *M, Instruction *TI,
for (const auto &ECI : EdgeCounts)
Weights.push_back(scaleBranchCount(ECI, Scale));
- DEBUG(dbgs() << "Weight is: ";
- for (const auto &W : Weights) { dbgs() << W << " "; }
- dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "Weight is: "; for (const auto &W
+ : Weights) {
+ dbgs() << W << " ";
+ } dbgs() << "\n";);
TI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
if (EmitBranchProbability) {
std::string BrCondStr = getBranchCondString(TI);
diff --git a/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
index 494aad10d2f2..73d419aa6cc8 100644
--- a/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
+++ b/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
@@ -151,8 +151,9 @@ public:
if (perform(MI)) {
Changed = true;
++NumOfPGOMemOPOpt;
- DEBUG(dbgs() << "MemOP call: " << MI->getCalledFunction()->getName()
- << "is Transformed.\n");
+ LLVM_DEBUG(dbgs() << "MemOP call: "
+ << MI->getCalledFunction()->getName()
+ << "is Transformed.\n");
}
}
}
@@ -245,9 +246,9 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) {
}
ArrayRef<InstrProfValueData> VDs(ValueDataArray.get(), NumVals);
- DEBUG(dbgs() << "Read one memory intrinsic profile with count " << ActualCount
- << "\n");
- DEBUG(
+ LLVM_DEBUG(dbgs() << "Read one memory intrinsic profile with count "
+ << ActualCount << "\n");
+ LLVM_DEBUG(
for (auto &VD
: VDs) { dbgs() << " (" << VD.Value << "," << VD.Count << ")\n"; });
@@ -260,8 +261,8 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) {
TotalCount = ActualCount;
if (MemOPScaleCount)
- DEBUG(dbgs() << "Scale counts: numerator = " << ActualCount
- << " denominator = " << SavedTotalCount << "\n");
+ LLVM_DEBUG(dbgs() << "Scale counts: numerator = " << ActualCount
+ << " denominator = " << SavedTotalCount << "\n");
// Keeping track of the count of the default case:
uint64_t RemainCount = TotalCount;
@@ -310,9 +311,9 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) {
uint64_t SumForOpt = TotalCount - RemainCount;
- DEBUG(dbgs() << "Optimize one memory intrinsic call to " << Version
- << " Versions (covering " << SumForOpt << " out of "
- << TotalCount << ")\n");
+ LLVM_DEBUG(dbgs() << "Optimize one memory intrinsic call to " << Version
+ << " Versions (covering " << SumForOpt << " out of "
+ << TotalCount << ")\n");
// mem_op(..., size)
// ==>
@@ -331,8 +332,8 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) {
// merge_bb:
BasicBlock *BB = MI->getParent();
- DEBUG(dbgs() << "\n\n== Basic Block Before ==\n");
- DEBUG(dbgs() << *BB << "\n");
+ LLVM_DEBUG(dbgs() << "\n\n== Basic Block Before ==\n");
+ LLVM_DEBUG(dbgs() << *BB << "\n");
auto OrigBBFreq = BFI.getBlockFreq(BB);
BasicBlock *DefaultBB = SplitBlock(BB, MI);
@@ -358,7 +359,7 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) {
annotateValueSite(*Func.getParent(), *MI, VDs.slice(Version),
SavedRemainCount, IPVK_MemOPSize, NumVals);
- DEBUG(dbgs() << "\n\n== Basic Block After==\n");
+ LLVM_DEBUG(dbgs() << "\n\n== Basic Block After==\n");
for (uint64_t SizeId : SizeIds) {
BasicBlock *CaseBB = BasicBlock::Create(
@@ -374,13 +375,13 @@ bool MemOPSizeOpt::perform(MemIntrinsic *MI) {
IRBuilder<> IRBCase(CaseBB);
IRBCase.CreateBr(MergeBB);
SI->addCase(CaseSizeId, CaseBB);
- DEBUG(dbgs() << *CaseBB << "\n");
+ LLVM_DEBUG(dbgs() << *CaseBB << "\n");
}
setProfMetadata(Func.getParent(), SI, CaseCounts, MaxCount);
- DEBUG(dbgs() << *BB << "\n");
- DEBUG(dbgs() << *DefaultBB << "\n");
- DEBUG(dbgs() << *MergeBB << "\n");
+ LLVM_DEBUG(dbgs() << *BB << "\n");
+ LLVM_DEBUG(dbgs() << *DefaultBB << "\n");
+ LLVM_DEBUG(dbgs() << *MergeBB << "\n");
ORE.emit([&]() {
using namespace ore;
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index b84f3916907b..17168a7bd962 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -502,7 +502,7 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
if (Idx < 0)
return false;
if (IsWrite && isVtableAccess(I)) {
- DEBUG(dbgs() << " VPTR : " << *I << "\n");
+ LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n");
Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
// StoredValue may be a vector type if we are storing several vptrs at once.
// In this case, just take the first element of the vector since this is
diff --git a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
index fb4eef523baf..8d3ef8fde534 100644
--- a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
@@ -103,10 +103,12 @@ bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
// zap the pair.
if (Push && cast<CallInst>(Inst)->getArgOperand(0) == Push) {
Changed = true;
- DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop "
- "autorelease pair:\n"
- " Pop: " << *Inst << "\n"
- << " Push: " << *Push << "\n");
+ LLVM_DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop "
+ "autorelease pair:\n"
+ " Pop: "
+ << *Inst << "\n"
+ << " Push: " << *Push
+ << "\n");
Inst->eraseFromParent();
Push->eraseFromParent();
}
diff --git a/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index bab2d1c585d3..55881b01b451 100644
--- a/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -132,16 +132,18 @@ bool ObjCARCContract::optimizeRetainCall(Function &F, Instruction *Retain) {
Changed = true;
++NumPeeps;
- DEBUG(dbgs() << "Transforming objc_retain => "
- "objc_retainAutoreleasedReturnValue since the operand is a "
- "return value.\nOld: "<< *Retain << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Transforming objc_retain => "
+ "objc_retainAutoreleasedReturnValue since the operand is a "
+ "return value.\nOld: "
+ << *Retain << "\n");
// We do not have to worry about tail calls/does not throw since
// retain/retainRV have the same properties.
Constant *Decl = EP.get(ARCRuntimeEntryPointKind::RetainRV);
cast<CallInst>(Retain)->setCalledFunction(Decl);
- DEBUG(dbgs() << "New: " << *Retain << "\n");
+ LLVM_DEBUG(dbgs() << "New: " << *Retain << "\n");
return true;
}
@@ -180,16 +182,19 @@ bool ObjCARCContract::contractAutorelease(
Changed = true;
++NumPeeps;
- DEBUG(dbgs() << " Fusing retain/autorelease!\n"
- " Autorelease:" << *Autorelease << "\n"
- " Retain: " << *Retain << "\n");
+ LLVM_DEBUG(dbgs() << " Fusing retain/autorelease!\n"
+ " Autorelease:"
+ << *Autorelease
+ << "\n"
+ " Retain: "
+ << *Retain << "\n");
Constant *Decl = EP.get(Class == ARCInstKind::AutoreleaseRV
? ARCRuntimeEntryPointKind::RetainAutoreleaseRV
: ARCRuntimeEntryPointKind::RetainAutorelease);
Retain->setCalledFunction(Decl);
- DEBUG(dbgs() << " New RetainAutorelease: " << *Retain << "\n");
+ LLVM_DEBUG(dbgs() << " New RetainAutorelease: " << *Retain << "\n");
EraseInstruction(Autorelease);
return true;
@@ -387,7 +392,7 @@ void ObjCARCContract::tryToContractReleaseIntoStoreStrong(
Changed = true;
++NumStoreStrongs;
- DEBUG(
+ LLVM_DEBUG(
llvm::dbgs() << " Contracting retain, release into objc_storeStrong.\n"
<< " Old:\n"
<< " Store: " << *Store << "\n"
@@ -414,7 +419,8 @@ void ObjCARCContract::tryToContractReleaseIntoStoreStrong(
// we can set the tail flag once we know it's safe.
StoreStrongCalls.insert(StoreStrong);
- DEBUG(llvm::dbgs() << " New Store Strong: " << *StoreStrong << "\n");
+ LLVM_DEBUG(llvm::dbgs() << " New Store Strong: " << *StoreStrong
+ << "\n");
if (&*Iter == Retain) ++Iter;
if (&*Iter == Store) ++Iter;
@@ -472,8 +478,8 @@ bool ObjCARCContract::tryToPeepholeInstruction(
} while (IsNoopInstruction(&*BBI));
if (&*BBI == GetArgRCIdentityRoot(Inst)) {
- DEBUG(dbgs() << "Adding inline asm marker for the return value "
- "optimization.\n");
+ LLVM_DEBUG(dbgs() << "Adding inline asm marker for the return value "
+ "optimization.\n");
Changed = true;
InlineAsm *IA = InlineAsm::get(
FunctionType::get(Type::getVoidTy(Inst->getContext()),
@@ -495,8 +501,8 @@ bool ObjCARCContract::tryToPeepholeInstruction(
Changed = true;
new StoreInst(Null, CI->getArgOperand(0), CI);
- DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n"
- << " New = " << *Null << "\n");
+ LLVM_DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n"
+ << " New = " << *Null << "\n");
CI->replaceAllUsesWith(Null);
CI->eraseFromParent();
@@ -547,7 +553,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
isFuncletEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
BlockColors = colorEHFunclets(F);
- DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n");
+ LLVM_DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n");
// Track whether it's ok to mark objc_storeStrong calls with the "tail"
// keyword. Be conservative if the function has variadic arguments.
@@ -565,7 +571,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E;) {
Instruction *Inst = &*I++;
- DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
// First try to peephole Inst. If there is nothing further we can do in
// terms of undoing objc-arc-expand, process the next inst.
diff --git a/lib/Transforms/ObjCARC/ObjCARCExpand.cpp b/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
index fab9845faccb..6a345ef56e1b 100644
--- a/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
@@ -91,12 +91,13 @@ bool ObjCARCExpand::runOnFunction(Function &F) {
bool Changed = false;
- DEBUG(dbgs() << "ObjCARCExpand: Visiting Function: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "ObjCARCExpand: Visiting Function: " << F.getName()
+ << "\n");
for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ++I) {
Instruction *Inst = &*I;
- DEBUG(dbgs() << "ObjCARCExpand: Visiting: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "ObjCARCExpand: Visiting: " << *Inst << "\n");
switch (GetBasicARCInstKind(Inst)) {
case ARCInstKind::Retain:
@@ -111,8 +112,10 @@ bool ObjCARCExpand::runOnFunction(Function &F) {
// emitted here. We'll redo them in the contract pass.
Changed = true;
Value *Value = cast<CallInst>(Inst)->getArgOperand(0);
- DEBUG(dbgs() << "ObjCARCExpand: Old = " << *Inst << "\n"
- " New = " << *Value << "\n");
+ LLVM_DEBUG(dbgs() << "ObjCARCExpand: Old = " << *Inst
+ << "\n"
+ " New = "
+ << *Value << "\n");
Inst->replaceAllUsesWith(Value);
break;
}
@@ -121,7 +124,7 @@ bool ObjCARCExpand::runOnFunction(Function &F) {
}
}
- DEBUG(dbgs() << "ObjCARCExpand: Finished List.\n\n");
+ LLVM_DEBUG(dbgs() << "ObjCARCExpand: Finished List.\n\n");
return Changed;
}
diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index 7df2fe52caea..db09cc3487ac 100644
--- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -423,7 +423,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
// Dump the pointers we are tracking.
OS << " TopDown State:\n";
if (!BBInfo.hasTopDownPtrs()) {
- DEBUG(dbgs() << " NONE!\n");
+ LLVM_DEBUG(dbgs() << " NONE!\n");
} else {
for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
I != E; ++I) {
@@ -443,7 +443,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
OS << " BottomUp State:\n";
if (!BBInfo.hasBottomUpPtrs()) {
- DEBUG(dbgs() << " NONE!\n");
+ LLVM_DEBUG(dbgs() << " NONE!\n");
} else {
for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
I != E; ++I) {
@@ -613,8 +613,8 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
Changed = true;
++NumPeeps;
- DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
- << "Erasing " << *RetainRV << "\n");
+ LLVM_DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
+ << "Erasing " << *RetainRV << "\n");
EraseInstruction(&*I);
EraseInstruction(RetainRV);
@@ -626,14 +626,15 @@ ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
Changed = true;
++NumPeeps;
- DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
- "objc_retain since the operand is not a return value.\n"
- "Old = " << *RetainRV << "\n");
+ LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
+ "objc_retain since the operand is not a return value.\n"
+ "Old = "
+ << *RetainRV << "\n");
Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);
- DEBUG(dbgs() << "New = " << *RetainRV << "\n");
+ LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");
return false;
}
@@ -671,10 +672,12 @@ void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
Changed = true;
++NumPeeps;
- DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
- "objc_autorelease since its operand is not used as a return "
- "value.\n"
- "Old = " << *AutoreleaseRV << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Transforming objc_autoreleaseReturnValue => "
+ "objc_autorelease since its operand is not used as a return "
+ "value.\n"
+ "Old = "
+ << *AutoreleaseRV << "\n");
CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
@@ -682,7 +685,7 @@ void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
Class = ARCInstKind::Autorelease;
- DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
+ LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
namespace {
@@ -713,7 +716,7 @@ CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
- DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
+ LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
// Reset all the flags in preparation for recomputing them.
UsedInThisFunction = 0;
@@ -728,7 +731,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
ARCInstKind Class = GetBasicARCInstKind(Inst);
- DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");
switch (Class) {
default: break;
@@ -744,7 +747,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
case ARCInstKind::NoopCast:
Changed = true;
++NumNoops;
- DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
EraseInstruction(Inst);
continue;
@@ -762,8 +765,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
Constant::getNullValue(Ty),
CI);
Value *NewValue = UndefValue::get(CI->getType());
- DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
- "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
+ LLVM_DEBUG(
+ dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
+ "\nOld = "
+ << *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
continue;
@@ -782,8 +787,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
CI);
Value *NewValue = UndefValue::get(CI->getType());
- DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
- "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
+ LLVM_DEBUG(
+ dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
+ "\nOld = "
+ << *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
@@ -818,9 +825,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
MDNode::get(C, None));
- DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
- "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
- << *NewCall << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
+ "since x is otherwise unused.\nOld: "
+ << *Call << "\nNew: " << *NewCall << "\n");
EraseInstruction(Call);
Inst = NewCall;
@@ -832,8 +840,10 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
// a tail keyword.
if (IsAlwaysTail(Class)) {
Changed = true;
- DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
- "passed stack args: " << *Inst << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Adding tail keyword to function since it can never be "
+ "passed stack args: "
+ << *Inst << "\n");
cast<CallInst>(Inst)->setTailCall();
}
@@ -841,16 +851,16 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
// semantics of ARC truly do not do so.
if (IsNeverTail(Class)) {
Changed = true;
- DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
- "\n");
+ LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
+ << "\n");
cast<CallInst>(Inst)->setTailCall(false);
}
// Set nounwind as needed.
if (IsNoThrow(Class)) {
Changed = true;
- DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: "
+ << *Inst << "\n");
cast<CallInst>(Inst)->setDoesNotThrow();
}
@@ -865,8 +875,8 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
if (IsNullOrUndef(Arg)) {
Changed = true;
++NumNoops;
- DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
- << "\n");
+ LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
+ << "\n");
EraseInstruction(Inst);
continue;
}
@@ -967,14 +977,15 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
Clone->setArgOperand(0, Op);
Clone->insertBefore(InsertPos);
- DEBUG(dbgs() << "Cloning "
- << *CInst << "\n"
- "And inserting clone at " << *InsertPos << "\n");
+ LLVM_DEBUG(dbgs() << "Cloning " << *CInst
+ << "\n"
+ "And inserting clone at "
+ << *InsertPos << "\n");
Worklist.push_back(std::make_pair(Clone, Incoming));
}
}
// Erase the original call.
- DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
+ LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
EraseInstruction(CInst);
continue;
}
@@ -1151,7 +1162,7 @@ bool ObjCARCOpt::VisitInstructionBottomUp(
ARCInstKind Class = GetARCInstKind(Inst);
const Value *Arg = nullptr;
- DEBUG(dbgs() << " Class: " << Class << "\n");
+ LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");
switch (Class) {
case ARCInstKind::Release: {
@@ -1174,7 +1185,7 @@ bool ObjCARCOpt::VisitInstructionBottomUp(
// Don't do retain+release tracking for ARCInstKind::RetainRV, because
// it's better to let it remain as the first instruction after a call.
if (Class != ARCInstKind::RetainRV) {
- DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
Retains[Inst] = S.GetRRInfo();
}
S.ClearSequenceProgress();
@@ -1216,7 +1227,7 @@ bool ObjCARCOpt::VisitInstructionBottomUp(
bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
BlotMapVector<Value *, RRInfo> &Retains) {
- DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
+ LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
bool NestingDetected = false;
BBState &MyStates = BBStates[BB];
@@ -1239,8 +1250,9 @@ bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
}
}
- DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n"
- << "Performing Dataflow:\n");
+ LLVM_DEBUG(dbgs() << "Before:\n"
+ << BBStates[BB] << "\n"
+ << "Performing Dataflow:\n");
// Visit all the instructions, bottom-up.
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
@@ -1250,7 +1262,7 @@ bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
if (isa<InvokeInst>(Inst))
continue;
- DEBUG(dbgs() << " Visiting " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << " Visiting " << *Inst << "\n");
NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
}
@@ -1265,7 +1277,7 @@ bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
}
- DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");
+ LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");
return NestingDetected;
}
@@ -1278,7 +1290,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
ARCInstKind Class = GetARCInstKind(Inst);
const Value *Arg = nullptr;
- DEBUG(dbgs() << " Class: " << Class << "\n");
+ LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");
switch (Class) {
case ARCInstKind::RetainBlock:
@@ -1304,7 +1316,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
if (S.MatchWithRelease(MDKindCache, Inst)) {
// If we succeed, copy S's RRInfo into the Release -> {Retain Set
// Map}. Then we clear S.
- DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
Releases[Inst] = S.GetRRInfo();
S.ClearSequenceProgress();
}
@@ -1344,7 +1356,7 @@ bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
DenseMap<Value *, RRInfo> &Releases) {
- DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
+ LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
bool NestingDetected = false;
BBState &MyStates = BBStates[BB];
@@ -1366,20 +1378,21 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
}
}
- DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n"
- << "Performing Dataflow:\n");
+ LLVM_DEBUG(dbgs() << "Before:\n"
+ << BBStates[BB] << "\n"
+ << "Performing Dataflow:\n");
// Visit all the instructions, top-down.
for (Instruction &Inst : *BB) {
- DEBUG(dbgs() << " Visiting " << Inst << "\n");
+ LLVM_DEBUG(dbgs() << " Visiting " << Inst << "\n");
NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);
}
- DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
- << BBStates[BB] << "\n\n");
+ LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
+ << BBStates[BB] << "\n\n");
CheckForCFGHazards(BB, BBStates, MyStates);
- DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
+ LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
return NestingDetected;
}
@@ -1502,7 +1515,7 @@ void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
Type *ArgTy = Arg->getType();
Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
- DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
+ LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
// Insert the new retain and release calls.
for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
@@ -1513,8 +1526,10 @@ void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
Call->setDoesNotThrow();
Call->setTailCall();
- DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
- "At insertion point: " << *InsertPt << "\n");
+ LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
+ << "\n"
+ "At insertion point: "
+ << *InsertPt << "\n");
}
for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
Value *MyArg = ArgTy == ParamTy ? Arg :
@@ -1528,20 +1543,22 @@ void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
if (ReleasesToMove.IsTailCallRelease)
Call->setTailCall();
- DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
- "At insertion point: " << *InsertPt << "\n");
+ LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
+ << "\n"
+ "At insertion point: "
+ << *InsertPt << "\n");
}
// Delete the original retain and release calls.
for (Instruction *OrigRetain : RetainsToMove.Calls) {
Retains.blot(OrigRetain);
DeadInsts.push_back(OrigRetain);
- DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
+ LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
}
for (Instruction *OrigRelease : ReleasesToMove.Calls) {
Releases.erase(OrigRelease);
DeadInsts.push_back(OrigRelease);
- DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
+ LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
}
}
@@ -1747,7 +1764,7 @@ bool ObjCARCOpt::PerformCodePlacement(
DenseMap<const BasicBlock *, BBState> &BBStates,
BlotMapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases, Module *M) {
- DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
+ LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
bool AnyPairsCompletelyEliminated = false;
SmallVector<Instruction *, 8> DeadInsts;
@@ -1761,7 +1778,7 @@ bool ObjCARCOpt::PerformCodePlacement(
Instruction *Retain = cast<Instruction>(V);
- DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
Value *Arg = GetArgRCIdentityRoot(Retain);
@@ -1806,7 +1823,7 @@ bool ObjCARCOpt::PerformCodePlacement(
/// Weak pointer optimizations.
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
- DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
+ LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
// First, do memdep-style RLE and S2L optimizations. We can't use memdep
// itself because it uses AliasAnalysis and we need to do provenance
@@ -1814,7 +1831,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
Instruction *Inst = &*I++;
- DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
ARCInstKind Class = GetBasicARCInstKind(Inst);
if (Class != ARCInstKind::LoadWeak &&
@@ -2073,7 +2090,7 @@ void ObjCARCOpt::OptimizeReturns(Function &F) {
if (!F.getReturnType()->isPointerTy())
return;
- DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
+ LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
SmallPtrSet<Instruction *, 4> DependingInstructions;
SmallPtrSet<const BasicBlock *, 4> Visited;
@@ -2082,7 +2099,7 @@ void ObjCARCOpt::OptimizeReturns(Function &F) {
if (!Ret)
continue;
- DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
@@ -2120,8 +2137,8 @@ void ObjCARCOpt::OptimizeReturns(Function &F) {
// If so, we can zap the retain and autorelease.
Changed = true;
++NumRets;
- DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
- << *Autorelease << "\n");
+ LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
+ << "\n");
EraseInstruction(Retain);
EraseInstruction(Autorelease);
}
@@ -2181,8 +2198,9 @@ bool ObjCARCOpt::runOnFunction(Function &F) {
Changed = false;
- DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
- "\n");
+ LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
+ << " >>>"
+ "\n");
PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());
@@ -2230,7 +2248,7 @@ bool ObjCARCOpt::runOnFunction(Function &F) {
}
#endif
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
return Changed;
}
diff --git a/lib/Transforms/ObjCARC/PtrState.cpp b/lib/Transforms/ObjCARC/PtrState.cpp
index e1774b88fd35..b6c48529de43 100644
--- a/lib/Transforms/ObjCARC/PtrState.cpp
+++ b/lib/Transforms/ObjCARC/PtrState.cpp
@@ -126,22 +126,23 @@ bool RRInfo::Merge(const RRInfo &Other) {
//===----------------------------------------------------------------------===//
void PtrState::SetKnownPositiveRefCount() {
- DEBUG(dbgs() << " Setting Known Positive.\n");
+ LLVM_DEBUG(dbgs() << " Setting Known Positive.\n");
KnownPositiveRefCount = true;
}
void PtrState::ClearKnownPositiveRefCount() {
- DEBUG(dbgs() << " Clearing Known Positive.\n");
+ LLVM_DEBUG(dbgs() << " Clearing Known Positive.\n");
KnownPositiveRefCount = false;
}
void PtrState::SetSeq(Sequence NewSeq) {
- DEBUG(dbgs() << " Old: " << GetSeq() << "; New: " << NewSeq << "\n");
+ LLVM_DEBUG(dbgs() << " Old: " << GetSeq() << "; New: " << NewSeq
+ << "\n");
Seq = NewSeq;
}
void PtrState::ResetSequenceProgress(Sequence NewSeq) {
- DEBUG(dbgs() << " Resetting sequence progress.\n");
+ LLVM_DEBUG(dbgs() << " Resetting sequence progress.\n");
SetSeq(NewSeq);
Partial = false;
RRI.clear();
@@ -184,7 +185,8 @@ bool BottomUpPtrState::InitBottomUp(ARCMDKindCache &Cache, Instruction *I) {
// simple and avoids adding overhead for the non-nested case.
bool NestingDetected = false;
if (GetSeq() == S_Release || GetSeq() == S_MovableRelease) {
- DEBUG(dbgs() << " Found nested releases (i.e. a release pair)\n");
+ LLVM_DEBUG(
+ dbgs() << " Found nested releases (i.e. a release pair)\n");
NestingDetected = true;
}
@@ -234,8 +236,8 @@ bool BottomUpPtrState::HandlePotentialAlterRefCount(Instruction *Inst,
if (!CanAlterRefCount(Inst, Ptr, PA, Class))
return false;
- DEBUG(dbgs() << " CanAlterRefCount: Seq: " << S << "; " << *Ptr
- << "\n");
+ LLVM_DEBUG(dbgs() << " CanAlterRefCount: Seq: " << S << "; "
+ << *Ptr << "\n");
switch (S) {
case S_Use:
SetSeq(S_CanRelease);
@@ -277,26 +279,26 @@ void BottomUpPtrState::HandlePotentialUse(BasicBlock *BB, Instruction *Inst,
case S_Release:
case S_MovableRelease:
if (CanUse(Inst, Ptr, PA, Class)) {
- DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " << *Ptr
- << "\n");
+ LLVM_DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; "
+ << *Ptr << "\n");
SetSeqAndInsertReverseInsertPt(S_Use);
} else if (Seq == S_Release && IsUser(Class)) {
- DEBUG(dbgs() << " PreciseReleaseUse: Seq: " << GetSeq() << "; "
- << *Ptr << "\n");
+ LLVM_DEBUG(dbgs() << " PreciseReleaseUse: Seq: " << GetSeq()
+ << "; " << *Ptr << "\n");
// Non-movable releases depend on any possible objc pointer use.
SetSeqAndInsertReverseInsertPt(S_Stop);
} else if (const auto *Call = getreturnRVOperand(*Inst, Class)) {
if (CanUse(Call, Ptr, PA, GetBasicARCInstKind(Call))) {
- DEBUG(dbgs() << " ReleaseUse: Seq: " << GetSeq() << "; "
- << *Ptr << "\n");
+ LLVM_DEBUG(dbgs() << " ReleaseUse: Seq: " << GetSeq() << "; "
+ << *Ptr << "\n");
SetSeqAndInsertReverseInsertPt(S_Stop);
}
}
break;
case S_Stop:
if (CanUse(Inst, Ptr, PA, Class)) {
- DEBUG(dbgs() << " PreciseStopUse: Seq: " << GetSeq() << "; "
- << *Ptr << "\n");
+ LLVM_DEBUG(dbgs() << " PreciseStopUse: Seq: " << GetSeq()
+ << "; " << *Ptr << "\n");
SetSeq(S_Use);
}
break;
@@ -377,8 +379,8 @@ bool TopDownPtrState::HandlePotentialAlterRefCount(Instruction *Inst,
Class != ARCInstKind::IntrinsicUser)
return false;
- DEBUG(dbgs() << " CanAlterRefCount: Seq: " << GetSeq() << "; " << *Ptr
- << "\n");
+ LLVM_DEBUG(dbgs() << " CanAlterRefCount: Seq: " << GetSeq() << "; "
+ << *Ptr << "\n");
ClearKnownPositiveRefCount();
switch (GetSeq()) {
case S_Retain:
@@ -410,8 +412,8 @@ void TopDownPtrState::HandlePotentialUse(Instruction *Inst, const Value *Ptr,
case S_CanRelease:
if (!CanUse(Inst, Ptr, PA, Class))
return;
- DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " << *Ptr
- << "\n");
+ LLVM_DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; "
+ << *Ptr << "\n");
SetSeq(S_Use);
return;
case S_Retain:
diff --git a/lib/Transforms/Scalar/ADCE.cpp b/lib/Transforms/Scalar/ADCE.cpp
index 03a44849645d..ce09a477b5f5 100644
--- a/lib/Transforms/Scalar/ADCE.cpp
+++ b/lib/Transforms/Scalar/ADCE.cpp
@@ -298,8 +298,8 @@ void AggressiveDeadCodeElimination::initialize() {
auto &Info = BlockInfo[BB];
// Real function return
if (isa<ReturnInst>(Info.Terminator)) {
- DEBUG(dbgs() << "post-dom root child is a return: " << BB->getName()
- << '\n';);
+ LLVM_DEBUG(dbgs() << "post-dom root child is a return: " << BB->getName()
+ << '\n';);
continue;
}
@@ -356,7 +356,7 @@ void AggressiveDeadCodeElimination::markLiveInstructions() {
// where we need to mark the inputs as live.
while (!Worklist.empty()) {
Instruction *LiveInst = Worklist.pop_back_val();
- DEBUG(dbgs() << "work live: "; LiveInst->dump(););
+ LLVM_DEBUG(dbgs() << "work live: "; LiveInst->dump(););
for (Use &OI : LiveInst->operands())
if (Instruction *Inst = dyn_cast<Instruction>(OI))
@@ -378,7 +378,7 @@ void AggressiveDeadCodeElimination::markLive(Instruction *I) {
if (Info.Live)
return;
- DEBUG(dbgs() << "mark live: "; I->dump());
+ LLVM_DEBUG(dbgs() << "mark live: "; I->dump());
Info.Live = true;
Worklist.push_back(I);
@@ -402,7 +402,7 @@ void AggressiveDeadCodeElimination::markLive(Instruction *I) {
void AggressiveDeadCodeElimination::markLive(BlockInfoType &BBInfo) {
if (BBInfo.Live)
return;
- DEBUG(dbgs() << "mark block live: " << BBInfo.BB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "mark block live: " << BBInfo.BB->getName() << '\n');
BBInfo.Live = true;
if (!BBInfo.CFLive) {
BBInfo.CFLive = true;
@@ -463,7 +463,7 @@ void AggressiveDeadCodeElimination::markLiveBranchesFromControlDependences() {
if (BlocksWithDeadTerminators.empty())
return;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "new live blocks:\n";
for (auto *BB : NewLiveBlocks)
dbgs() << "\t" << BB->getName() << '\n';
@@ -487,7 +487,7 @@ void AggressiveDeadCodeElimination::markLiveBranchesFromControlDependences() {
// Dead terminators which control live blocks are now marked live.
for (auto *BB : IDFBlocks) {
- DEBUG(dbgs() << "live control in: " << BB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "live control in: " << BB->getName() << '\n');
markLive(BB->getTerminator());
}
}
@@ -501,7 +501,7 @@ bool AggressiveDeadCodeElimination::removeDeadInstructions() {
// Updates control and dataflow around dead blocks
updateDeadRegions();
- DEBUG({
+ LLVM_DEBUG({
for (Instruction &I : instructions(F)) {
// Check if the instruction is alive.
if (isLive(&I))
@@ -555,7 +555,7 @@ bool AggressiveDeadCodeElimination::removeDeadInstructions() {
// A dead region is the set of dead blocks with a common live post-dominator.
void AggressiveDeadCodeElimination::updateDeadRegions() {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "final dead terminator blocks: " << '\n';
for (auto *BB : BlocksWithDeadTerminators)
dbgs() << '\t' << BB->getName()
@@ -607,8 +607,9 @@ void AggressiveDeadCodeElimination::updateDeadRegions() {
// It might have happened that the same successor appeared multiple times
// and the CFG edge wasn't really removed.
if (Succ != PreferredSucc->BB) {
- DEBUG(dbgs() << "ADCE: (Post)DomTree edge enqueued for deletion"
- << BB->getName() << " -> " << Succ->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "ADCE: (Post)DomTree edge enqueued for deletion"
+ << BB->getName() << " -> " << Succ->getName()
+ << "\n");
DeletedEdges.push_back({DominatorTree::Delete, BB, Succ});
}
}
@@ -652,7 +653,7 @@ void AggressiveDeadCodeElimination::makeUnconditional(BasicBlock *BB,
InstInfo[PredTerm].Live = true;
return;
}
- DEBUG(dbgs() << "making unconditional " << BB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "making unconditional " << BB->getName() << '\n');
NumBranchesRemoved += 1;
IRBuilder<> Builder(PredTerm);
auto *NewTerm = Builder.CreateBr(Target);
diff --git a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index b84528271a70..fa7bcec677f7 100644
--- a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -98,8 +98,8 @@ static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
const SCEV *DiffAlign = SE->getMulExpr(DiffAlignDiv, AlignSCEV);
const SCEV *DiffUnitsSCEV = SE->getMinusSCEV(DiffAlign, DiffSCEV);
- DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is " <<
- *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");
+ LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is "
+ << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");
if (const SCEVConstant *ConstDUSCEV =
dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
@@ -139,12 +139,12 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
// address. This address is displaced by the provided offset.
DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV);
- DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to " <<
- *AlignSCEV << " and offset " << *OffSCEV <<
- " using diff " << *DiffSCEV << "\n");
+ LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to "
+ << *AlignSCEV << " and offset " << *OffSCEV
+ << " using diff " << *DiffSCEV << "\n");
unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE);
- DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n");
+ LLVM_DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n");
if (NewAlignment) {
return NewAlignment;
@@ -160,8 +160,8 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);
- DEBUG(dbgs() << "\ttrying start/inc alignment using start " <<
- *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");
+ LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start "
+ << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");
// Now compute the new alignment using the displacement to the value in the
// first iteration, and also the alignment using the per-iteration delta.
@@ -170,26 +170,26 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);
- DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n");
- DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n");
+ LLVM_DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n");
+ LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n");
if (!NewAlignment || !NewIncAlignment) {
return 0;
} else if (NewAlignment > NewIncAlignment) {
if (NewAlignment % NewIncAlignment == 0) {
- DEBUG(dbgs() << "\tnew start/inc alignment: " <<
- NewIncAlignment << "\n");
+ LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment
+ << "\n");
return NewIncAlignment;
}
} else if (NewIncAlignment > NewAlignment) {
if (NewIncAlignment % NewAlignment == 0) {
- DEBUG(dbgs() << "\tnew start/inc alignment: " <<
- NewAlignment << "\n");
+ LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
+ << "\n");
return NewAlignment;
}
} else if (NewIncAlignment == NewAlignment) {
- DEBUG(dbgs() << "\tnew start/inc alignment: " <<
- NewAlignment << "\n");
+ LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
+ << "\n");
return NewAlignment;
}
}
@@ -339,7 +339,7 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
MI->getDest(), SE);
- DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";);
+ LLVM_DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";);
if (NewDestAlignment > MI->getDestAlignment()) {
MI->setDestAlignment(NewDestAlignment);
++NumMemIntAlignChanged;
@@ -351,7 +351,7 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
MTI->getSource(), SE);
- DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";);
+ LLVM_DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";);
if (NewSrcAlignment > MTI->getSourceAlignment()) {
MTI->setSourceAlignment(NewSrcAlignment);
diff --git a/lib/Transforms/Scalar/BDCE.cpp b/lib/Transforms/Scalar/BDCE.cpp
index c39e7443030b..dd763847501b 100644
--- a/lib/Transforms/Scalar/BDCE.cpp
+++ b/lib/Transforms/Scalar/BDCE.cpp
@@ -100,7 +100,7 @@ static bool bitTrackingDCE(Function &F, DemandedBits &DB) {
// For live instructions that have all dead bits, first make them dead by
// replacing all uses with something else. Then, if they don't need to
// remain live (because they have side effects, etc.) we can remove them.
- DEBUG(dbgs() << "BDCE: Trivializing: " << I << " (all bits dead)\n");
+ LLVM_DEBUG(dbgs() << "BDCE: Trivializing: " << I << " (all bits dead)\n");
clearAssumptionsOfUsers(&I, DB);
diff --git a/lib/Transforms/Scalar/CallSiteSplitting.cpp b/lib/Transforms/Scalar/CallSiteSplitting.cpp
index dac52a6e3b78..098ad920a76d 100644
--- a/lib/Transforms/Scalar/CallSiteSplitting.cpp
+++ b/lib/Transforms/Scalar/CallSiteSplitting.cpp
@@ -316,7 +316,7 @@ static void splitCallSite(
if (!IsMustTailCall && !Instr->use_empty())
CallPN = PHINode::Create(Instr->getType(), Preds.size(), "phi.call");
- DEBUG(dbgs() << "split call-site : " << *Instr << " into \n");
+ LLVM_DEBUG(dbgs() << "split call-site : " << *Instr << " into \n");
assert(Preds.size() == 2 && "The ValueToValueMaps array has size 2.");
// ValueToValueMapTy is neither copy nor moveable, so we use a simple array
@@ -344,8 +344,8 @@ static void splitCallSite(
++ArgNo;
}
}
- DEBUG(dbgs() << " " << *NewCI << " in " << SplitBlock->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << " " << *NewCI << " in " << SplitBlock->getName()
+ << "\n");
if (CallPN)
CallPN->addIncoming(NewCI, SplitBlock);
diff --git a/lib/Transforms/Scalar/ConstantHoisting.cpp b/lib/Transforms/Scalar/ConstantHoisting.cpp
index 470e687a7221..7f7107616110 100644
--- a/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -132,8 +132,8 @@ bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) {
if (skipFunction(Fn))
return false;
- DEBUG(dbgs() << "********** Begin Constant Hoisting **********\n");
- DEBUG(dbgs() << "********** Function: " << Fn.getName() << '\n');
+ LLVM_DEBUG(dbgs() << "********** Begin Constant Hoisting **********\n");
+ LLVM_DEBUG(dbgs() << "********** Function: " << Fn.getName() << '\n');
bool MadeChange =
Impl.runImpl(Fn, getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn),
@@ -144,11 +144,11 @@ bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) {
Fn.getEntryBlock());
if (MadeChange) {
- DEBUG(dbgs() << "********** Function after Constant Hoisting: "
- << Fn.getName() << '\n');
- DEBUG(dbgs() << Fn);
+ LLVM_DEBUG(dbgs() << "********** Function after Constant Hoisting: "
+ << Fn.getName() << '\n');
+ LLVM_DEBUG(dbgs() << Fn);
}
- DEBUG(dbgs() << "********** End Constant Hoisting **********\n");
+ LLVM_DEBUG(dbgs() << "********** End Constant Hoisting **********\n");
return MadeChange;
}
@@ -364,14 +364,13 @@ void ConstantHoistingPass::collectConstantCandidates(
Itr->second = ConstCandVec.size() - 1;
}
ConstCandVec[Itr->second].addUser(Inst, Idx, Cost);
- DEBUG(if (isa<ConstantInt>(Inst->getOperand(Idx)))
- dbgs() << "Collect constant " << *ConstInt << " from " << *Inst
+ LLVM_DEBUG(if (isa<ConstantInt>(Inst->getOperand(Idx))) dbgs()
+ << "Collect constant " << *ConstInt << " from " << *Inst
<< " with cost " << Cost << '\n';
- else
- dbgs() << "Collect constant " << *ConstInt << " indirectly from "
- << *Inst << " via " << *Inst->getOperand(Idx) << " with cost "
- << Cost << '\n';
- );
+ else dbgs() << "Collect constant " << *ConstInt
+ << " indirectly from " << *Inst << " via "
+ << *Inst->getOperand(Idx) << " with cost " << Cost
+ << '\n';);
}
}
@@ -501,20 +500,21 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
return NumUses;
}
- DEBUG(dbgs() << "== Maximize constants in range ==\n");
+ LLVM_DEBUG(dbgs() << "== Maximize constants in range ==\n");
int MaxCost = -1;
for (auto ConstCand = S; ConstCand != E; ++ConstCand) {
auto Value = ConstCand->ConstInt->getValue();
Type *Ty = ConstCand->ConstInt->getType();
int Cost = 0;
NumUses += ConstCand->Uses.size();
- DEBUG(dbgs() << "= Constant: " << ConstCand->ConstInt->getValue() << "\n");
+ LLVM_DEBUG(dbgs() << "= Constant: " << ConstCand->ConstInt->getValue()
+ << "\n");
for (auto User : ConstCand->Uses) {
unsigned Opcode = User.Inst->getOpcode();
unsigned OpndIdx = User.OpndIdx;
Cost += TTI->getIntImmCost(Opcode, OpndIdx, Value, Ty);
- DEBUG(dbgs() << "Cost: " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "Cost: " << Cost << "\n");
for (auto C2 = S; C2 != E; ++C2) {
Optional<APInt> Diff = calculateOffsetDiff(
@@ -524,18 +524,18 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
const int ImmCosts =
TTI->getIntImmCodeSizeCost(Opcode, OpndIdx, Diff.getValue(), Ty);
Cost -= ImmCosts;
- DEBUG(dbgs() << "Offset " << Diff.getValue() << " "
- << "has penalty: " << ImmCosts << "\n"
- << "Adjusted cost: " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "Offset " << Diff.getValue() << " "
+ << "has penalty: " << ImmCosts << "\n"
+ << "Adjusted cost: " << Cost << "\n");
}
}
}
- DEBUG(dbgs() << "Cumulative cost: " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "Cumulative cost: " << Cost << "\n");
if (Cost > MaxCost) {
MaxCost = Cost;
MaxCostItr = ConstCand;
- DEBUG(dbgs() << "New candidate: " << MaxCostItr->ConstInt->getValue()
- << "\n");
+ LLVM_DEBUG(dbgs() << "New candidate: " << MaxCostItr->ConstInt->getValue()
+ << "\n");
}
}
return NumUses;
@@ -641,19 +641,20 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base,
Mat = BinaryOperator::Create(Instruction::Add, Base, Offset,
"const_mat", InsertionPt);
- DEBUG(dbgs() << "Materialize constant (" << *Base->getOperand(0)
- << " + " << *Offset << ") in BB "
- << Mat->getParent()->getName() << '\n' << *Mat << '\n');
+ LLVM_DEBUG(dbgs() << "Materialize constant (" << *Base->getOperand(0)
+ << " + " << *Offset << ") in BB "
+ << Mat->getParent()->getName() << '\n'
+ << *Mat << '\n');
Mat->setDebugLoc(ConstUser.Inst->getDebugLoc());
}
Value *Opnd = ConstUser.Inst->getOperand(ConstUser.OpndIdx);
// Visit constant integer.
if (isa<ConstantInt>(Opnd)) {
- DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
+ LLVM_DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, Mat) && Offset)
Mat->eraseFromParent();
- DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
+ LLVM_DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
return;
}
@@ -669,13 +670,13 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base,
ClonedCastInst->insertAfter(CastInst);
// Use the same debug location as the original cast instruction.
ClonedCastInst->setDebugLoc(CastInst->getDebugLoc());
- DEBUG(dbgs() << "Clone instruction: " << *CastInst << '\n'
- << "To : " << *ClonedCastInst << '\n');
+ LLVM_DEBUG(dbgs() << "Clone instruction: " << *CastInst << '\n'
+ << "To : " << *ClonedCastInst << '\n');
}
- DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
+ LLVM_DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ClonedCastInst);
- DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
+ LLVM_DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
return;
}
@@ -689,15 +690,15 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base,
// Use the same debug location as the instruction we are about to update.
ConstExprInst->setDebugLoc(ConstUser.Inst->getDebugLoc());
- DEBUG(dbgs() << "Create instruction: " << *ConstExprInst << '\n'
- << "From : " << *ConstExpr << '\n');
- DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
+ LLVM_DEBUG(dbgs() << "Create instruction: " << *ConstExprInst << '\n'
+ << "From : " << *ConstExpr << '\n');
+ LLVM_DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n');
if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ConstExprInst)) {
ConstExprInst->eraseFromParent();
if (Offset)
Mat->eraseFromParent();
}
- DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
+ LLVM_DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n');
return;
}
}
@@ -720,9 +721,9 @@ bool ConstantHoistingPass::emitBaseConstants() {
Base->setDebugLoc(IP->getDebugLoc());
- DEBUG(dbgs() << "Hoist constant (" << *ConstInfo.BaseConstant
- << ") to BB " << IP->getParent()->getName() << '\n'
- << *Base << '\n');
+ LLVM_DEBUG(dbgs() << "Hoist constant (" << *ConstInfo.BaseConstant
+ << ") to BB " << IP->getParent()->getName() << '\n'
+ << *Base << '\n');
// Emit materialization code for all rebased constants.
unsigned Uses = 0;
diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index a85daaa0780a..15df482652c0 100644
--- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -228,7 +228,7 @@ static bool processPHI(PHINode *P, LazyValueInfo *LVI,
V = SI->getTrueValue();
}
- DEBUG(dbgs() << "CVP: Threading PHI over " << *SI << '\n');
+ LLVM_DEBUG(dbgs() << "CVP: Threading PHI over " << *SI << '\n');
}
P->setIncomingValue(i, V);
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 1d56a6e8aaa0..fd2cacf461bb 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -420,9 +420,10 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// Insert our part of the overlap into the map.
auto &IM = IOL[DepWrite];
- DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " <<
- int64_t(EarlierOff + Earlier.Size) << ") Later [" <<
- LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n");
+ LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
+ << ", " << int64_t(EarlierOff + Earlier.Size)
+ << ") Later [" << LaterOff << ", "
+ << int64_t(LaterOff + Later.Size) << ")\n");
// Make sure that we only insert non-overlapping intervals and combine
// adjacent intervals. The intervals are stored in the map with the ending
@@ -459,11 +460,11 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
ILI = IM.begin();
if (ILI->second <= EarlierOff &&
ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
- DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" <<
- EarlierOff << ", " <<
- int64_t(EarlierOff + Earlier.Size) <<
- ") Composite Later [" <<
- ILI->second << ", " << ILI->first << ")\n");
+ LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
+ << EarlierOff << ", "
+ << int64_t(EarlierOff + Earlier.Size)
+ << ") Composite Later [" << ILI->second << ", "
+ << ILI->first << ")\n");
++NumCompletePartials;
return OW_Complete;
}
@@ -474,10 +475,11 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
int64_t(EarlierOff + Earlier.Size) > LaterOff &&
uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
- DEBUG(dbgs() << "DSE: Partial overwrite an earlier load [" << EarlierOff
- << ", " << int64_t(EarlierOff + Earlier.Size)
- << ") by a later store [" << LaterOff << ", "
- << int64_t(LaterOff + Later.Size) << ")\n");
+ LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
+ << EarlierOff << ", "
+ << int64_t(EarlierOff + Earlier.Size)
+ << ") by a later store [" << LaterOff << ", "
+ << int64_t(LaterOff + Later.Size) << ")\n");
// TODO: Maybe come up with a better name?
return OW_PartialEarlierWithFullLater;
}
@@ -677,8 +679,9 @@ static bool handleFree(CallInst *F, AliasAnalysis *AA,
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
break;
- DEBUG(dbgs() << "DSE: Dead Store to soon to be freed memory:\n DEAD: "
- << *Dependency << '\n');
+ LLVM_DEBUG(
+ dbgs() << "DSE: Dead Store to soon to be freed memory:\n DEAD: "
+ << *Dependency << '\n');
// DCE instructions only used to calculate that store.
BasicBlock::iterator BBI(Dependency);
@@ -787,15 +790,16 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
if (AllDead) {
Instruction *Dead = &*BBI;
- DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
- << *Dead << "\n Objects: ";
- for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
- E = Pointers.end(); I != E; ++I) {
- dbgs() << **I;
- if (std::next(I) != E)
- dbgs() << ", ";
- }
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
+ << *Dead << "\n Objects: ";
+ for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
+ E = Pointers.end();
+ I != E; ++I) {
+ dbgs() << **I;
+ if (std::next(I) != E)
+ dbgs() << ", ";
+ } dbgs()
+ << '\n');
// DCE instructions only used to calculate that store.
deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
@@ -807,8 +811,8 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
// Remove any dead non-memory-mutating instructions.
if (isInstructionTriviallyDead(&*BBI, TLI)) {
- DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n DEAD: "
- << *&*BBI << '\n');
+ LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n DEAD: "
+ << *&*BBI << '\n');
deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
++NumFastOther;
MadeChange = true;
@@ -917,10 +921,10 @@ static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
return false;
}
- DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
- << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *EarlierWrite
- << "\n KILLER (offset " << LaterOffset << ", " << EarlierSize
- << ")\n");
+ LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
+ << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
+ << *EarlierWrite << "\n KILLER (offset " << LaterOffset
+ << ", " << EarlierSize << ")\n");
Value *EarlierWriteLength = EarlierIntrinsic->getLength();
Value *TrimmedLength =
@@ -1025,8 +1029,9 @@ static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {
- DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n LOAD: "
- << *DepLoad << "\n STORE: " << *SI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "DSE: Remove Store Of Load from same pointer:\n LOAD: "
+ << *DepLoad << "\n STORE: " << *SI << '\n');
deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
++NumRedundantStores;
@@ -1042,7 +1047,7 @@ static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
- DEBUG(
+ LLVM_DEBUG(
dbgs() << "DSE: Remove null store to the calloc'ed object:\n DEAD: "
<< *Inst << "\n OBJECT: " << *UnderlyingPointer << '\n');
@@ -1173,8 +1178,8 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
InstWriteOffset, DepWrite, IOL, *AA);
if (OR == OW_Complete) {
- DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
- << *DepWrite << "\n KILLER: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DepWrite
+ << "\n KILLER: " << *Inst << '\n');
// Delete the store and now-dead instructions that feed it.
deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
@@ -1232,9 +1237,9 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
// store, shifted appropriately.
APInt Merged =
(EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
- DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *DepWrite
- << "\n Later: " << *Inst
- << "\n Merged Value: " << Merged << '\n');
+ LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *DepWrite
+ << "\n Later: " << *Inst
+ << "\n Merged Value: " << Merged << '\n');
auto *SI = new StoreInst(
ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
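
Several DSE hunks above keep loops and multiple stream insertions inside a single macro invocation; LLVM_DEBUG accepts any statement sequence. A rough sketch of that pattern, with an illustrative helper and container name rather than code from this file:

    #define DEBUG_TYPE "dse"          // illustrative tag, defined before use
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/Debug.h"
    using namespace llvm;

    static void dumpDeadObjects(ArrayRef<const Value *> Pointers) {
      // One invocation can hold several statements, including a loop, much
      // like the handleEndBlock hunk above.
      LLVM_DEBUG({
        dbgs() << "DSE: objects:";
        for (const Value *V : Pointers)
          dbgs() << ' ' << *V;
        dbgs() << '\n';
      });
    }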
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index 4380812968ac..d19ceab38890 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -728,11 +728,11 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
? ConstantInt::getTrue(BB->getContext())
: ConstantInt::getFalse(BB->getContext());
AvailableValues.insert(CondInst, TorF);
- DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
- << CondInst->getName() << "' as " << *TorF << " in "
- << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
+ << CondInst->getName() << "' as " << *TorF << " in "
+ << BB->getName() << "\n");
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
} else {
// Replace all dominated uses with the known value.
if (unsigned Count = replaceDominatedUsesWith(
@@ -758,9 +758,9 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// Dead instructions should just be removed.
if (isInstructionTriviallyDead(Inst, &TLI)) {
- DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
continue;
}
salvageDebugInfo(*Inst);
@@ -779,16 +779,17 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
auto *CondI =
dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
if (CondI && SimpleValue::canHandle(CondI)) {
- DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
+ << '\n');
AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
} else
- DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
continue;
}
// Skip sideeffect intrinsics, for the same reason as assume intrinsics.
if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
- DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
continue;
}
@@ -826,7 +827,8 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// Is the condition known to be true?
if (isa<ConstantInt>(KnownCond) &&
cast<ConstantInt>(KnownCond)->isOne()) {
- DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs()
+ << "EarlyCSE removing guard: " << *Inst << '\n');
removeMSSA(Inst);
Inst->eraseFromParent();
Changed = true;
@@ -851,9 +853,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
// its simpler value.
if (Value *V = SimplifyInstruction(Inst, SQ)) {
- DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V
+ << '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
} else {
bool Killed = false;
if (!Inst->use_empty()) {
@@ -877,9 +880,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (SimpleValue::canHandle(Inst)) {
// See if the instruction has an available value. If so, use it.
if (Value *V = AvailableValues.lookup(Inst)) {
- DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V
+ << '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
continue;
}
if (auto *I = dyn_cast<Instruction>(V))
@@ -937,10 +941,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
InVal.DefInst, Inst))) {
Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
if (Op != nullptr) {
- DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
- << " to: " << *InVal.DefInst << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
+ << " to: " << *InVal.DefInst << '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
continue;
}
if (!Inst->use_empty())
@@ -980,10 +984,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (InVal.first != nullptr &&
isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
Inst)) {
- DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
- << " to: " << *InVal.first << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
+ << " to: " << *InVal.first << '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
continue;
}
if (!Inst->use_empty())
@@ -1036,9 +1040,9 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
MemInst.getPointerOperand() ||
MSSA) &&
"can't have an intervening store if not using MemorySSA!");
- DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
continue;
}
removeMSSA(Inst);
@@ -1071,10 +1075,10 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
!LastStoreMemInst.isVolatile() &&
"Violated invariant");
if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
- DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
- << " due to: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
+ << " due to: " << *Inst << '\n');
if (!DebugCounter::shouldExecute(CSECounter)) {
- DEBUG(dbgs() << "Skipping due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
} else {
removeMSSA(LastStore);
LastStore->eraseFromParent();
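
The EarlyCSE hunks pair the renamed macro with DebugCounter::shouldExecute so that individual eliminations can be bisected from the command line. A hedged sketch of that gating pattern (counter name, description, and helper are illustrative):

    #define DEBUG_TYPE "early-cse"
    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/DebugCounter.h"
    using namespace llvm;

    DEBUG_COUNTER(CSECounter, "early-cse",
                  "Controls which instructions are removed");

    static bool maybeErase(Instruction *Inst) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        return false;   // leave the instruction alone for this counter value
      }
      Inst->eraseFromParent();
      return true;
    }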
diff --git a/lib/Transforms/Scalar/Float2Int.cpp b/lib/Transforms/Scalar/Float2Int.cpp
index b105ece8dc7c..f2828e80bc58 100644
--- a/lib/Transforms/Scalar/Float2Int.cpp
+++ b/lib/Transforms/Scalar/Float2Int.cpp
@@ -138,7 +138,7 @@ void Float2IntPass::findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots) {
// Helper - mark I as having been traversed, having range R.
void Float2IntPass::seen(Instruction *I, ConstantRange R) {
- DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n");
+ LLVM_DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n");
auto IT = SeenInsts.find(I);
if (IT != SeenInsts.end())
IT->second = std::move(R);
@@ -359,7 +359,7 @@ bool Float2IntPass::validateAndTransform() {
for (User *U : I->users()) {
Instruction *UI = dyn_cast<Instruction>(U);
if (!UI || SeenInsts.find(UI) == SeenInsts.end()) {
- DEBUG(dbgs() << "F2I: Failing because of " << *U << "\n");
+ LLVM_DEBUG(dbgs() << "F2I: Failing because of " << *U << "\n");
Fail = true;
break;
}
@@ -380,7 +380,7 @@ bool Float2IntPass::validateAndTransform() {
// lower limits, plus one so it can be signed.
unsigned MinBW = std::max(R.getLower().getMinSignedBits(),
R.getUpper().getMinSignedBits()) + 1;
- DEBUG(dbgs() << "F2I: MinBitwidth=" << MinBW << ", R: " << R << "\n");
+ LLVM_DEBUG(dbgs() << "F2I: MinBitwidth=" << MinBW << ", R: " << R << "\n");
// If we've run off the realms of the exactly representable integers,
// the floating point result will differ from an integer approximation.
@@ -391,11 +391,12 @@ bool Float2IntPass::validateAndTransform() {
unsigned MaxRepresentableBits
= APFloat::semanticsPrecision(ConvertedToTy->getFltSemantics()) - 1;
if (MinBW > MaxRepresentableBits) {
- DEBUG(dbgs() << "F2I: Value not guaranteed to be representable!\n");
+ LLVM_DEBUG(dbgs() << "F2I: Value not guaranteed to be representable!\n");
continue;
}
if (MinBW > 64) {
- DEBUG(dbgs() << "F2I: Value requires more than 64 bits to represent!\n");
+ LLVM_DEBUG(
+ dbgs() << "F2I: Value requires more than 64 bits to represent!\n");
continue;
}
@@ -490,7 +491,7 @@ void Float2IntPass::cleanup() {
}
bool Float2IntPass::runImpl(Function &F) {
- DEBUG(dbgs() << "F2I: Looking at function " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "F2I: Looking at function " << F.getName() << "\n");
// Clear out all state.
ECs = EquivalenceClasses<Instruction*>();
SeenInsts.clear();
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 59b87e9a77da..ff3c2f9fbd2a 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -784,9 +784,10 @@ Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
if (Res->getType() != LoadTy) {
Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
- DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
- << *getSimpleValue() << '\n'
- << *Res << '\n' << "\n\n\n");
+ LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
+ << " " << *getSimpleValue() << '\n'
+ << *Res << '\n'
+ << "\n\n\n");
}
} else if (isCoercedLoadValue()) {
LoadInst *Load = getCoercedLoadValue();
@@ -800,20 +801,21 @@ Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
// but then there all of the operations based on it would need to be
// rehashed. Just leave the dead load around.
gvn.getMemDep().removeInstruction(Load);
- DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
- << *getCoercedLoadValue() << '\n'
- << *Res << '\n'
- << "\n\n\n");
+ LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
+ << " " << *getCoercedLoadValue() << '\n'
+ << *Res << '\n'
+ << "\n\n\n");
}
} else if (isMemIntrinValue()) {
Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
InsertPt, DL);
- DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
- << " " << *getMemIntrinValue() << '\n'
- << *Res << '\n' << "\n\n\n");
+ LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
+ << " " << *getMemIntrinValue() << '\n'
+ << *Res << '\n'
+ << "\n\n\n");
} else {
assert(isUndefValue() && "Should be UndefVal");
- DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
+ LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
return UndefValue::get(LoadTy);
}
assert(Res && "failed to materialize?");
@@ -915,13 +917,11 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
}
}
// Nothing known about this clobber, have to be conservative
- DEBUG(
- // fast print dep, using operator<< on instruction is too slow.
- dbgs() << "GVN: load ";
- LI->printAsOperand(dbgs());
- Instruction *I = DepInfo.getInst();
- dbgs() << " is clobbered by " << *I << '\n';
- );
+ LLVM_DEBUG(
+ // fast print dep, using operator<< on instruction is too slow.
+ dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
+ Instruction *I = DepInfo.getInst();
+ dbgs() << " is clobbered by " << *I << '\n';);
if (ORE->allowExtraAnalysis(DEBUG_TYPE))
reportMayClobberedLoad(LI, DepInfo, DT, ORE);
@@ -979,12 +979,10 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
}
// Unknown def - must be conservative
- DEBUG(
- // fast print dep, using operator<< on instruction is too slow.
- dbgs() << "GVN: load ";
- LI->printAsOperand(dbgs());
- dbgs() << " has unknown def " << *DepInst << '\n';
- );
+ LLVM_DEBUG(
+ // fast print dep, using operator<< on instruction is too slow.
+ dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
+ dbgs() << " has unknown def " << *DepInst << '\n';);
return false;
}
@@ -1114,9 +1112,9 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// If any predecessor block is an EH pad that does not allow non-PHI
// instructions before the terminator, we can't PRE the load.
if (Pred->getTerminator()->isEHPad()) {
- DEBUG(dbgs()
- << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
- << Pred->getName() << "': " << *LI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
+ << Pred->getName() << "': " << *LI << '\n');
return false;
}
@@ -1126,15 +1124,16 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
if (Pred->getTerminator()->getNumSuccessors() != 1) {
if (isa<IndirectBrInst>(Pred->getTerminator())) {
- DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
- << Pred->getName() << "': " << *LI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
+ << Pred->getName() << "': " << *LI << '\n');
return false;
}
if (LoadBB->isEHPad()) {
- DEBUG(dbgs()
- << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
- << Pred->getName() << "': " << *LI << '\n');
+ LLVM_DEBUG(
+ dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
+ << Pred->getName() << "': " << *LI << '\n');
return false;
}
@@ -1162,8 +1161,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
PredLoads[NewPred] = nullptr;
- DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
- << LoadBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
+ << LoadBB->getName() << '\n');
}
// Check if the load can safely be moved to all the unavailable predecessors.
@@ -1187,8 +1186,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// If we couldn't find or insert a computation of this phi translated value,
// we fail PRE.
if (!LoadPtr) {
- DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
- << *LI->getPointerOperand() << "\n");
+ LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
+ << *LI->getPointerOperand() << "\n");
CanDoPRE = false;
break;
}
@@ -1209,10 +1208,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// Okay, we can eliminate this load by inserting a reload in the predecessor
// and using PHI construction to get the value in the other predecessors, do
// it.
- DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
- DEBUG(if (!NewInsts.empty())
- dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
- << *NewInsts.back() << '\n');
+ LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
+ LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
+ << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
+ << '\n');
// Assign value numbers to the new instructions.
for (Instruction *I : NewInsts) {
@@ -1263,7 +1262,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
NewLoad));
MD->invalidateCachedPointerInfo(LoadPtr);
- DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
+ LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
}
// Perform PHI construction.
@@ -1321,11 +1320,8 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// clobber in the current block. Reject this early.
if (NumDeps == 1 &&
!Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
- DEBUG(
- dbgs() << "GVN: non-local load ";
- LI->printAsOperand(dbgs());
- dbgs() << " has unknown dependencies\n";
- );
+ LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs());
+ dbgs() << " has unknown dependencies\n";);
return false;
}
@@ -1354,7 +1350,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// load, then it is fully redundant and we can use PHI insertion to compute
// its value. Insert PHIs and remove the fully redundant value now.
if (UnavailableBlocks.empty()) {
- DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
+ LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
@@ -1507,12 +1503,10 @@ bool GVN::processLoad(LoadInst *L) {
// Only handle the local case below
if (!Dep.isDef() && !Dep.isClobber()) {
// This might be a NonFuncLocal or an Unknown
- DEBUG(
- // fast print dep, using operator<< on instruction is too slow.
- dbgs() << "GVN: load ";
- L->printAsOperand(dbgs());
- dbgs() << " has unknown dependence\n";
- );
+ LLVM_DEBUG(
+ // fast print dep, using operator<< on instruction is too slow.
+ dbgs() << "GVN: load "; L->printAsOperand(dbgs());
+ dbgs() << " has unknown dependence\n";);
return false;
}
@@ -1696,8 +1690,8 @@ bool GVN::replaceOperandsWithConsts(Instruction *Instr) const {
if (it != ReplaceWithConstMap.end()) {
assert(!isa<Constant>(Operand) &&
"Replacing constants with constants is invalid");
- DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " << *it->second
- << " in instruction " << *Instr << '\n');
+ LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
+ << *it->second << " in instruction " << *Instr << '\n');
Instr->setOperand(OpNum, it->second);
Changed = true;
}
@@ -2039,7 +2033,7 @@ bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
unsigned Iteration = 0;
while (ShouldContinue) {
- DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
+ LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
ShouldContinue = iterateOnFunction(F);
Changed |= ShouldContinue;
++Iteration;
@@ -2105,10 +2099,10 @@ bool GVN::processBlock(BasicBlock *BB) {
const Instruction *MaybeFirstICF = FirstImplicitControlFlowInsts.lookup(BB);
for (auto *I : InstrsToErase) {
assert(I->getParent() == BB && "Removing instruction from wrong block?");
- DEBUG(dbgs() << "GVN removed: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
salvageDebugInfo(*I);
if (MD) MD->removeInstruction(I);
- DEBUG(verifyRemoved(I));
+ LLVM_DEBUG(verifyRemoved(I));
if (MaybeFirstICF == I) {
// We have erased the first ICF in block. The map needs to be updated.
InvalidateImplicitCF = true;
@@ -2290,7 +2284,7 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
PREInstr = CurInst->clone();
if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
// If we failed insertion, make sure we remove the instruction.
- DEBUG(verifyRemoved(PREInstr));
+ LLVM_DEBUG(verifyRemoved(PREInstr));
PREInstr->deleteValue();
return false;
}
@@ -2328,10 +2322,10 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
VN.erase(CurInst);
removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
- DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
+ LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
if (MD)
MD->removeInstruction(CurInst);
- DEBUG(verifyRemoved(CurInst));
+ LLVM_DEBUG(verifyRemoved(CurInst));
bool InvalidateImplicitCF =
FirstImplicitControlFlowInsts.lookup(CurInst->getParent()) == CurInst;
// FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
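
For context, LLVM_DEBUG behaves roughly as follows in assert-enabled builds; this is a simplified sketch, not the verbatim definition in llvm/Support/Debug.h:

    // Simplified behavioural sketch, assuming an assert-enabled build:
    #define LLVM_DEBUG(X)                                                    \
      do {                                                                   \
        if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(DEBUG_TYPE)) {   \
          X;                                                                 \
        }                                                                    \
      } while (false)
    // With NDEBUG defined the argument is dropped entirely, so the GVN hunks
    // above can keep expensive work (printAsOperand, verifyRemoved) inside
    // the macro without any release-build cost.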
diff --git a/lib/Transforms/Scalar/GVNHoist.cpp b/lib/Transforms/Scalar/GVNHoist.cpp
index a8b33d19d179..6994d964a9e8 100644
--- a/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/lib/Transforms/Scalar/GVNHoist.cpp
@@ -622,7 +622,7 @@ private:
// Iterate in reverse order to keep lower ranked values on the top.
for (std::pair<VNType, Instruction *> &VI : reverse(it1->second)) {
// Get the value of instruction I
- DEBUG(dbgs() << "\nPushing on stack: " << *VI.second);
+ LLVM_DEBUG(dbgs() << "\nPushing on stack: " << *VI.second);
RenameStack[VI.first].push_back(VI.second);
}
}
@@ -636,7 +636,7 @@ private:
if (P == CHIBBs.end()) {
continue;
}
- DEBUG(dbgs() << "\nLooking at CHIs in: " << Pred->getName(););
+ LLVM_DEBUG(dbgs() << "\nLooking at CHIs in: " << Pred->getName(););
// A CHI is found (BB -> Pred is an edge in the CFG)
// Pop the stack until Top(V) = Ve.
auto &VCHI = P->second;
@@ -651,9 +651,9 @@ private:
DT->properlyDominates(Pred, si->second.back()->getParent())) {
C.Dest = BB; // Assign the edge
C.I = si->second.pop_back_val(); // Assign the argument
- DEBUG(dbgs() << "\nCHI Inserted in BB: " << C.Dest->getName()
- << *C.I << ", VN: " << C.VN.first << ", "
- << C.VN.second);
+ LLVM_DEBUG(dbgs()
+ << "\nCHI Inserted in BB: " << C.Dest->getName() << *C.I
+ << ", VN: " << C.VN.first << ", " << C.VN.second);
}
// Move to next CHI of a different value
It = std::find_if(It, VCHI.end(),
@@ -798,8 +798,8 @@ private:
// Ignore spurious PDFs.
if (DT->properlyDominates(IDFB, V[i]->getParent())) {
OutValue[IDFB].push_back(C);
- DEBUG(dbgs() << "\nInsertion a CHI for BB: " << IDFB->getName()
- << ", for Insn: " << *V[i]);
+ LLVM_DEBUG(dbgs() << "\nInsertion a CHI for BB: " << IDFB->getName()
+ << ", for Insn: " << *V[i]);
}
}
}
diff --git a/lib/Transforms/Scalar/GVNSink.cpp b/lib/Transforms/Scalar/GVNSink.cpp
index 4368b68582ec..9d8a6d1c552a 100644
--- a/lib/Transforms/Scalar/GVNSink.cpp
+++ b/lib/Transforms/Scalar/GVNSink.cpp
@@ -561,7 +561,8 @@ public:
GVNSink() = default;
bool run(Function &F) {
- DEBUG(dbgs() << "GVNSink: running on function @" << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "GVNSink: running on function @" << F.getName()
+ << "\n");
unsigned NumSunk = 0;
ReversePostOrderTraversal<Function*> RPOT(&F);
@@ -629,15 +630,15 @@ Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking(
LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents) {
auto Insts = *LRI;
- DEBUG(dbgs() << " -- Analyzing instruction set: [\n"; for (auto *I
- : Insts) {
+ LLVM_DEBUG(dbgs() << " -- Analyzing instruction set: [\n"; for (auto *I
+ : Insts) {
I->dump();
} dbgs() << " ]\n";);
DenseMap<uint32_t, unsigned> VNums;
for (auto *I : Insts) {
uint32_t N = VN.lookupOrAdd(I);
- DEBUG(dbgs() << " VN=" << Twine::utohexstr(N) << " for" << *I << "\n");
+ LLVM_DEBUG(dbgs() << " VN=" << Twine::utohexstr(N) << " for" << *I << "\n");
if (N == ~0U)
return None;
VNums[N]++;
@@ -749,8 +750,8 @@ Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking(
}
unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
- DEBUG(dbgs() << "GVNSink: running on basic block ";
- BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "GVNSink: running on basic block ";
+ BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
SmallVector<BasicBlock *, 4> Preds;
for (auto *B : predecessors(BBEnd)) {
auto *T = B->getTerminator();
@@ -794,23 +795,23 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
Candidates.begin(), Candidates.end(),
[](const SinkingInstructionCandidate &A,
const SinkingInstructionCandidate &B) { return A > B; });
- DEBUG(dbgs() << " -- Sinking candidates:\n"; for (auto &C
- : Candidates) dbgs()
- << " " << C << "\n";);
+ LLVM_DEBUG(dbgs() << " -- Sinking candidates:\n"; for (auto &C
+ : Candidates) dbgs()
+ << " " << C << "\n";);
// Pick the top candidate, as long it is positive!
if (Candidates.empty() || Candidates.front().Cost <= 0)
return 0;
auto C = Candidates.front();
- DEBUG(dbgs() << " -- Sinking: " << C << "\n");
+ LLVM_DEBUG(dbgs() << " -- Sinking: " << C << "\n");
BasicBlock *InsertBB = BBEnd;
if (C.Blocks.size() < NumOrigPreds) {
- DEBUG(dbgs() << " -- Splitting edge to "; BBEnd->printAsOperand(dbgs());
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " -- Splitting edge to ";
+ BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
InsertBB = SplitBlockPredecessors(BBEnd, C.Blocks, ".gvnsink.split");
if (!InsertBB) {
- DEBUG(dbgs() << " -- FAILED to split edge!\n");
+ LLVM_DEBUG(dbgs() << " -- FAILED to split edge!\n");
// Edge couldn't be split.
return 0;
}
diff --git a/lib/Transforms/Scalar/GuardWidening.cpp b/lib/Transforms/Scalar/GuardWidening.cpp
index 55d2f6ee81c3..ad1598d7b8bf 100644
--- a/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/lib/Transforms/Scalar/GuardWidening.cpp
@@ -302,9 +302,9 @@ bool GuardWideningImpl::eliminateGuardViaWidening(
for (auto *Candidate : make_range(I, E)) {
auto Score =
computeWideningScore(GuardInst, GuardInstLoop, Candidate, CurLoop);
- DEBUG(dbgs() << "Score between " << *GuardInst->getArgOperand(0)
- << " and " << *Candidate->getArgOperand(0) << " is "
- << scoreTypeToString(Score) << "\n");
+ LLVM_DEBUG(dbgs() << "Score between " << *GuardInst->getArgOperand(0)
+ << " and " << *Candidate->getArgOperand(0) << " is "
+ << scoreTypeToString(Score) << "\n");
if (Score > BestScoreSoFar) {
BestScoreSoFar = Score;
BestSoFar = Candidate;
@@ -313,15 +313,16 @@ bool GuardWideningImpl::eliminateGuardViaWidening(
}
if (BestScoreSoFar == WS_IllegalOrNegative) {
- DEBUG(dbgs() << "Did not eliminate guard " << *GuardInst << "\n");
+ LLVM_DEBUG(dbgs() << "Did not eliminate guard " << *GuardInst << "\n");
return false;
}
assert(BestSoFar != GuardInst && "Should have never visited same guard!");
assert(DT.dominates(BestSoFar, GuardInst) && "Should be!");
- DEBUG(dbgs() << "Widening " << *GuardInst << " into " << *BestSoFar
- << " with score " << scoreTypeToString(BestScoreSoFar) << "\n");
+ LLVM_DEBUG(dbgs() << "Widening " << *GuardInst << " into " << *BestSoFar
+ << " with score " << scoreTypeToString(BestScoreSoFar)
+ << "\n");
widenGuard(BestSoFar, GuardInst->getArgOperand(0));
GuardInst->setArgOperand(0, ConstantInt::getTrue(GuardInst->getContext()));
EliminatedGuards.push_back(GuardInst);
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index ae0c3eb243ec..86b0269e1f71 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -210,8 +210,8 @@ bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) {
if (FromBase == ToBase)
return true;
- DEBUG(dbgs() << "INDVARS: GEP rewrite bail out "
- << *FromBase << " != " << *ToBase << "\n");
+ LLVM_DEBUG(dbgs() << "INDVARS: GEP rewrite bail out " << *FromBase
+ << " != " << *ToBase << "\n");
return false;
}
@@ -653,8 +653,9 @@ void IndVarSimplify::rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
Value *ExitVal =
expandSCEVIfNeeded(Rewriter, ExitValue, L, Inst, PN->getType());
- DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n'
- << " LoopVal = " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal
+ << '\n'
+ << " LoopVal = " << *Inst << "\n");
if (!isValidRewrite(Inst, ExitVal)) {
DeadInsts.push_back(ExitVal);
@@ -1084,7 +1085,7 @@ Instruction *WidenIV::cloneBitwiseIVUser(NarrowIVDefUse DU) {
Instruction *NarrowDef = DU.NarrowDef;
Instruction *WideDef = DU.WideDef;
- DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");
+ LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");
// Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
// about the narrow operand yet so must insert a [sz]ext. It is probably loop
@@ -1115,7 +1116,7 @@ Instruction *WidenIV::cloneArithmeticIVUser(NarrowIVDefUse DU,
Instruction *NarrowDef = DU.NarrowDef;
Instruction *WideDef = DU.WideDef;
- DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");
+ LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");
unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;
@@ -1315,8 +1316,8 @@ WidenIV::WidenedRecTy WidenIV::getWideRecurrence(NarrowIVDefUse DU) {
/// This IV user cannot be widen. Replace this use of the original narrow IV
/// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT, LoopInfo *LI) {
- DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef
- << " for user " << *DU.NarrowUse << "\n");
+ LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
+ << *DU.NarrowUse << "\n");
IRBuilder<> Builder(
getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI));
Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
@@ -1396,8 +1397,8 @@ Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
UsePhi->replaceAllUsesWith(Trunc);
DeadInsts.emplace_back(UsePhi);
- DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi
- << " to " << *WidePhi << "\n");
+ LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
+ << *WidePhi << "\n");
}
return nullptr;
}
@@ -1428,15 +1429,16 @@ Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// A wider extend was hidden behind a narrower one. This may induce
// another round of IV widening in which the intermediate IV becomes
// dead. It should be very rare.
- DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
- << " not wide enough to subsume " << *DU.NarrowUse << "\n");
+ LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
+ << " not wide enough to subsume " << *DU.NarrowUse
+ << "\n");
DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
NewDef = DU.NarrowUse;
}
}
if (NewDef != DU.NarrowUse) {
- DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
- << " replaced by " << *DU.WideDef << "\n");
+ LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
+ << " replaced by " << *DU.WideDef << "\n");
++NumElimExt;
DU.NarrowUse->replaceAllUsesWith(NewDef);
DeadInsts.emplace_back(DU.NarrowUse);
@@ -1491,8 +1493,9 @@ Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// absolutely guarantee it. Hence the following failsafe check. In rare cases
// where it fails, we simply throw away the newly created wide use.
if (WideAddRec.first != SE->getSCEV(WideUse)) {
- DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse
- << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first << "\n");
+ LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
+ << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
+ << "\n");
DeadInsts.emplace_back(WideUse);
return nullptr;
}
@@ -1597,7 +1600,7 @@ PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
WideInc->setDebugLoc(OrigInc->getDebugLoc());
}
- DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
+ LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
++NumWidened;
// Traverse the def-use chain using a worklist starting at the original IV.
@@ -2231,12 +2234,12 @@ linearFunctionTestReplace(Loop *L,
else
P = ICmpInst::ICMP_EQ;
- DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
- << " LHS:" << *CmpIndVar << '\n'
- << " op:\t"
- << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n"
- << " RHS:\t" << *ExitCnt << "\n"
- << " IVCount:\t" << *IVCount << "\n");
+ LLVM_DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
+ << " LHS:" << *CmpIndVar << '\n'
+ << " op:\t" << (P == ICmpInst::ICMP_NE ? "!=" : "==")
+ << "\n"
+ << " RHS:\t" << *ExitCnt << "\n"
+ << " IVCount:\t" << *IVCount << "\n");
IRBuilder<> Builder(BI);
@@ -2272,7 +2275,7 @@ linearFunctionTestReplace(Loop *L,
NewLimit = Start + Count;
ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit);
- DEBUG(dbgs() << " Widen RHS:\t" << *ExitCnt << "\n");
+ LLVM_DEBUG(dbgs() << " Widen RHS:\t" << *ExitCnt << "\n");
} else {
// We try to extend trip count first. If that doesn't work we truncate IV.
// Zext(trunc(IV)) == IV implies equivalence of the following two:
diff --git a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 79004ddba182..92dc21456e55 100644
--- a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -716,12 +716,13 @@ static bool isSafeDecreasingBound(const SCEV *Start,
assert(SE.isKnownNegative(Step) && "expecting negative step");
- DEBUG(dbgs() << "irce: isSafeDecreasingBound with:\n");
- DEBUG(dbgs() << "irce: Start: " << *Start << "\n");
- DEBUG(dbgs() << "irce: Step: " << *Step << "\n");
- DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n");
- DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred) << "\n");
- DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n");
+ LLVM_DEBUG(dbgs() << "irce: isSafeDecreasingBound with:\n");
+ LLVM_DEBUG(dbgs() << "irce: Start: " << *Start << "\n");
+ LLVM_DEBUG(dbgs() << "irce: Step: " << *Step << "\n");
+ LLVM_DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n");
+ LLVM_DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred)
+ << "\n");
+ LLVM_DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n");
bool IsSigned = ICmpInst::isSigned(Pred);
// The predicate that we need to check that the induction variable lies
@@ -763,12 +764,13 @@ static bool isSafeIncreasingBound(const SCEV *Start,
if (!SE.isAvailableAtLoopEntry(BoundSCEV, L))
return false;
- DEBUG(dbgs() << "irce: isSafeIncreasingBound with:\n");
- DEBUG(dbgs() << "irce: Start: " << *Start << "\n");
- DEBUG(dbgs() << "irce: Step: " << *Step << "\n");
- DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n");
- DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred) << "\n");
- DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n");
+ LLVM_DEBUG(dbgs() << "irce: isSafeIncreasingBound with:\n");
+ LLVM_DEBUG(dbgs() << "irce: Start: " << *Start << "\n");
+ LLVM_DEBUG(dbgs() << "irce: Step: " << *Step << "\n");
+ LLVM_DEBUG(dbgs() << "irce: BoundSCEV: " << *BoundSCEV << "\n");
+ LLVM_DEBUG(dbgs() << "irce: Pred: " << ICmpInst::getPredicateName(Pred)
+ << "\n");
+ LLVM_DEBUG(dbgs() << "irce: LatchExitBrIdx: " << LatchBrExitIdx << "\n");
bool IsSigned = ICmpInst::isSigned(Pred);
// The predicate that we need to check that the induction variable lies
@@ -1473,7 +1475,7 @@ bool LoopConstrainer::run() {
bool IsSignedPredicate = MainLoopStructure.IsSignedPredicate;
Optional<SubRanges> MaybeSR = calculateSubRanges(IsSignedPredicate);
if (!MaybeSR.hasValue()) {
- DEBUG(dbgs() << "irce: could not compute subranges\n");
+ LLVM_DEBUG(dbgs() << "irce: could not compute subranges\n");
return false;
}
@@ -1509,17 +1511,18 @@ bool LoopConstrainer::run() {
IsSignedPredicate))
ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
else {
- DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
- << "preloop exit limit. HighLimit = " << *(*SR.HighLimit)
- << "\n");
+ LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
+ << "preloop exit limit. HighLimit = "
+ << *(*SR.HighLimit) << "\n");
return false;
}
}
if (!isSafeToExpandAt(ExitPreLoopAtSCEV, InsertPt, SE)) {
- DEBUG(dbgs() << "irce: could not prove that it is safe to expand the"
- << " preloop exit limit " << *ExitPreLoopAtSCEV
- << " at block " << InsertPt->getParent()->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "irce: could not prove that it is safe to expand the"
+ << " preloop exit limit " << *ExitPreLoopAtSCEV
+ << " at block " << InsertPt->getParent()->getName()
+ << "\n");
return false;
}
@@ -1537,17 +1540,18 @@ bool LoopConstrainer::run() {
IsSignedPredicate))
ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
else {
- DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
- << "mainloop exit limit. LowLimit = " << *(*SR.LowLimit)
- << "\n");
+ LLVM_DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
+ << "mainloop exit limit. LowLimit = "
+ << *(*SR.LowLimit) << "\n");
return false;
}
}
if (!isSafeToExpandAt(ExitMainLoopAtSCEV, InsertPt, SE)) {
- DEBUG(dbgs() << "irce: could not prove that it is safe to expand the"
- << " main loop exit limit " << *ExitMainLoopAtSCEV
- << " at block " << InsertPt->getParent()->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "irce: could not prove that it is safe to expand the"
+ << " main loop exit limit " << *ExitMainLoopAtSCEV
+ << " at block " << InsertPt->getParent()->getName()
+ << "\n");
return false;
}
@@ -1826,13 +1830,13 @@ bool IRCELegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) {
bool InductiveRangeCheckElimination::run(
Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop) {
if (L->getBlocks().size() >= LoopSizeCutoff) {
- DEBUG(dbgs() << "irce: giving up constraining loop, too large\n");
+ LLVM_DEBUG(dbgs() << "irce: giving up constraining loop, too large\n");
return false;
}
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
- DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
+ LLVM_DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
return false;
}
@@ -1855,7 +1859,7 @@ bool InductiveRangeCheckElimination::run(
IRC.print(OS);
};
- DEBUG(PrintRecognizedRangeChecks(dbgs()));
+ LLVM_DEBUG(PrintRecognizedRangeChecks(dbgs()));
if (PrintRangeChecks)
PrintRecognizedRangeChecks(errs());
@@ -1864,8 +1868,8 @@ bool InductiveRangeCheckElimination::run(
Optional<LoopStructure> MaybeLoopStructure =
LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
if (!MaybeLoopStructure.hasValue()) {
- DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
- << "\n";);
+ LLVM_DEBUG(dbgs() << "irce: could not parse loop structure: "
+ << FailureReason << "\n";);
return false;
}
LoopStructure LS = MaybeLoopStructure.getValue();
@@ -1915,7 +1919,7 @@ bool InductiveRangeCheckElimination::run(
L->print(dbgs());
};
- DEBUG(PrintConstrainedLoopInfo());
+ LLVM_DEBUG(PrintConstrainedLoopInfo());
if (PrintChangedLoops)
PrintConstrainedLoopInfo();
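
The IRCE hunks also hand whole helper calls to the macro (PrintRecognizedRangeChecks, PrintConstrainedLoopInfo). Because the argument is dropped from release builds, the helper runs only when debug output is active, for example under -debug-only=irce. A minimal sketch with an illustrative lambda:

    #define DEBUG_TYPE "irce"         // illustrative tag
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void reportSketch() {
      auto PrintState = [](raw_ostream &OS) {
        OS << "irce: constrained loop state...\n";   // possibly expensive dump
      };
      // The lambda is invoked only when debug output for this tag is enabled.
      LLVM_DEBUG(PrintState(dbgs()));
    }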
diff --git a/lib/Transforms/Scalar/InferAddressSpaces.cpp b/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 454ea254b888..e6fea56f2642 100644
--- a/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -653,13 +653,13 @@ void InferAddressSpaces::inferAddressSpaces(
// Tries to update the address space of the stack top according to the
// address spaces of its operands.
- DEBUG(dbgs() << "Updating the address space of\n " << *V << '\n');
+ LLVM_DEBUG(dbgs() << "Updating the address space of\n " << *V << '\n');
Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
if (!NewAS.hasValue())
continue;
// If any updates are made, grabs its users to the worklist because
// their address spaces can also be possibly updated.
- DEBUG(dbgs() << " to " << NewAS.getValue() << '\n');
+ LLVM_DEBUG(dbgs() << " to " << NewAS.getValue() << '\n');
(*InferredAddrSpace)[V] = NewAS.getValue();
for (Value *User : V->users()) {
@@ -901,15 +901,15 @@ bool InferAddressSpaces::rewriteWithNewAddressSpaces(
if (NewV == nullptr)
continue;
- DEBUG(dbgs() << "Replacing the uses of " << *V
- << "\n with\n " << *NewV << '\n');
+ LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n with\n "
+ << *NewV << '\n');
if (Constant *C = dyn_cast<Constant>(V)) {
Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
C->getType());
if (C != Replace) {
- DEBUG(dbgs() << "Inserting replacement const cast: "
- << Replace << ": " << *Replace << '\n');
+ LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
+ << ": " << *Replace << '\n');
C->replaceAllUsesWith(Replace);
V = Replace;
}
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 5f379253ea5c..cf9919ebf9d2 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -340,7 +340,7 @@ bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
DeferredDominance *DDT_, bool HasProfileData_,
std::unique_ptr<BlockFrequencyInfo> BFI_,
std::unique_ptr<BranchProbabilityInfo> BPI_) {
- DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
TLI = TLI_;
LVI = LVI_;
AA = AA_;
@@ -386,8 +386,9 @@ bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
if (pred_empty(&BB)) {
// When ProcessBlock makes BB unreachable it doesn't bother to fix up
// the instructions in it. We must remove BB to prevent invalid IR.
- DEBUG(dbgs() << " JT: Deleting dead block '" << BB.getName()
- << "' with terminator: " << *BB.getTerminator() << '\n');
+ LLVM_DEBUG(dbgs() << " JT: Deleting dead block '" << BB.getName()
+ << "' with terminator: " << *BB.getTerminator()
+ << '\n');
LoopHeaders.erase(&BB);
LVI->eraseBlock(&BB);
DeleteDeadBlock(&BB, DDT);
@@ -1084,8 +1085,8 @@ bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
Updates.push_back({DominatorTree::Delete, BB, Succ});
}
- DEBUG(dbgs() << " In block '" << BB->getName()
- << "' folding undef terminator: " << *BBTerm << '\n');
+ LLVM_DEBUG(dbgs() << " In block '" << BB->getName()
+ << "' folding undef terminator: " << *BBTerm << '\n');
BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
BBTerm->eraseFromParent();
DDT->applyUpdates(Updates);
@@ -1096,8 +1097,9 @@ bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
// terminator to an unconditional branch. This can occur due to threading in
// other blocks.
if (getKnownConstant(Condition, Preference)) {
- DEBUG(dbgs() << " In block '" << BB->getName()
- << "' folding terminator: " << *BB->getTerminator() << '\n');
+ LLVM_DEBUG(dbgs() << " In block '" << BB->getName()
+ << "' folding terminator: " << *BB->getTerminator()
+ << '\n');
++NumFolds;
ConstantFoldTerminator(BB, true, nullptr, DDT);
return true;
@@ -1574,12 +1576,12 @@ bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
assert(!PredValues.empty() &&
"ComputeValueKnownInPredecessors returned true with no values");
- DEBUG(dbgs() << "IN BB: " << *BB;
- for (const auto &PredValue : PredValues) {
- dbgs() << " BB '" << BB->getName() << "': FOUND condition = "
- << *PredValue.first
- << " for pred '" << PredValue.second->getName() << "'.\n";
- });
+ LLVM_DEBUG(dbgs() << "IN BB: " << *BB; for (const auto &PredValue
+ : PredValues) {
+ dbgs() << " BB '" << BB->getName()
+ << "': FOUND condition = " << *PredValue.first << " for pred '"
+ << PredValue.second->getName() << "'.\n";
+ });
// Decide what we want to thread through. Convert our list of known values to
// a list of known destinations for each pred. This also discards duplicate
@@ -1901,15 +1903,15 @@ bool JumpThreadingPass::ThreadEdge(BasicBlock *BB,
BasicBlock *SuccBB) {
// If threading to the same block as we come from, we would infinite loop.
if (SuccBB == BB) {
- DEBUG(dbgs() << " Not threading across BB '" << BB->getName()
- << "' - would thread to self!\n");
+ LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName()
+ << "' - would thread to self!\n");
return false;
}
// If threading this would thread across a loop header, don't thread the edge.
// See the comments above FindLoopHeaders for justifications and caveats.
if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
- DEBUG({
+ LLVM_DEBUG({
bool BBIsHeader = LoopHeaders.count(BB);
bool SuccIsHeader = LoopHeaders.count(SuccBB);
dbgs() << " Not threading across "
@@ -1923,8 +1925,8 @@ bool JumpThreadingPass::ThreadEdge(BasicBlock *BB,
unsigned JumpThreadCost =
getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
if (JumpThreadCost > BBDupThreshold) {
- DEBUG(dbgs() << " Not threading BB '" << BB->getName()
- << "' - Cost is too high: " << JumpThreadCost << "\n");
+ LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName()
+ << "' - Cost is too high: " << JumpThreadCost << "\n");
return false;
}
@@ -1933,16 +1935,16 @@ bool JumpThreadingPass::ThreadEdge(BasicBlock *BB,
if (PredBBs.size() == 1)
PredBB = PredBBs[0];
else {
- DEBUG(dbgs() << " Factoring out " << PredBBs.size()
- << " common predecessors.\n");
+ LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size()
+ << " common predecessors.\n");
PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
}
// And finally, do it!
- DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() << "' to '"
- << SuccBB->getName() << "' with cost: " << JumpThreadCost
- << ", across block:\n "
- << *BB << "\n");
+ LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName()
+ << "' to '" << SuccBB->getName()
+ << "' with cost: " << JumpThreadCost
+ << ", across block:\n " << *BB << "\n");
if (DDT->pending())
LVI->disableDT();
@@ -2235,17 +2237,17 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
// cause us to transform this into an irreducible loop, don't do this.
// See the comments above FindLoopHeaders for justifications and caveats.
if (LoopHeaders.count(BB)) {
- DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName()
- << "' into predecessor block '" << PredBBs[0]->getName()
- << "' - it might create an irreducible loop!\n");
+ LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName()
+ << "' into predecessor block '" << PredBBs[0]->getName()
+ << "' - it might create an irreducible loop!\n");
return false;
}
unsigned DuplicationCost =
getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
if (DuplicationCost > BBDupThreshold) {
- DEBUG(dbgs() << " Not duplicating BB '" << BB->getName()
- << "' - Cost is too high: " << DuplicationCost << "\n");
+ LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName()
+ << "' - Cost is too high: " << DuplicationCost << "\n");
return false;
}
@@ -2255,17 +2257,18 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
if (PredBBs.size() == 1)
PredBB = PredBBs[0];
else {
- DEBUG(dbgs() << " Factoring out " << PredBBs.size()
- << " common predecessors.\n");
+ LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size()
+ << " common predecessors.\n");
PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
}
Updates.push_back({DominatorTree::Delete, PredBB, BB});
// Okay, we decided to do this! Clone all the instructions in BB onto the end
// of PredBB.
- DEBUG(dbgs() << " Duplicating block '" << BB->getName() << "' into end of '"
- << PredBB->getName() << "' to eliminate branch on phi. Cost: "
- << DuplicationCost << " block is:" << *BB << "\n");
+ LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName()
+ << "' into end of '" << PredBB->getName()
+ << "' to eliminate branch on phi. Cost: "
+ << DuplicationCost << " block is:" << *BB << "\n");
// Unless PredBB ends with an unconditional branch, split the edge so that we
// can just clone the bits from BB into the end of the new PredBB.
@@ -2357,7 +2360,7 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
if (UsesToRename.empty())
continue;
- DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
// We found a use of I outside of BB. Rename all uses of I that are outside
// its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
@@ -2368,7 +2371,7 @@ bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
while (!UsesToRename.empty())
SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
}
// PredBB no longer jumps to BB, remove entries in the PHI node for the edge
@@ -2658,8 +2661,8 @@ bool JumpThreadingPass::ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard,
BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween(
BB, PredUnguardedBlock, Guard, UnguardedMapping);
assert(UnguardedBlock && "Could not create the unguarded block?");
- DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
- << GuardedBlock->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
+ << GuardedBlock->getName() << "\n");
// DuplicateInstructionsInSplitBetween inserts a new block "BB.split" between
// PredBB and BB. We need to perform two inserts and one delete for each of
// the above calls to update Dominators.
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index b9d10d9f4c86..8c0747782d7d 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -392,7 +392,7 @@ bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
// If the instruction is dead, we would try to sink it because it isn't
// used in the loop, instead, just delete it.
if (isInstructionTriviallyDead(&I, TLI)) {
- DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
+ LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
salvageDebugInfo(I);
++II;
CurAST->deleteValue(&I);
@@ -461,7 +461,8 @@ bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
// just fold it.
if (Constant *C = ConstantFoldInstruction(
&I, I.getModule()->getDataLayout(), TLI)) {
- DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
+ LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
+ << '\n');
CurAST->copyValue(&I, C);
I.replaceAllUsesWith(C);
if (isInstructionTriviallyDead(&I, TLI)) {
@@ -927,7 +928,7 @@ static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
const Loop *CurLoop, LoopSafetyInfo *SafetyInfo,
OptimizationRemarkEmitter *ORE, bool FreeInLoop) {
- DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
<< "sinking " << ore::NV("Inst", &I);
@@ -1029,8 +1030,8 @@ static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
const LoopSafetyInfo *SafetyInfo,
OptimizationRemarkEmitter *ORE) {
auto *Preheader = CurLoop->getLoopPreheader();
- DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I
- << "\n");
+ LLVM_DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I
+ << "\n");
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
<< ore::NV("Inst", &I);
@@ -1410,8 +1411,8 @@ bool llvm::promoteLoopAccessesToScalars(
return false;
// Otherwise, this is safe to promote, lets do it!
- DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
- << '\n');
+ LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
+ << '\n');
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
LoopUses[0])
diff --git a/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index c804115f415d..3b41b5d96c86 100644
--- a/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -244,9 +244,9 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) {
if (ItersAhead > getMaxPrefetchIterationsAhead())
return MadeChange;
- DEBUG(dbgs() << "Prefetching " << ItersAhead
- << " iterations ahead (loop size: " << LoopSize << ") in "
- << L->getHeader()->getParent()->getName() << ": " << *L);
+ LLVM_DEBUG(dbgs() << "Prefetching " << ItersAhead
+ << " iterations ahead (loop size: " << LoopSize << ") in "
+ << L->getHeader()->getParent()->getName() << ": " << *L);
SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads;
for (const auto BB : L->blocks()) {
@@ -320,8 +320,8 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) {
ConstantInt::get(I32, MemI->mayReadFromMemory() ? 0 : 1),
ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)});
++NumPrefetches;
- DEBUG(dbgs() << " Access: " << *PtrValue << ", SCEV: " << *LSCEV
- << "\n");
+ LLVM_DEBUG(dbgs() << " Access: " << *PtrValue << ", SCEV: " << *LSCEV
+ << "\n");
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "Prefetched", MemI)
<< "prefetched memory access";
diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp
index 15cd1086f209..d412025d7e94 100644
--- a/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -142,14 +142,15 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT,
// of trouble.
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader || !L->hasDedicatedExits()) {
- DEBUG(dbgs()
- << "Deletion requires Loop with preheader and dedicated exits.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Deletion requires Loop with preheader and dedicated exits.\n");
return LoopDeletionResult::Unmodified;
}
// We can't remove loops that contain subloops. If the subloops were dead,
// they would already have been removed in earlier executions of this pass.
if (L->begin() != L->end()) {
- DEBUG(dbgs() << "Loop contains subloops.\n");
+ LLVM_DEBUG(dbgs() << "Loop contains subloops.\n");
return LoopDeletionResult::Unmodified;
}
@@ -157,7 +158,7 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT,
BasicBlock *ExitBlock = L->getUniqueExitBlock();
if (ExitBlock && isLoopNeverExecuted(L)) {
- DEBUG(dbgs() << "Loop is proven to never execute, delete it!");
+ LLVM_DEBUG(dbgs() << "Loop is proven to never execute, delete it!");
// Set incoming value to undef for phi nodes in the exit block.
for (PHINode &P : ExitBlock->phis()) {
std::fill(P.incoming_values().begin(), P.incoming_values().end(),
@@ -178,13 +179,13 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT,
// block will be branched to, or trying to preserve the branching logic in
// a loop invariant manner.
if (!ExitBlock) {
- DEBUG(dbgs() << "Deletion requires single exit block\n");
+ LLVM_DEBUG(dbgs() << "Deletion requires single exit block\n");
return LoopDeletionResult::Unmodified;
}
// Finally, we have to check that the loop really is dead.
bool Changed = false;
if (!isLoopDead(L, SE, ExitingBlocks, ExitBlock, Changed, Preheader)) {
- DEBUG(dbgs() << "Loop is not invariant, cannot delete.\n");
+ LLVM_DEBUG(dbgs() << "Loop is not invariant, cannot delete.\n");
return Changed ? LoopDeletionResult::Modified
: LoopDeletionResult::Unmodified;
}
@@ -193,12 +194,12 @@ static LoopDeletionResult deleteLoopIfDead(Loop *L, DominatorTree &DT,
// They could be infinite, in which case we'd be changing program behavior.
const SCEV *S = SE.getMaxBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(S)) {
- DEBUG(dbgs() << "Could not compute SCEV MaxBackedgeTakenCount.\n");
+ LLVM_DEBUG(dbgs() << "Could not compute SCEV MaxBackedgeTakenCount.\n");
return Changed ? LoopDeletionResult::Modified
: LoopDeletionResult::Unmodified;
}
- DEBUG(dbgs() << "Loop is invariant, delete it!");
+ LLVM_DEBUG(dbgs() << "Loop is invariant, delete it!");
deleteDeadLoop(L, &DT, &SE, &LI);
++NumDeleted;
@@ -209,8 +210,8 @@ PreservedAnalyses LoopDeletionPass::run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR,
LPMUpdater &Updater) {
- DEBUG(dbgs() << "Analyzing Loop for deletion: ");
- DEBUG(L.dump());
+ LLVM_DEBUG(dbgs() << "Analyzing Loop for deletion: ");
+ LLVM_DEBUG(L.dump());
std::string LoopName = L.getName();
auto Result = deleteLoopIfDead(&L, AR.DT, AR.SE, AR.LI);
if (Result == LoopDeletionResult::Unmodified)
@@ -255,8 +256,8 @@ bool LoopDeletionLegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) {
ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- DEBUG(dbgs() << "Analyzing Loop for deletion: ");
- DEBUG(L->dump());
+ LLVM_DEBUG(dbgs() << "Analyzing Loop for deletion: ");
+ LLVM_DEBUG(L->dump());
LoopDeletionResult Result = deleteLoopIfDead(L, DT, SE, LI);
diff --git a/lib/Transforms/Scalar/LoopDistribute.cpp b/lib/Transforms/Scalar/LoopDistribute.cpp
index a4da0940e332..06083a4f5086 100644
--- a/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -362,9 +362,11 @@ public:
std::tie(LoadToPart, NewElt) =
LoadToPartition.insert(std::make_pair(Inst, PartI));
if (!NewElt) {
- DEBUG(dbgs() << "Merging partitions due to this load in multiple "
- << "partitions: " << PartI << ", "
- << LoadToPart->second << "\n" << *Inst << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Merging partitions due to this load in multiple "
+ << "partitions: " << PartI << ", " << LoadToPart->second
+ << "\n"
+ << *Inst << "\n");
auto PartJ = I;
do {
@@ -602,7 +604,7 @@ public:
const SmallVectorImpl<Dependence> &Dependences) {
Accesses.append(Instructions.begin(), Instructions.end());
- DEBUG(dbgs() << "Backward dependences:\n");
+ LLVM_DEBUG(dbgs() << "Backward dependences:\n");
for (auto &Dep : Dependences)
if (Dep.isPossiblyBackward()) {
// Note that the designations source and destination follow the program
@@ -611,7 +613,7 @@ public:
++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
--Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;
- DEBUG(Dep.print(dbgs(), 2, Instructions));
+ LLVM_DEBUG(Dep.print(dbgs(), 2, Instructions));
}
}
@@ -632,8 +634,9 @@ public:
bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
assert(L->empty() && "Only process inner loops.");
- DEBUG(dbgs() << "\nLDist: In \"" << L->getHeader()->getParent()->getName()
- << "\" checking " << *L << "\n");
+ LLVM_DEBUG(dbgs() << "\nLDist: In \""
+ << L->getHeader()->getParent()->getName()
+ << "\" checking " << *L << "\n");
if (!L->getExitBlock())
return fail("MultipleExitBlocks", "multiple exit blocks");
@@ -705,7 +708,7 @@ public:
for (auto *Inst : DefsUsedOutside)
Partitions.addToNewNonCyclicPartition(Inst);
- DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
+ LLVM_DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
if (Partitions.getSize() < 2)
return fail("CantIsolateUnsafeDeps",
"cannot isolate unsafe dependencies");
@@ -713,20 +716,20 @@ public:
// Run the merge heuristics: Merge non-cyclic adjacent partitions since we
// should be able to vectorize these together.
Partitions.mergeBeforePopulating();
- DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
+ LLVM_DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
if (Partitions.getSize() < 2)
return fail("CantIsolateUnsafeDeps",
"cannot isolate unsafe dependencies");
// Now, populate the partitions with non-memory operations.
Partitions.populateUsedSet();
- DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);
+ LLVM_DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);
// In order to preserve original lexical order for loads, keep them in the
// partition that we set up in the MemoryInstructionDependences loop.
if (Partitions.mergeToAvoidDuplicatedLoads()) {
- DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
- << Partitions);
+ LLVM_DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
+ << Partitions);
if (Partitions.getSize() < 2)
return fail("CantIsolateUnsafeDeps",
"cannot isolate unsafe dependencies");
@@ -740,7 +743,7 @@ public:
return fail("TooManySCEVRuntimeChecks",
"too many SCEV run-time checks needed.\n");
- DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
+ LLVM_DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
// We're done forming the partitions set up the reverse mapping from
// instructions to partitions.
Partitions.setupPartitionIdOnInstructions();
@@ -759,8 +762,8 @@ public:
RtPtrChecking);
if (!Pred.isAlwaysTrue() || !Checks.empty()) {
- DEBUG(dbgs() << "\nPointers:\n");
- DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
+ LLVM_DEBUG(dbgs() << "\nPointers:\n");
+ LLVM_DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
LoopVersioning LVer(*LAI, L, LI, DT, SE, false);
LVer.setAliasChecks(std::move(Checks));
LVer.setSCEVChecks(LAI->getPSE().getUnionPredicate());
@@ -775,8 +778,8 @@ public:
// Now, we remove the instruction from each loop that don't belong to that
// partition.
Partitions.removeUnusedInsts();
- DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
- DEBUG(Partitions.printBlocks());
+ LLVM_DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
+ LLVM_DEBUG(Partitions.printBlocks());
if (LDistVerify) {
LI->verify(*DT);
@@ -798,7 +801,7 @@ public:
LLVMContext &Ctx = F->getContext();
bool Forced = isForced().getValueOr(false);
- DEBUG(dbgs() << "Skipping; " << Message << "\n");
+ LLVM_DEBUG(dbgs() << "Skipping; " << Message << "\n");
// With Rpass-missed report that distribution failed.
ORE->emit([&]() {
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 5bfb44bc151f..abd6a81da1e2 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -310,9 +310,9 @@ bool LoopIdiomRecognize::runOnCountableLoop() {
SmallVector<BasicBlock *, 8> ExitBlocks;
CurLoop->getUniqueExitBlocks(ExitBlocks);
- DEBUG(dbgs() << "loop-idiom Scanning: F["
- << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
- << CurLoop->getHeader()->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "loop-idiom Scanning: F["
+ << CurLoop->getHeader()->getParent()->getName()
+ << "] Loop %" << CurLoop->getHeader()->getName() << "\n");
bool MadeChange = false;
@@ -936,8 +936,9 @@ bool LoopIdiomRecognize::processLoopStridedStore(
NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
}
- DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
- << " from store to: " << *Ev << " at: " << *TheStore << "\n");
+ LLVM_DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
+ << " from store to: " << *Ev << " at: " << *TheStore
+ << "\n");
NewCall->setDebugLoc(TheStore->getDebugLoc());
// Okay, the memset has been formed. Zap the original store and anything that
@@ -1067,9 +1068,10 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
}
NewCall->setDebugLoc(SI->getDebugLoc());
- DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
- << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
- << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
+ LLVM_DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
+ << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
+ << " from store ptr=" << *StoreEv << " at: " << *SI
+ << "\n");
// Okay, the memcpy has been formed. Zap the original store and anything that
// feeds into it.
@@ -1085,9 +1087,9 @@ bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
bool IsLoopMemset) {
if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
- DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
- << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
- << " avoided: multi-block top-level loop\n");
+ LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
+ << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
+ << " avoided: multi-block top-level loop\n");
return true;
}
}
diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp
index 74c8a5a543e6..9addb04e81bc 100644
--- a/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -77,8 +77,8 @@ static const unsigned MaxLoopNestDepth = 10;
static void printDepMatrix(CharMatrix &DepMatrix) {
for (auto &Row : DepMatrix) {
for (auto D : Row)
- DEBUG(dbgs() << D << " ");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << D << " ");
+ LLVM_DEBUG(dbgs() << "\n");
}
}
#endif
@@ -107,8 +107,8 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level,
}
}
- DEBUG(dbgs() << "Found " << MemInstr.size()
- << " Loads and Stores to analyze\n");
+ LLVM_DEBUG(dbgs() << "Found " << MemInstr.size()
+ << " Loads and Stores to analyze\n");
ValueVector::iterator I, IE, J, JE;
@@ -125,11 +125,11 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level,
// Track Output, Flow, and Anti dependencies.
if (auto D = DI->depends(Src, Dst, true)) {
assert(D->isOrdered() && "Expected an output, flow or anti dep.");
- DEBUG(StringRef DepType =
- D->isFlow() ? "flow" : D->isAnti() ? "anti" : "output";
- dbgs() << "Found " << DepType
- << " dependency between Src and Dst\n"
- << " Src:" << *Src << "\n Dst:" << *Dst << '\n');
+ LLVM_DEBUG(StringRef DepType =
+ D->isFlow() ? "flow" : D->isAnti() ? "anti" : "output";
+ dbgs() << "Found " << DepType
+ << " dependency between Src and Dst\n"
+ << " Src:" << *Src << "\n Dst:" << *Dst << '\n');
unsigned Levels = D->getLevels();
char Direction;
for (unsigned II = 1; II <= Levels; ++II) {
@@ -169,8 +169,8 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level,
DepMatrix.push_back(Dep);
if (DepMatrix.size() > MaxMemInstrCount) {
- DEBUG(dbgs() << "Cannot handle more than " << MaxMemInstrCount
- << " dependencies inside loop\n");
+ LLVM_DEBUG(dbgs() << "Cannot handle more than " << MaxMemInstrCount
+ << " dependencies inside loop\n");
return false;
}
}
@@ -272,9 +272,9 @@ static bool isLegalToInterChangeLoops(CharMatrix &DepMatrix,
}
static void populateWorklist(Loop &L, SmallVector<LoopVector, 8> &V) {
- DEBUG(dbgs() << "Calling populateWorklist on Func: "
- << L.getHeader()->getParent()->getName() << " Loop: %"
- << L.getHeader()->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Calling populateWorklist on Func: "
+ << L.getHeader()->getParent()->getName() << " Loop: %"
+ << L.getHeader()->getName() << '\n');
LoopVector LoopList;
Loop *CurrentLoop = &L;
const std::vector<Loop *> *Vec = &CurrentLoop->getSubLoops();
@@ -478,7 +478,7 @@ struct LoopInterchange : public FunctionPass {
for (Loop *L : *LI)
populateWorklist(*L, Worklist);
- DEBUG(dbgs() << "Worklist size = " << Worklist.size() << "\n");
+ LLVM_DEBUG(dbgs() << "Worklist size = " << Worklist.size() << "\n");
bool Changed = true;
while (!Worklist.empty()) {
LoopVector LoopList = Worklist.pop_back_val();
@@ -491,15 +491,15 @@ struct LoopInterchange : public FunctionPass {
for (Loop *L : LoopList) {
const SCEV *ExitCountOuter = SE->getBackedgeTakenCount(L);
if (ExitCountOuter == SE->getCouldNotCompute()) {
- DEBUG(dbgs() << "Couldn't compute backedge count\n");
+ LLVM_DEBUG(dbgs() << "Couldn't compute backedge count\n");
return false;
}
if (L->getNumBackEdges() != 1) {
- DEBUG(dbgs() << "NumBackEdges is not equal to 1\n");
+ LLVM_DEBUG(dbgs() << "NumBackEdges is not equal to 1\n");
return false;
}
if (!L->getExitingBlock()) {
- DEBUG(dbgs() << "Loop doesn't have unique exit block\n");
+ LLVM_DEBUG(dbgs() << "Loop doesn't have unique exit block\n");
return false;
}
}
@@ -516,37 +516,38 @@ struct LoopInterchange : public FunctionPass {
bool Changed = false;
unsigned LoopNestDepth = LoopList.size();
if (LoopNestDepth < 2) {
- DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n");
+ LLVM_DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n");
return false;
}
if (LoopNestDepth > MaxLoopNestDepth) {
- DEBUG(dbgs() << "Cannot handle loops of depth greater than "
- << MaxLoopNestDepth << "\n");
+ LLVM_DEBUG(dbgs() << "Cannot handle loops of depth greater than "
+ << MaxLoopNestDepth << "\n");
return false;
}
if (!isComputableLoopNest(LoopList)) {
- DEBUG(dbgs() << "Not valid loop candidate for interchange\n");
+ LLVM_DEBUG(dbgs() << "Not valid loop candidate for interchange\n");
return false;
}
- DEBUG(dbgs() << "Processing LoopList of size = " << LoopNestDepth << "\n");
+ LLVM_DEBUG(dbgs() << "Processing LoopList of size = " << LoopNestDepth
+ << "\n");
CharMatrix DependencyMatrix;
Loop *OuterMostLoop = *(LoopList.begin());
if (!populateDependencyMatrix(DependencyMatrix, LoopNestDepth,
OuterMostLoop, DI)) {
- DEBUG(dbgs() << "Populating dependency matrix failed\n");
+ LLVM_DEBUG(dbgs() << "Populating dependency matrix failed\n");
return false;
}
#ifdef DUMP_DEP_MATRICIES
- DEBUG(dbgs() << "Dependence before interchange\n");
+ LLVM_DEBUG(dbgs() << "Dependence before interchange\n");
printDepMatrix(DependencyMatrix);
#endif
// Get the Outermost loop exit.
BasicBlock *LoopNestExit = OuterMostLoop->getExitBlock();
if (!LoopNestExit) {
- DEBUG(dbgs() << "OuterMostLoop needs an unique exit block");
+ LLVM_DEBUG(dbgs() << "OuterMostLoop needs an unique exit block");
return false;
}
@@ -563,7 +564,7 @@ struct LoopInterchange : public FunctionPass {
// Update the DependencyMatrix
interChangeDependencies(DependencyMatrix, i, i - 1);
#ifdef DUMP_DEP_MATRICIES
- DEBUG(dbgs() << "Dependence after interchange\n");
+ LLVM_DEBUG(dbgs() << "Dependence after interchange\n");
printDepMatrix(DependencyMatrix);
#endif
Changed |= Interchanged;
@@ -574,21 +575,21 @@ struct LoopInterchange : public FunctionPass {
bool processLoop(LoopVector LoopList, unsigned InnerLoopId,
unsigned OuterLoopId, BasicBlock *LoopNestExit,
std::vector<std::vector<char>> &DependencyMatrix) {
- DEBUG(dbgs() << "Processing Inner Loop Id = " << InnerLoopId
- << " and OuterLoopId = " << OuterLoopId << "\n");
+ LLVM_DEBUG(dbgs() << "Processing Inner Loop Id = " << InnerLoopId
+ << " and OuterLoopId = " << OuterLoopId << "\n");
Loop *InnerLoop = LoopList[InnerLoopId];
Loop *OuterLoop = LoopList[OuterLoopId];
LoopInterchangeLegality LIL(OuterLoop, InnerLoop, SE, LI, DT,
PreserveLCSSA, ORE);
if (!LIL.canInterchangeLoops(InnerLoopId, OuterLoopId, DependencyMatrix)) {
- DEBUG(dbgs() << "Not interchanging loops. Cannot prove legality.\n");
+ LLVM_DEBUG(dbgs() << "Not interchanging loops. Cannot prove legality.\n");
return false;
}
- DEBUG(dbgs() << "Loops are legal to interchange\n");
+ LLVM_DEBUG(dbgs() << "Loops are legal to interchange\n");
LoopInterchangeProfitability LIP(OuterLoop, InnerLoop, SE, ORE);
if (!LIP.isProfitable(InnerLoopId, OuterLoopId, DependencyMatrix)) {
- DEBUG(dbgs() << "Interchanging loops not profitable.\n");
+ LLVM_DEBUG(dbgs() << "Interchanging loops not profitable.\n");
return false;
}
@@ -602,7 +603,7 @@ struct LoopInterchange : public FunctionPass {
LoopInterchangeTransform LIT(OuterLoop, InnerLoop, SE, LI, DT,
LoopNestExit, LIL.hasInnerLoopReduction());
LIT.transform();
- DEBUG(dbgs() << "Loops interchanged.\n");
+ LLVM_DEBUG(dbgs() << "Loops interchanged.\n");
LoopsInterchanged++;
return true;
}
@@ -651,7 +652,7 @@ bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) {
BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch();
- DEBUG(dbgs() << "Checking if loops are tightly nested\n");
+ LLVM_DEBUG(dbgs() << "Checking if loops are tightly nested\n");
// A perfectly nested loop will not have any branch in between the outer and
// inner block i.e. outer header will branch to either inner preheader and
@@ -665,14 +666,14 @@ bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) {
if (Succ != InnerLoopPreHeader && Succ != OuterLoopLatch)
return false;
- DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch\n");
+ LLVM_DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch\n");
// We do not have any basic block in between now make sure the outer header
// and outer loop latch doesn't contain any unsafe instructions.
if (containsUnsafeInstructionsInHeader(OuterLoopHeader) ||
containsUnsafeInstructionsInLatch(OuterLoopLatch))
return false;
- DEBUG(dbgs() << "Loops are perfectly nested\n");
+ LLVM_DEBUG(dbgs() << "Loops are perfectly nested\n");
// We have a perfect loop nest.
return true;
}
@@ -714,7 +715,7 @@ bool LoopInterchangeLegality::findInductionAndReductions(
else if (RecurrenceDescriptor::isReductionPHI(&PHI, L, RD))
Reductions.push_back(&PHI);
else {
- DEBUG(
+ LLVM_DEBUG(
dbgs() << "Failed to recognize PHI as an induction or reduction.\n");
return false;
}
@@ -750,8 +751,9 @@ bool LoopInterchangeLegality::currentLimitations() {
OuterLoop->getExitingBlock() != OuterLoop->getLoopLatch() ||
!isa<BranchInst>(InnerLoopLatch->getTerminator()) ||
!isa<BranchInst>(OuterLoop->getLoopLatch()->getTerminator())) {
- DEBUG(dbgs() << "Loops where the latch is not the exiting block are not"
- << " supported currently.\n");
+ LLVM_DEBUG(
+ dbgs() << "Loops where the latch is not the exiting block are not"
+ << " supported currently.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "ExitingNotLatch",
OuterLoop->getStartLoc(),
@@ -766,8 +768,9 @@ bool LoopInterchangeLegality::currentLimitations() {
SmallVector<PHINode *, 8> Inductions;
SmallVector<PHINode *, 8> Reductions;
if (!findInductionAndReductions(InnerLoop, Inductions, Reductions)) {
- DEBUG(dbgs() << "Only inner loops with induction or reduction PHI nodes "
- << "are supported currently.\n");
+ LLVM_DEBUG(
+ dbgs() << "Only inner loops with induction or reduction PHI nodes "
+ << "are supported currently.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedPHIInner",
InnerLoop->getStartLoc(),
@@ -780,8 +783,9 @@ bool LoopInterchangeLegality::currentLimitations() {
// TODO: Currently we handle only loops with 1 induction variable.
if (Inductions.size() != 1) {
- DEBUG(dbgs() << "We currently only support loops with 1 induction variable."
- << "Failed to interchange due to current limitation\n");
+ LLVM_DEBUG(
+ dbgs() << "We currently only support loops with 1 induction variable."
+ << "Failed to interchange due to current limitation\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "MultiInductionInner",
InnerLoop->getStartLoc(),
@@ -797,8 +801,9 @@ bool LoopInterchangeLegality::currentLimitations() {
InnerInductionVar = Inductions.pop_back_val();
Reductions.clear();
if (!findInductionAndReductions(OuterLoop, Inductions, Reductions)) {
- DEBUG(dbgs() << "Only outer loops with induction or reduction PHI nodes "
- << "are supported currently.\n");
+ LLVM_DEBUG(
+ dbgs() << "Only outer loops with induction or reduction PHI nodes "
+ << "are supported currently.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedPHIOuter",
OuterLoop->getStartLoc(),
@@ -812,8 +817,8 @@ bool LoopInterchangeLegality::currentLimitations() {
// Outer loop cannot have reduction because then loops will not be tightly
// nested.
if (!Reductions.empty()) {
- DEBUG(dbgs() << "Outer loops with reductions are not supported "
- << "currently.\n");
+ LLVM_DEBUG(dbgs() << "Outer loops with reductions are not supported "
+ << "currently.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "ReductionsOuter",
OuterLoop->getStartLoc(),
@@ -825,8 +830,8 @@ bool LoopInterchangeLegality::currentLimitations() {
}
// TODO: Currently we handle only loops with 1 induction variable.
if (Inductions.size() != 1) {
- DEBUG(dbgs() << "Loops with more than 1 induction variables are not "
- << "supported currently.\n");
+ LLVM_DEBUG(dbgs() << "Loops with more than 1 induction variables are not "
+ << "supported currently.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "MultiIndutionOuter",
OuterLoop->getStartLoc(),
@@ -839,7 +844,7 @@ bool LoopInterchangeLegality::currentLimitations() {
// TODO: Triangular loops are not handled for now.
if (!isLoopStructureUnderstood(InnerInductionVar)) {
- DEBUG(dbgs() << "Loop structure not understood by pass\n");
+ LLVM_DEBUG(dbgs() << "Loop structure not understood by pass\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedStructureInner",
InnerLoop->getStartLoc(),
@@ -852,7 +857,8 @@ bool LoopInterchangeLegality::currentLimitations() {
// TODO: We only handle LCSSA PHI's corresponding to reduction for now.
BasicBlock *InnerExit = InnerLoop->getExitBlock();
if (!containsSafePHI(InnerExit, false)) {
- DEBUG(dbgs() << "Can only handle LCSSA PHIs in inner loops currently.\n");
+ LLVM_DEBUG(
+ dbgs() << "Can only handle LCSSA PHIs in inner loops currently.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "NoLCSSAPHIOuterInner",
InnerLoop->getStartLoc(),
@@ -882,8 +888,9 @@ bool LoopInterchangeLegality::currentLimitations() {
dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(0));
if (!InnerIndexVarInc) {
- DEBUG(dbgs() << "Did not find an instruction to increment the induction "
- << "variable.\n");
+ LLVM_DEBUG(
+ dbgs() << "Did not find an instruction to increment the induction "
+ << "variable.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "NoIncrementInInner",
InnerLoop->getStartLoc(),
@@ -907,8 +914,8 @@ bool LoopInterchangeLegality::currentLimitations() {
// We found an instruction. If this is not induction variable then it is not
// safe to split this loop latch.
if (!I.isIdenticalTo(InnerIndexVarInc)) {
- DEBUG(dbgs() << "Found unsupported instructions between induction "
- << "variable increment and branch.\n");
+ LLVM_DEBUG(dbgs() << "Found unsupported instructions between induction "
+ << "variable increment and branch.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(
DEBUG_TYPE, "UnsupportedInsBetweenInduction",
@@ -925,7 +932,7 @@ bool LoopInterchangeLegality::currentLimitations() {
// The loop latch ended and we didn't find the induction variable return as
// current limitation.
if (!FoundInduction) {
- DEBUG(dbgs() << "Did not find the induction variable.\n");
+ LLVM_DEBUG(dbgs() << "Did not find the induction variable.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "NoIndutionVariable",
InnerLoop->getStartLoc(),
@@ -978,9 +985,9 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId,
unsigned OuterLoopId,
CharMatrix &DepMatrix) {
if (!isLegalToInterChangeLoops(DepMatrix, InnerLoopId, OuterLoopId)) {
- DEBUG(dbgs() << "Failed interchange InnerLoopId = " << InnerLoopId
- << " and OuterLoopId = " << OuterLoopId
- << " due to dependence\n");
+ LLVM_DEBUG(dbgs() << "Failed interchange InnerLoopId = " << InnerLoopId
+ << " and OuterLoopId = " << OuterLoopId
+ << " due to dependence\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "Dependence",
InnerLoop->getStartLoc(),
@@ -996,8 +1003,9 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId,
// readnone functions do not prevent interchanging.
if (CI->doesNotReadMemory())
continue;
- DEBUG(dbgs() << "Loops with call instructions cannot be interchanged "
- << "safely.");
+ LLVM_DEBUG(
+ dbgs() << "Loops with call instructions cannot be interchanged "
+ << "safely.");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "CallInst",
CI->getDebugLoc(),
@@ -1033,13 +1041,13 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId,
// TODO: The loops could not be interchanged due to current limitations in the
// transform module.
if (currentLimitations()) {
- DEBUG(dbgs() << "Not legal because of current transform limitation\n");
+ LLVM_DEBUG(dbgs() << "Not legal because of current transform limitation\n");
return false;
}
// Check if the loops are tightly nested.
if (!tightlyNested(OuterLoop, InnerLoop)) {
- DEBUG(dbgs() << "Loops not tightly nested\n");
+ LLVM_DEBUG(dbgs() << "Loops not tightly nested\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "NotTightlyNested",
InnerLoop->getStartLoc(),
@@ -1051,7 +1059,7 @@ bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId,
}
if (!areLoopExitPHIsSupported(OuterLoop, InnerLoop)) {
- DEBUG(dbgs() << "Found unsupported PHI nodes in outer loop exit.\n");
+ LLVM_DEBUG(dbgs() << "Found unsupported PHI nodes in outer loop exit.\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedExitPHI",
OuterLoop->getStartLoc(),
@@ -1145,7 +1153,7 @@ bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId,
// of induction variables in the instruction and allows reordering if number
// of bad orders is more than good.
int Cost = getInstrOrderCost();
- DEBUG(dbgs() << "Cost = " << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "Cost = " << Cost << "\n");
if (Cost < -LoopInterchangeCostThreshold)
return true;
@@ -1258,10 +1266,10 @@ bool LoopInterchangeTransform::transform() {
if (InnerLoop->getSubLoops().empty()) {
BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader();
- DEBUG(dbgs() << "Calling Split Inner Loop\n");
+ LLVM_DEBUG(dbgs() << "Calling Split Inner Loop\n");
PHINode *InductionPHI = getInductionVariable(InnerLoop, SE);
if (!InductionPHI) {
- DEBUG(dbgs() << "Failed to find the point to split loop latch \n");
+ LLVM_DEBUG(dbgs() << "Failed to find the point to split loop latch \n");
return false;
}
@@ -1279,16 +1287,16 @@ bool LoopInterchangeTransform::transform() {
// incremented/decremented.
// TODO: This splitting logic may not work always. Fix this.
splitInnerLoopLatch(InnerIndexVar);
- DEBUG(dbgs() << "splitInnerLoopLatch done\n");
+ LLVM_DEBUG(dbgs() << "splitInnerLoopLatch done\n");
// Splits the inner loops phi nodes out into a separate basic block.
splitInnerLoopHeader();
- DEBUG(dbgs() << "splitInnerLoopHeader done\n");
+ LLVM_DEBUG(dbgs() << "splitInnerLoopHeader done\n");
}
Transformed |= adjustLoopLinks();
if (!Transformed) {
- DEBUG(dbgs() << "adjustLoopLinks failed\n");
+ LLVM_DEBUG(dbgs() << "adjustLoopLinks failed\n");
return false;
}
@@ -1322,8 +1330,8 @@ void LoopInterchangeTransform::splitInnerLoopHeader() {
}
}
- DEBUG(dbgs() << "Output of splitInnerLoopHeader InnerLoopHeaderSucc & "
- "InnerLoopHeader\n");
+ LLVM_DEBUG(dbgs() << "Output of splitInnerLoopHeader InnerLoopHeaderSucc & "
+ "InnerLoopHeader\n");
}
/// Move all instructions except the terminator from FromBB right before
@@ -1370,7 +1378,7 @@ static void updateSuccessor(BranchInst *BI, BasicBlock *OldBB,
}
bool LoopInterchangeTransform::adjustLoopBranches() {
- DEBUG(dbgs() << "adjustLoopBranches called\n");
+ LLVM_DEBUG(dbgs() << "adjustLoopBranches called\n");
std::vector<DominatorTree::UpdateType> DTUpdates;
// Adjust the loop preheader
diff --git a/lib/Transforms/Scalar/LoopLoadElimination.cpp b/lib/Transforms/Scalar/LoopLoadElimination.cpp
index a7c27662aa06..7f882191d1f7 100644
--- a/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -285,9 +285,11 @@ public:
Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
if (LoadToSingleCand[Cand.Load] != &Cand) {
- DEBUG(dbgs() << "Removing from candidates: \n" << Cand
- << " The load may have multiple stores forwarding to "
- << "it\n");
+ LLVM_DEBUG(
+ dbgs() << "Removing from candidates: \n"
+ << Cand
+ << " The load may have multiple stores forwarding to "
+ << "it\n");
return true;
}
return false;
@@ -395,8 +397,9 @@ public:
return false;
});
- DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size() << "):\n");
- DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));
+ LLVM_DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size()
+ << "):\n");
+ LLVM_DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));
return Checks;
}
@@ -440,8 +443,8 @@ public:
/// Top-level driver for each loop: find store->load forwarding
/// candidates, add run-time checks and perform transformation.
bool processLoop() {
- DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
- << "\" checking " << *L << "\n");
+ LLVM_DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
+ << "\" checking " << *L << "\n");
// Look for store-to-load forwarding cases across the
// backedge. E.g.:
@@ -480,7 +483,7 @@ public:
SmallVector<StoreToLoadForwardingCandidate, 4> Candidates;
unsigned NumForwarding = 0;
for (const StoreToLoadForwardingCandidate Cand : StoreToLoadDependences) {
- DEBUG(dbgs() << "Candidate " << Cand);
+ LLVM_DEBUG(dbgs() << "Candidate " << Cand);
// Make sure that the stored values is available everywhere in the loop in
// the next iteration.
@@ -499,9 +502,10 @@ public:
continue;
++NumForwarding;
- DEBUG(dbgs()
- << NumForwarding
- << ". Valid store-to-load forwarding across the loop backedge\n");
+ LLVM_DEBUG(
+ dbgs()
+ << NumForwarding
+ << ". Valid store-to-load forwarding across the loop backedge\n");
Candidates.push_back(Cand);
}
if (Candidates.empty())
@@ -514,25 +518,26 @@ public:
// Too many checks are likely to outweigh the benefits of forwarding.
if (Checks.size() > Candidates.size() * CheckPerElim) {
- DEBUG(dbgs() << "Too many run-time checks needed.\n");
+ LLVM_DEBUG(dbgs() << "Too many run-time checks needed.\n");
return false;
}
if (LAI.getPSE().getUnionPredicate().getComplexity() >
LoadElimSCEVCheckThreshold) {
- DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
+ LLVM_DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
return false;
}
if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) {
if (L->getHeader()->getParent()->optForSize()) {
- DEBUG(dbgs() << "Versioning is needed but not allowed when optimizing "
- "for size.\n");
+ LLVM_DEBUG(
+ dbgs() << "Versioning is needed but not allowed when optimizing "
+ "for size.\n");
return false;
}
if (!L->isLoopSimplifyForm()) {
- DEBUG(dbgs() << "Loop is not is loop-simplify form");
+ LLVM_DEBUG(dbgs() << "Loop is not is loop-simplify form");
return false;
}
diff --git a/lib/Transforms/Scalar/LoopPredication.cpp b/lib/Transforms/Scalar/LoopPredication.cpp
index 6102890bc5ef..561ceea1d880 100644
--- a/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/lib/Transforms/Scalar/LoopPredication.cpp
@@ -411,11 +411,11 @@ LoopPredication::generateLoopLatchCheck(Type *RangeCheckType) {
if (!NewLatchCheck.IV)
return None;
NewLatchCheck.Limit = SE->getTruncateExpr(LatchCheck.Limit, RangeCheckType);
- DEBUG(dbgs() << "IV of type: " << *LatchType
- << "can be represented as range check type:" << *RangeCheckType
- << "\n");
- DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n");
- DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n");
+ LLVM_DEBUG(dbgs() << "IV of type: " << *LatchType
+ << "can be represented as range check type:"
+ << *RangeCheckType << "\n");
+ LLVM_DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n");
+ LLVM_DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n");
return NewLatchCheck;
}
@@ -448,15 +448,15 @@ Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop(
SE->getMinusSCEV(LatchStart, SE->getOne(Ty)));
if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) ||
!CanExpand(LatchLimit) || !CanExpand(RHS)) {
- DEBUG(dbgs() << "Can't expand limit check!\n");
+ LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
return None;
}
auto LimitCheckPred =
ICmpInst::getFlippedStrictnessPredicate(LatchCheck.Pred);
- DEBUG(dbgs() << "LHS: " << *LatchLimit << "\n");
- DEBUG(dbgs() << "RHS: " << *RHS << "\n");
- DEBUG(dbgs() << "Pred: " << LimitCheckPred << "\n");
+ LLVM_DEBUG(dbgs() << "LHS: " << *LatchLimit << "\n");
+ LLVM_DEBUG(dbgs() << "RHS: " << *RHS << "\n");
+ LLVM_DEBUG(dbgs() << "Pred: " << LimitCheckPred << "\n");
Instruction *InsertAt = Preheader->getTerminator();
auto *LimitCheck =
@@ -475,16 +475,16 @@ Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop(
const SCEV *LatchLimit = LatchCheck.Limit;
if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) ||
!CanExpand(LatchLimit)) {
- DEBUG(dbgs() << "Can't expand limit check!\n");
+ LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
return None;
}
// The decrement of the latch check IV should be the same as the
// rangeCheckIV.
auto *PostDecLatchCheckIV = LatchCheck.IV->getPostIncExpr(*SE);
if (RangeCheck.IV != PostDecLatchCheckIV) {
- DEBUG(dbgs() << "Not the same. PostDecLatchCheckIV: "
- << *PostDecLatchCheckIV
- << " and RangeCheckIV: " << *RangeCheck.IV << "\n");
+ LLVM_DEBUG(dbgs() << "Not the same. PostDecLatchCheckIV: "
+ << *PostDecLatchCheckIV
+ << " and RangeCheckIV: " << *RangeCheck.IV << "\n");
return None;
}
@@ -508,8 +508,8 @@ Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop(
Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
SCEVExpander &Expander,
IRBuilder<> &Builder) {
- DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
- DEBUG(ICI->dump());
+ LLVM_DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
+ LLVM_DEBUG(ICI->dump());
// parseLoopStructure guarantees that the latch condition is:
// ++i <pred> latchLimit, where <pred> is u<, u<=, s<, or s<=.
@@ -517,34 +517,34 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
// i u< guardLimit
auto RangeCheck = parseLoopICmp(ICI);
if (!RangeCheck) {
- DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
+ LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
return None;
}
- DEBUG(dbgs() << "Guard check:\n");
- DEBUG(RangeCheck->dump());
+ LLVM_DEBUG(dbgs() << "Guard check:\n");
+ LLVM_DEBUG(RangeCheck->dump());
if (RangeCheck->Pred != ICmpInst::ICMP_ULT) {
- DEBUG(dbgs() << "Unsupported range check predicate(" << RangeCheck->Pred
- << ")!\n");
+ LLVM_DEBUG(dbgs() << "Unsupported range check predicate("
+ << RangeCheck->Pred << ")!\n");
return None;
}
auto *RangeCheckIV = RangeCheck->IV;
if (!RangeCheckIV->isAffine()) {
- DEBUG(dbgs() << "Range check IV is not affine!\n");
+ LLVM_DEBUG(dbgs() << "Range check IV is not affine!\n");
return None;
}
auto *Step = RangeCheckIV->getStepRecurrence(*SE);
// We cannot just compare with latch IV step because the latch and range IVs
// may have different types.
if (!isSupportedStep(Step)) {
- DEBUG(dbgs() << "Range check and latch have IVs different steps!\n");
+ LLVM_DEBUG(dbgs() << "Range check and latch have IVs different steps!\n");
return None;
}
auto *Ty = RangeCheckIV->getType();
auto CurrLatchCheckOpt = generateLoopLatchCheck(Ty);
if (!CurrLatchCheckOpt) {
- DEBUG(dbgs() << "Failed to generate a loop latch check "
- "corresponding to range type: "
- << *Ty << "\n");
+ LLVM_DEBUG(dbgs() << "Failed to generate a loop latch check "
+ "corresponding to range type: "
+ << *Ty << "\n");
return None;
}
@@ -555,7 +555,7 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
CurrLatchCheck.IV->getStepRecurrence(*SE)->getType() &&
"Range and latch steps should be of same type!");
if (Step != CurrLatchCheck.IV->getStepRecurrence(*SE)) {
- DEBUG(dbgs() << "Range and latch have different step values!\n");
+ LLVM_DEBUG(dbgs() << "Range and latch have different step values!\n");
return None;
}
@@ -571,8 +571,8 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard,
SCEVExpander &Expander) {
- DEBUG(dbgs() << "Processing guard:\n");
- DEBUG(Guard->dump());
+ LLVM_DEBUG(dbgs() << "Processing guard:\n");
+ LLVM_DEBUG(Guard->dump());
IRBuilder<> Builder(cast<Instruction>(Preheader->getTerminator()));
@@ -625,7 +625,7 @@ bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard,
LastCheck = Builder.CreateAnd(LastCheck, Check);
Guard->setOperand(0, LastCheck);
- DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
+ LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
return true;
}
@@ -634,7 +634,7 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() {
BasicBlock *LoopLatch = L->getLoopLatch();
if (!LoopLatch) {
- DEBUG(dbgs() << "The loop doesn't have a single latch!\n");
+ LLVM_DEBUG(dbgs() << "The loop doesn't have a single latch!\n");
return None;
}
@@ -645,7 +645,7 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() {
if (!match(LoopLatch->getTerminator(),
m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)), TrueDest,
FalseDest))) {
- DEBUG(dbgs() << "Failed to match the latch terminator!\n");
+ LLVM_DEBUG(dbgs() << "Failed to match the latch terminator!\n");
return None;
}
assert((TrueDest == L->getHeader() || FalseDest == L->getHeader()) &&
@@ -655,20 +655,20 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() {
auto Result = parseLoopICmp(Pred, LHS, RHS);
if (!Result) {
- DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
+ LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
return None;
}
// Check affine first, so if it's not we don't try to compute the step
// recurrence.
if (!Result->IV->isAffine()) {
- DEBUG(dbgs() << "The induction variable is not affine!\n");
+ LLVM_DEBUG(dbgs() << "The induction variable is not affine!\n");
return None;
}
auto *Step = Result->IV->getStepRecurrence(*SE);
if (!isSupportedStep(Step)) {
- DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n");
+ LLVM_DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n");
return None;
}
@@ -684,8 +684,8 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() {
};
if (IsUnsupportedPredicate(Step, Result->Pred)) {
- DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred
- << ")!\n");
+ LLVM_DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred
+ << ")!\n");
return None;
}
return Result;
@@ -751,11 +751,11 @@ bool LoopPredication::isLoopProfitableToPredicate() {
// less than one, can invert the definition of profitable loop predication.
float ScaleFactor = LatchExitProbabilityScale;
if (ScaleFactor < 1) {
- DEBUG(
+ LLVM_DEBUG(
dbgs()
<< "Ignored user setting for loop-predication-latch-probability-scale: "
<< LatchExitProbabilityScale << "\n");
- DEBUG(dbgs() << "The value is set to 1.0\n");
+ LLVM_DEBUG(dbgs() << "The value is set to 1.0\n");
ScaleFactor = 1.0;
}
const auto LatchProbabilityThreshold =
@@ -778,8 +778,8 @@ bool LoopPredication::isLoopProfitableToPredicate() {
bool LoopPredication::runOnLoop(Loop *Loop) {
L = Loop;
- DEBUG(dbgs() << "Analyzing ");
- DEBUG(L->dump());
+ LLVM_DEBUG(dbgs() << "Analyzing ");
+ LLVM_DEBUG(L->dump());
Module *M = L->getHeader()->getModule();
@@ -800,11 +800,11 @@ bool LoopPredication::runOnLoop(Loop *Loop) {
return false;
LatchCheck = *LatchCheckOpt;
- DEBUG(dbgs() << "Latch check:\n");
- DEBUG(LatchCheck.dump());
+ LLVM_DEBUG(dbgs() << "Latch check:\n");
+ LLVM_DEBUG(LatchCheck.dump());
if (!isLoopProfitableToPredicate()) {
- DEBUG(dbgs()<< "Loop not profitable to predicate!\n");
+ LLVM_DEBUG(dbgs() << "Loop not profitable to predicate!\n");
return false;
}
// Collect all the guards into a vector and process later, so as not
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index 1f693467647e..83dd196f20e1 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -644,14 +644,14 @@ void LoopReroll::collectPossibleIVs(Loop *L,
if (IncSCEV->getValue()->isZero() || AInt.uge(MaxInc))
continue;
IVToIncMap[&*I] = IncSCEV->getValue()->getSExtValue();
- DEBUG(dbgs() << "LRR: Possible IV: " << *I << " = " << *PHISCEV
- << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Possible IV: " << *I << " = " << *PHISCEV
+ << "\n");
if (isLoopControlIV(L, &*I)) {
assert(!LoopControlIV && "Found two loop control only IV");
LoopControlIV = &(*I);
- DEBUG(dbgs() << "LRR: Possible loop control only IV: " << *I << " = "
- << *PHISCEV << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Possible loop control only IV: " << *I
+ << " = " << *PHISCEV << "\n");
} else
PossibleIVs.push_back(&*I);
}
@@ -718,8 +718,8 @@ void LoopReroll::collectPossibleReductions(Loop *L,
if (!SLR.valid())
continue;
- DEBUG(dbgs() << "LRR: Possible reduction: " << *I << " (with " <<
- SLR.size() << " chained instructions)\n");
+ LLVM_DEBUG(dbgs() << "LRR: Possible reduction: " << *I << " (with "
+ << SLR.size() << " chained instructions)\n");
Reductions.addSLR(SLR);
}
}
@@ -857,7 +857,8 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) {
BaseUsers.push_back(II);
continue;
} else {
- DEBUG(dbgs() << "LRR: Aborting due to non-instruction: " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Aborting due to non-instruction: " << *I
+ << "\n");
return false;
}
}
@@ -879,7 +880,7 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) {
// away.
if (BaseUsers.size()) {
if (Roots.find(0) != Roots.end()) {
- DEBUG(dbgs() << "LRR: Multiple roots found for base - aborting!\n");
+ LLVM_DEBUG(dbgs() << "LRR: Multiple roots found for base - aborting!\n");
return false;
}
Roots[0] = Base;
@@ -895,9 +896,9 @@ collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) {
if (KV.first == 0)
continue;
if (!KV.second->hasNUses(NumBaseUses)) {
- DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: "
- << "#Base=" << NumBaseUses << ", #Root=" <<
- KV.second->getNumUses() << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: "
+ << "#Base=" << NumBaseUses
+ << ", #Root=" << KV.second->getNumUses() << "\n");
return false;
}
}
@@ -1025,13 +1026,14 @@ bool LoopReroll::DAGRootTracker::findRoots() {
// Ensure all sets have the same size.
if (RootSets.empty()) {
- DEBUG(dbgs() << "LRR: Aborting because no root sets found!\n");
+ LLVM_DEBUG(dbgs() << "LRR: Aborting because no root sets found!\n");
return false;
}
for (auto &V : RootSets) {
if (V.Roots.empty() || V.Roots.size() != RootSets[0].Roots.size()) {
- DEBUG(dbgs()
- << "LRR: Aborting because not all root sets have the same size\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "LRR: Aborting because not all root sets have the same size\n");
return false;
}
}
@@ -1039,13 +1041,14 @@ bool LoopReroll::DAGRootTracker::findRoots() {
Scale = RootSets[0].Roots.size() + 1;
if (Scale > IL_MaxRerollIterations) {
- DEBUG(dbgs() << "LRR: Aborting - too many iterations found. "
- << "#Found=" << Scale << ", #Max=" << IL_MaxRerollIterations
- << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Aborting - too many iterations found. "
+ << "#Found=" << Scale
+ << ", #Max=" << IL_MaxRerollIterations << "\n");
return false;
}
- DEBUG(dbgs() << "LRR: Successfully found roots: Scale=" << Scale << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Successfully found roots: Scale=" << Scale
+ << "\n");
return true;
}
@@ -1079,7 +1082,7 @@ bool LoopReroll::DAGRootTracker::collectUsedInstructions(SmallInstructionSet &Po
// While we're here, check the use sets are the same size.
if (V.size() != VBase.size()) {
- DEBUG(dbgs() << "LRR: Aborting - use sets are different sizes\n");
+ LLVM_DEBUG(dbgs() << "LRR: Aborting - use sets are different sizes\n");
return false;
}
@@ -1236,17 +1239,17 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
// set.
for (auto &KV : Uses) {
if (KV.second.count() != 1 && !isIgnorableInst(KV.first)) {
- DEBUG(dbgs() << "LRR: Aborting - instruction is not used in 1 iteration: "
- << *KV.first << " (#uses=" << KV.second.count() << ")\n");
+ LLVM_DEBUG(
+ dbgs() << "LRR: Aborting - instruction is not used in 1 iteration: "
+ << *KV.first << " (#uses=" << KV.second.count() << ")\n");
return false;
}
}
- DEBUG(
- for (auto &KV : Uses) {
- dbgs() << "LRR: " << KV.second.find_first() << "\t" << *KV.first << "\n";
- }
- );
+ LLVM_DEBUG(for (auto &KV
+ : Uses) {
+ dbgs() << "LRR: " << KV.second.find_first() << "\t" << *KV.first << "\n";
+ });
for (unsigned Iter = 1; Iter < Scale; ++Iter) {
// In addition to regular aliasing information, we need to look for
@@ -1305,8 +1308,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
if (TryIt == Uses.end() || TryIt == RootIt ||
instrDependsOn(TryIt->first, RootIt, TryIt)) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
- " vs. " << *RootInst << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at "
+ << *BaseInst << " vs. " << *RootInst << "\n");
return false;
}
@@ -1342,8 +1345,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
// root instruction, does not also belong to the base set or the set of
// some other root instruction.
if (RootIt->second.count() > 1) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
- " vs. " << *RootInst << " (prev. case overlap)\n");
+ LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst
+ << " vs. " << *RootInst << " (prev. case overlap)\n");
return false;
}
@@ -1353,8 +1356,9 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
if (RootInst->mayReadFromMemory())
for (auto &K : AST) {
if (K.aliasesUnknownInst(RootInst, *AA)) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
- " vs. " << *RootInst << " (depends on future store)\n");
+ LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at "
+ << *BaseInst << " vs. " << *RootInst
+ << " (depends on future store)\n");
return false;
}
}
@@ -1367,9 +1371,9 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
!isSafeToSpeculativelyExecute(BaseInst)) ||
(!isUnorderedLoadStore(RootInst) &&
!isSafeToSpeculativelyExecute(RootInst)))) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
- " vs. " << *RootInst <<
- " (side effects prevent reordering)\n");
+ LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst
+ << " vs. " << *RootInst
+ << " (side effects prevent reordering)\n");
return false;
}
@@ -1420,8 +1424,9 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
BaseInst->getOperand(!j) == Op2) {
Swapped = true;
} else {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst
- << " vs. " << *RootInst << " (operand " << j << ")\n");
+ LLVM_DEBUG(dbgs()
+ << "LRR: iteration root match failed at " << *BaseInst
+ << " vs. " << *RootInst << " (operand " << j << ")\n");
return false;
}
}
@@ -1434,8 +1439,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
hasUsesOutsideLoop(BaseInst, L)) ||
(!PossibleRedLastSet.count(RootInst) &&
hasUsesOutsideLoop(RootInst, L))) {
- DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst <<
- " vs. " << *RootInst << " (uses outside loop)\n");
+ LLVM_DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst
+ << " vs. " << *RootInst << " (uses outside loop)\n");
return false;
}
@@ -1452,8 +1457,8 @@ bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) {
"Mismatched set sizes!");
}
- DEBUG(dbgs() << "LRR: Matched all iteration increments for " <<
- *IV << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Matched all iteration increments for " << *IV
+ << "\n");
return true;
}
@@ -1465,7 +1470,7 @@ void LoopReroll::DAGRootTracker::replace(const SCEV *IterCount) {
J != JE;) {
unsigned I = Uses[&*J].find_first();
if (I > 0 && I < IL_All) {
- DEBUG(dbgs() << "LRR: removing: " << *J << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: removing: " << *J << "\n");
J++->eraseFromParent();
continue;
}
@@ -1618,17 +1623,17 @@ bool LoopReroll::ReductionTracker::validateSelected() {
int Iter = PossibleRedIter[J];
if (Iter != PrevIter && Iter != PrevIter + 1 &&
!PossibleReds[i].getReducedValue()->isAssociative()) {
- DEBUG(dbgs() << "LRR: Out-of-order non-associative reduction: " <<
- J << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Out-of-order non-associative reduction: "
+ << J << "\n");
return false;
}
if (Iter != PrevIter) {
if (Count != BaseCount) {
- DEBUG(dbgs() << "LRR: Iteration " << PrevIter <<
- " reduction use count " << Count <<
- " is not equal to the base use count " <<
- BaseCount << "\n");
+ LLVM_DEBUG(dbgs()
+ << "LRR: Iteration " << PrevIter << " reduction use count "
+ << Count << " is not equal to the base use count "
+ << BaseCount << "\n");
return false;
}
@@ -1724,8 +1729,8 @@ bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header,
if (!DAGRoots.findRoots())
return false;
- DEBUG(dbgs() << "LRR: Found all root induction increments for: " <<
- *IV << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: Found all root induction increments for: " << *IV
+ << "\n");
if (!DAGRoots.validate(Reductions))
return false;
@@ -1753,9 +1758,9 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
BasicBlock *Header = L->getHeader();
- DEBUG(dbgs() << "LRR: F[" << Header->getParent()->getName() <<
- "] Loop %" << Header->getName() << " (" <<
- L->getNumBlocks() << " block(s))\n");
+ LLVM_DEBUG(dbgs() << "LRR: F[" << Header->getParent()->getName() << "] Loop %"
+ << Header->getName() << " (" << L->getNumBlocks()
+ << " block(s))\n");
// For now, we'll handle only single BB loops.
if (L->getNumBlocks() > 1)
@@ -1766,8 +1771,8 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
const SCEV *LIBETC = SE->getBackedgeTakenCount(L);
const SCEV *IterCount = SE->getAddExpr(LIBETC, SE->getOne(LIBETC->getType()));
- DEBUG(dbgs() << "\n Before Reroll:\n" << *(L->getHeader()) << "\n");
- DEBUG(dbgs() << "LRR: iteration count = " << *IterCount << "\n");
+ LLVM_DEBUG(dbgs() << "\n Before Reroll:\n" << *(L->getHeader()) << "\n");
+ LLVM_DEBUG(dbgs() << "LRR: iteration count = " << *IterCount << "\n");
// First, we need to find the induction variable with respect to which we can
// reroll (there may be several possible options).
@@ -1777,7 +1782,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
collectPossibleIVs(L, PossibleIVs);
if (PossibleIVs.empty()) {
- DEBUG(dbgs() << "LRR: No possible IVs found\n");
+ LLVM_DEBUG(dbgs() << "LRR: No possible IVs found\n");
return false;
}
@@ -1792,7 +1797,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
Changed = true;
break;
}
- DEBUG(dbgs() << "\n After Reroll:\n" << *(L->getHeader()) << "\n");
+ LLVM_DEBUG(dbgs() << "\n After Reroll:\n" << *(L->getHeader()) << "\n");
// Trip count of L has changed so SE must be re-evaluated.
if (Changed)
diff --git a/lib/Transforms/Scalar/LoopSink.cpp b/lib/Transforms/Scalar/LoopSink.cpp
index a2983e60b8fd..731506d91077 100644
--- a/lib/Transforms/Scalar/LoopSink.cpp
+++ b/lib/Transforms/Scalar/LoopSink.cpp
@@ -224,11 +224,11 @@ static bool sinkInstruction(Loop &L, Instruction &I,
}
// Replaces uses of I with IC in blocks dominated by N
replaceDominatedUsesWith(&I, IC, DT, N);
- DEBUG(dbgs() << "Sinking a clone of " << I << " To: " << N->getName()
- << '\n');
+ LLVM_DEBUG(dbgs() << "Sinking a clone of " << I << " To: " << N->getName()
+ << '\n');
NumLoopSunkCloned++;
}
- DEBUG(dbgs() << "Sinking " << I << " To: " << MoveBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Sinking " << I << " To: " << MoveBB->getName() << '\n');
NumLoopSunk++;
I.moveBefore(&*MoveBB->getFirstInsertionPt());
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 4c0b3cc808c7..b46dc7493903 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2424,8 +2424,8 @@ LSRInstance::OptimizeLoopTermCond() {
}
}
- DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
- << *Cond << '\n');
+ LLVM_DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
+ << *Cond << '\n');
// It's possible for the setcc instruction to be anywhere in the loop, and
// possible for it to have multiple users. If it is not immediately before
@@ -2666,7 +2666,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
if (Types.size() == 1)
Types.clear();
- DEBUG(print_factors_and_types(dbgs()));
+ LLVM_DEBUG(print_factors_and_types(dbgs()));
}
/// Helper for CollectChains that finds an IV operand (computed by an AddRec in
@@ -2797,10 +2797,9 @@ isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
return false;
if (!Users.empty()) {
- DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
- for (Instruction *Inst : Users) {
- dbgs() << " " << *Inst << "\n";
- });
+ LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
+ for (Instruction *Inst
+ : Users) { dbgs() << " " << *Inst << "\n"; });
return false;
}
assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
@@ -2853,8 +2852,8 @@ isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
// the stride.
cost -= NumReusedIncrements;
- DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
- << "\n");
+ LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
+ << "\n");
return cost < 0;
}
@@ -2907,7 +2906,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
if (isa<PHINode>(UserInst))
return;
if (NChains >= MaxChains && !StressIVChain) {
- DEBUG(dbgs() << "IV Chain Limit\n");
+ LLVM_DEBUG(dbgs() << "IV Chain Limit\n");
return;
}
LastIncExpr = OperExpr;
@@ -2920,11 +2919,11 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
OperExprBase));
ChainUsersVec.resize(NChains);
- DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
- << ") IV=" << *LastIncExpr << "\n");
+ LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
+ << ") IV=" << *LastIncExpr << "\n");
} else {
- DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
- << ") IV+" << *LastIncExpr << "\n");
+ LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
+ << ") IV+" << *LastIncExpr << "\n");
// Add this IV user to the end of the chain.
IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
}
@@ -2994,7 +2993,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
/// loop latch. This will discover chains on side paths, but requires
/// maintaining multiple copies of the Chains state.
void LSRInstance::CollectChains() {
- DEBUG(dbgs() << "Collecting IV Chains.\n");
+ LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n");
SmallVector<ChainUsers, 8> ChainUsersVec;
SmallVector<BasicBlock *,8> LatchPath;
@@ -3063,10 +3062,10 @@ void LSRInstance::CollectChains() {
void LSRInstance::FinalizeChain(IVChain &Chain) {
assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
- DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
+ LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
for (const IVInc &Inc : Chain) {
- DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n");
+ LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n");
auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
IVIncSet.insert(UseI);
@@ -3123,11 +3122,11 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
}
if (IVOpIter == IVOpEnd) {
// Gracefully give up on this chain.
- DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
+ LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
return;
}
- DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
+ LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
Type *IVTy = IVSrc->getType();
Type *IntTy = SE.getEffectiveSCEVType(IVTy);
const SCEV *LeftOverExpr = nullptr;
@@ -3203,7 +3202,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
find(UserInst->operands(), U.getOperandValToReplace());
assert(UseI != UserInst->op_end() && "cannot find IV operand");
if (IVIncSet.count(UseI)) {
- DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');
+ LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');
continue;
}
@@ -3279,7 +3278,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
}
}
- DEBUG(print_fixups(dbgs()));
+ LLVM_DEBUG(print_fixups(dbgs()));
}
/// Insert a formula for the given expression into the given use, separating out
@@ -3995,10 +3994,11 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
if (Imms.size() == 1)
continue;
- DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
- for (const auto &Entry : Imms)
- dbgs() << ' ' << Entry.first;
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
+ for (const auto &Entry
+ : Imms) dbgs()
+ << ' ' << Entry.first;
+ dbgs() << '\n');
// Examine each offset.
for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
@@ -4010,7 +4010,8 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
if (!isa<SCEVConstant>(OrigReg) &&
UsedByIndicesMap[Reg].count() == 1) {
- DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
+ LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
+ << '\n');
continue;
}
@@ -4159,9 +4160,9 @@ LSRInstance::GenerateAllReuseFormulae() {
GenerateCrossUseConstantOffsets();
- DEBUG(dbgs() << "\n"
- "After generating reuse formulae:\n";
- print_uses(dbgs()));
+ LLVM_DEBUG(dbgs() << "\n"
+ "After generating reuse formulae:\n";
+ print_uses(dbgs()));
}
/// If there are multiple formulae with the same set of registers used
@@ -4183,7 +4184,8 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
LSRUse &LU = Uses[LUIdx];
- DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
+ dbgs() << '\n');
bool Any = false;
for (size_t FIdx = 0, NumForms = LU.Formulae.size();
@@ -4207,8 +4209,8 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
// as the basis of rediscovering the desired formula that uses an AddRec
// corresponding to the existing phi. Once all formulae have been
// generated, these initial losers may be pruned.
- DEBUG(dbgs() << " Filtering loser "; F.print(dbgs());
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " Filtering loser "; F.print(dbgs());
+ dbgs() << "\n");
}
else {
SmallVector<const SCEV *, 4> Key;
@@ -4235,10 +4237,10 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
CostBest.RateFormula(TTI, Best, Regs, VisitedRegs, L, SE, DT, LU);
if (CostF.isLess(CostBest, TTI))
std::swap(F, Best);
- DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
- dbgs() << "\n"
- " in favor of formula "; Best.print(dbgs());
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
+ dbgs() << "\n"
+ " in favor of formula ";
+ Best.print(dbgs()); dbgs() << '\n');
}
#ifndef NDEBUG
ChangedFormulae = true;
@@ -4257,11 +4259,11 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
BestFormulae.clear();
}
- DEBUG(if (ChangedFormulae) {
- dbgs() << "\n"
- "After filtering out undesirable candidates:\n";
- print_uses(dbgs());
- });
+ LLVM_DEBUG(if (ChangedFormulae) {
+ dbgs() << "\n"
+ "After filtering out undesirable candidates:\n";
+ print_uses(dbgs());
+ });
}
// This is a rough guess that seems to work fairly well.
@@ -4290,11 +4292,11 @@ size_t LSRInstance::EstimateSearchSpaceComplexity() const {
/// register pressure); remove it to simplify the system.
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
- DEBUG(dbgs() << "The search space is too complex.\n");
+ LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
- DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
- "which use a superset of registers used by other "
- "formulae.\n");
+ LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
+ "which use a superset of registers used by other "
+ "formulae.\n");
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
LSRUse &LU = Uses[LUIdx];
@@ -4312,7 +4314,8 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
(I - F.BaseRegs.begin()));
if (LU.HasFormulaWithSameRegs(NewF)) {
- DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs());
+ dbgs() << '\n');
LU.DeleteFormula(F);
--i;
--e;
@@ -4327,8 +4330,8 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
(I - F.BaseRegs.begin()));
if (LU.HasFormulaWithSameRegs(NewF)) {
- DEBUG(dbgs() << " Deleting "; F.print(dbgs());
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs());
+ dbgs() << '\n');
LU.DeleteFormula(F);
--i;
--e;
@@ -4343,8 +4346,7 @@ void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
LU.RecomputeRegs(LUIdx, RegUses);
}
- DEBUG(dbgs() << "After pre-selection:\n";
- print_uses(dbgs()));
+ LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
}
@@ -4354,9 +4356,10 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
if (EstimateSearchSpaceComplexity() < ComplexityLimit)
return;
- DEBUG(dbgs() << "The search space is too complex.\n"
- "Narrowing the search space by assuming that uses separated "
- "by a constant offset will use the same registers.\n");
+ LLVM_DEBUG(
+ dbgs() << "The search space is too complex.\n"
+ "Narrowing the search space by assuming that uses separated "
+ "by a constant offset will use the same registers.\n");
// This is especially useful for unrolled loops.
@@ -4374,7 +4377,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
LU.Kind, LU.AccessTy))
continue;
- DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n');
LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
@@ -4382,7 +4385,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
for (LSRFixup &Fixup : LU.Fixups) {
Fixup.Offset += F.BaseOffset;
LUThatHas->pushFixup(Fixup);
- DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
+ LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
}
// Delete formulae from the new use which are no longer legal.
@@ -4391,8 +4394,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
Formula &F = LUThatHas->Formulae[i];
if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
LUThatHas->Kind, LUThatHas->AccessTy, F)) {
- DEBUG(dbgs() << " Deleting "; F.print(dbgs());
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
LUThatHas->DeleteFormula(F);
--i;
--e;
@@ -4411,7 +4413,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
}
}
- DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
+ LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
/// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
@@ -4419,15 +4421,14 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
/// eliminate.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
- DEBUG(dbgs() << "The search space is too complex.\n");
+ LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
- DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
- "undesirable dedicated registers.\n");
+ LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
+ "undesirable dedicated registers.\n");
FilterOutUndesirableDedicatedRegisters();
- DEBUG(dbgs() << "After pre-selection:\n";
- print_uses(dbgs()));
+ LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
}
@@ -4444,9 +4445,10 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
if (EstimateSearchSpaceComplexity() < ComplexityLimit)
return;
- DEBUG(dbgs() << "The search space is too complex.\n"
- "Narrowing the search space by choosing the best Formula "
- "from the Formulae with the same Scale and ScaledReg.\n");
+ LLVM_DEBUG(
+ dbgs() << "The search space is too complex.\n"
+ "Narrowing the search space by choosing the best Formula "
+ "from the Formulae with the same Scale and ScaledReg.\n");
// Map the "Scale * ScaledReg" pair to the best formula of current LSRUse.
using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;
@@ -4460,7 +4462,8 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
LSRUse &LU = Uses[LUIdx];
- DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
+ dbgs() << '\n');
// Return true if Formula FA is better than Formula FB.
auto IsBetterThan = [&](Formula &FA, Formula &FB) {
@@ -4504,10 +4507,10 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
Formula &Best = LU.Formulae[P.first->second];
if (IsBetterThan(F, Best))
std::swap(F, Best);
- DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
- dbgs() << "\n"
- " in favor of formula ";
- Best.print(dbgs()); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
+ dbgs() << "\n"
+ " in favor of formula ";
+ Best.print(dbgs()); dbgs() << '\n');
#ifndef NDEBUG
ChangedFormulae = true;
#endif
@@ -4523,7 +4526,7 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
BestFormulae.clear();
}
- DEBUG(if (ChangedFormulae) {
+ LLVM_DEBUG(if (ChangedFormulae) {
dbgs() << "\n"
"After filtering out undesirable candidates:\n";
print_uses(dbgs());
@@ -4582,7 +4585,7 @@ void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
// Used in each formula of a solution (in the example above this is reg(c)).
// We can skip them in calculations.
SmallPtrSet<const SCEV *, 4> UniqRegs;
- DEBUG(dbgs() << "The search space is too complex.\n");
+ LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
// Map each register to the probability of it not being selected.
DenseMap <const SCEV *, float> RegNumMap;
@@ -4602,7 +4605,8 @@ void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
RegNumMap.insert(std::make_pair(Reg, PNotSel));
}
- DEBUG(dbgs() << "Narrowing the search space by deleting costly formulas\n");
+ LLVM_DEBUG(
+ dbgs() << "Narrowing the search space by deleting costly formulas\n");
// Delete formulae where the expected number of registers is high.
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
@@ -4644,26 +4648,25 @@ void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
MinIdx = i;
}
}
- DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs());
- dbgs() << " with min reg num " << FMinRegNum << '\n');
+ LLVM_DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs());
+ dbgs() << " with min reg num " << FMinRegNum << '\n');
if (MinIdx != 0)
std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
while (LU.Formulae.size() != 1) {
- DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs());
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs());
+ dbgs() << '\n');
LU.Formulae.pop_back();
}
LU.RecomputeRegs(LUIdx, RegUses);
assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
Formula &F = LU.Formulae[0];
- DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n');
// When we choose the formula, the regs become unique.
UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
if (F.ScaledReg)
UniqRegs.insert(F.ScaledReg);
}
- DEBUG(dbgs() << "After pre-selection:\n";
- print_uses(dbgs()));
+ LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
/// Pick a register which seems likely to be profitable, and then in any use
@@ -4676,7 +4679,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
// Ok, we have too many formulae on our hands to conveniently handle.
// Use a rough heuristic to thin out the list.
- DEBUG(dbgs() << "The search space is too complex.\n");
+ LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
// Pick the register which is used by the most LSRUses, which is likely
// to be a good reuse register candidate.
@@ -4697,8 +4700,8 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
}
}
- DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
- << " will yield profitable reuse.\n");
+ LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
+ << " will yield profitable reuse.\n");
Taken.insert(Best);
// In any use with formulae that reference this register, delete formulae
@@ -4711,7 +4714,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
Formula &F = LU.Formulae[i];
if (!F.referencesReg(Best)) {
- DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
LU.DeleteFormula(F);
--e;
--i;
@@ -4725,8 +4728,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
LU.RecomputeRegs(LUIdx, RegUses);
}
- DEBUG(dbgs() << "After pre-selection:\n";
- print_uses(dbgs()));
+ LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}
}
@@ -4808,11 +4810,11 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
if (F.getNumRegs() == 1 && Workspace.size() == 1)
VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
} else {
- DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
- dbgs() << ".\n Regs:";
- for (const SCEV *S : NewRegs)
- dbgs() << ' ' << *S;
- dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
+ dbgs() << ".\n Regs:"; for (const SCEV *S
+ : NewRegs) dbgs()
+ << ' ' << *S;
+ dbgs() << '\n');
SolutionCost = NewCost;
Solution = Workspace;
@@ -4837,22 +4839,22 @@ void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
CurRegs, VisitedRegs);
if (Solution.empty()) {
- DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
+ LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
return;
}
// Ok, we've now made all our decisions.
- DEBUG(dbgs() << "\n"
- "The chosen solution requires "; SolutionCost.print(dbgs());
- dbgs() << ":\n";
- for (size_t i = 0, e = Uses.size(); i != e; ++i) {
- dbgs() << " ";
- Uses[i].print(dbgs());
- dbgs() << "\n"
- " ";
- Solution[i]->print(dbgs());
- dbgs() << '\n';
- });
+ LLVM_DEBUG(dbgs() << "\n"
+ "The chosen solution requires ";
+ SolutionCost.print(dbgs()); dbgs() << ":\n";
+ for (size_t i = 0, e = Uses.size(); i != e; ++i) {
+ dbgs() << " ";
+ Uses[i].print(dbgs());
+ dbgs() << "\n"
+ " ";
+ Solution[i]->print(dbgs());
+ dbgs() << '\n';
+ });
assert(Solution.size() == Uses.size() && "Malformed solution!");
}
@@ -5326,7 +5328,8 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
for (const IVStrideUse &U : IU) {
if (++NumUsers > MaxIVUsers) {
(void)U;
- DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U << "\n");
+ LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
+ << "\n");
return;
}
// Bail out if we have a PHI on an EHPad that gets a value from a
@@ -5359,9 +5362,9 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
}
#endif // DEBUG
- DEBUG(dbgs() << "\nLSR on loop ";
- L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
- dbgs() << ":\n");
+ LLVM_DEBUG(dbgs() << "\nLSR on loop ";
+ L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
+ dbgs() << ":\n");
// First, perform some low-level loop optimizations.
OptimizeShadowIV();
@@ -5372,7 +5375,7 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
// Skip nested loops until we can model them better with formulae.
if (!L->empty()) {
- DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
+ LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
return;
}
@@ -5383,8 +5386,8 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
CollectLoopInvariantFixupsAndFormulae();
assert(!Uses.empty() && "IVUsers reported at least one use");
- DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
- print_uses(dbgs()));
+ LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
+ print_uses(dbgs()));
// Now use the reuse data to generate a bunch of interesting ways
// to formulate the values needed for the uses.
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 822f880f2226..bbd1d3baa926 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -406,9 +406,9 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
// First accumulate the cost of this instruction.
if (!Cost.IsFree) {
UnrolledCost += TTI.getUserCost(I);
- DEBUG(dbgs() << "Adding cost of instruction (iteration " << Iteration
- << "): ");
- DEBUG(I->dump());
+ LLVM_DEBUG(dbgs() << "Adding cost of instruction (iteration "
+ << Iteration << "): ");
+ LLVM_DEBUG(I->dump());
}
// We must count the cost of every operand which is not free,
@@ -443,14 +443,14 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
assert(L->isLCSSAForm(DT) &&
"Must have loops in LCSSA form to track live-out values.");
- DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n");
+ LLVM_DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n");
// Simulate execution of each iteration of the loop counting instructions,
// which would be simplified.
// Since the same load will take different values on different iterations,
// we literally have to go through all of the loop's iterations.
for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) {
- DEBUG(dbgs() << " Analyzing iteration " << Iteration << "\n");
+ LLVM_DEBUG(dbgs() << " Analyzing iteration " << Iteration << "\n");
// Prepare for the iteration by collecting any simplified entry or backedge
// inputs.
@@ -525,10 +525,10 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
// If unrolled body turns out to be too big, bail out.
if (UnrolledCost > MaxUnrolledLoopSize) {
- DEBUG(dbgs() << " Exceeded threshold.. exiting.\n"
- << " UnrolledCost: " << UnrolledCost
- << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize
- << "\n");
+ LLVM_DEBUG(dbgs() << " Exceeded threshold.. exiting.\n"
+ << " UnrolledCost: " << UnrolledCost
+ << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize
+ << "\n");
return None;
}
}
@@ -581,8 +581,8 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
// If we found no optimization opportunities on the first iteration, we
// won't find them on later ones either.
if (UnrolledCost == RolledDynamicCost) {
- DEBUG(dbgs() << " No opportunities found.. exiting.\n"
- << " UnrolledCost: " << UnrolledCost << "\n");
+ LLVM_DEBUG(dbgs() << " No opportunities found.. exiting.\n"
+ << " UnrolledCost: " << UnrolledCost << "\n");
return None;
}
}
@@ -603,9 +603,9 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
}
}
- DEBUG(dbgs() << "Analysis finished:\n"
- << "UnrolledCost: " << UnrolledCost << ", "
- << "RolledDynamicCost: " << RolledDynamicCost << "\n");
+ LLVM_DEBUG(dbgs() << "Analysis finished:\n"
+ << "UnrolledCost: " << UnrolledCost << ", "
+ << "RolledDynamicCost: " << RolledDynamicCost << "\n");
return {{UnrolledCost, RolledDynamicCost}};
}
@@ -808,8 +808,8 @@ static bool computeUnrollCount(
if (TripCount) {
UP.Partial |= ExplicitUnroll;
if (!UP.Partial) {
- DEBUG(dbgs() << " will not try to unroll partially because "
- << "-unroll-allow-partial not given\n");
+ LLVM_DEBUG(dbgs() << " will not try to unroll partially because "
+ << "-unroll-allow-partial not given\n");
UP.Count = 0;
return false;
}
@@ -896,8 +896,9 @@ static bool computeUnrollCount(
// Reduce count based on the type of unrolling and the threshold values.
UP.Runtime |= PragmaEnableUnroll || PragmaCount > 0 || UserUnrollCount;
if (!UP.Runtime) {
- DEBUG(dbgs() << " will not try to unroll loop with runtime trip count "
- << "-unroll-runtime not given\n");
+ LLVM_DEBUG(
+ dbgs() << " will not try to unroll loop with runtime trip count "
+ << "-unroll-runtime not given\n");
UP.Count = 0;
return false;
}
@@ -917,12 +918,13 @@ static bool computeUnrollCount(
if (!UP.AllowRemainder && UP.Count != 0 && (TripMultiple % UP.Count) != 0) {
while (UP.Count != 0 && TripMultiple % UP.Count != 0)
UP.Count >>= 1;
- DEBUG(dbgs() << "Remainder loop is restricted (that could architecture "
- "specific or because the loop contains a convergent "
- "instruction), so unroll count must divide the trip "
- "multiple, "
- << TripMultiple << ". Reducing unroll count from "
- << OrigCount << " to " << UP.Count << ".\n");
+ LLVM_DEBUG(
+ dbgs() << "Remainder loop is restricted (that could architecture "
+ "specific or because the loop contains a convergent "
+ "instruction), so unroll count must divide the trip "
+ "multiple, "
+ << TripMultiple << ". Reducing unroll count from " << OrigCount
+ << " to " << UP.Count << ".\n");
using namespace ore;
@@ -944,7 +946,8 @@ static bool computeUnrollCount(
if (UP.Count > UP.MaxCount)
UP.Count = UP.MaxCount;
- DEBUG(dbgs() << " partially unrolling with count: " << UP.Count << "\n");
+ LLVM_DEBUG(dbgs() << " partially unrolling with count: " << UP.Count
+ << "\n");
if (UP.Count < 2)
UP.Count = 0;
return ExplicitUnroll;
@@ -957,12 +960,13 @@ static LoopUnrollResult tryToUnrollLoop(
Optional<unsigned> ProvidedCount, Optional<unsigned> ProvidedThreshold,
Optional<bool> ProvidedAllowPartial, Optional<bool> ProvidedRuntime,
Optional<bool> ProvidedUpperBound, Optional<bool> ProvidedAllowPeeling) {
- DEBUG(dbgs() << "Loop Unroll: F[" << L->getHeader()->getParent()->getName()
- << "] Loop %" << L->getHeader()->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Loop Unroll: F["
+ << L->getHeader()->getParent()->getName() << "] Loop %"
+ << L->getHeader()->getName() << "\n");
if (HasUnrollDisablePragma(L))
return LoopUnrollResult::Unmodified;
if (!L->isLoopSimplifyForm()) {
- DEBUG(
+ LLVM_DEBUG(
dbgs() << " Not unrolling loop which is not in loop-simplify form.\n");
return LoopUnrollResult::Unmodified;
}
@@ -984,14 +988,14 @@ static LoopUnrollResult tryToUnrollLoop(
unsigned LoopSize =
ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent,
TTI, EphValues, UP.BEInsns);
- DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
+ LLVM_DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
if (NotDuplicatable) {
- DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable"
- << " instructions.\n");
+ LLVM_DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable"
+ << " instructions.\n");
return LoopUnrollResult::Unmodified;
}
if (NumInlineCandidates != 0) {
- DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
+ LLVM_DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
return LoopUnrollResult::Unmodified;
}
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 91a30088fa07..b530f7c5dbde 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -298,9 +298,9 @@ bool LUAnalysisCache::countLoop(const Loop *L, const TargetTransformInfo &TTI,
MaxSize -= Props.SizeEstimation * Props.CanBeUnswitchedCount;
if (Metrics.notDuplicatable) {
- DEBUG(dbgs() << "NOT unswitching loop %"
- << L->getHeader()->getName() << ", contents cannot be "
- << "duplicated!\n");
+ LLVM_DEBUG(dbgs() << "NOT unswitching loop %" << L->getHeader()->getName()
+ << ", contents cannot be "
+ << "duplicated!\n");
return false;
}
}
@@ -856,20 +856,20 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val,
TerminatorInst *TI) {
// Check to see if it would be profitable to unswitch current loop.
if (!BranchesInfo.CostAllowsUnswitching()) {
- DEBUG(dbgs() << "NOT unswitching loop %"
- << currentLoop->getHeader()->getName()
- << " at non-trivial condition '" << *Val
- << "' == " << *LoopCond << "\n"
- << ". Cost too high.\n");
+ LLVM_DEBUG(dbgs() << "NOT unswitching loop %"
+ << currentLoop->getHeader()->getName()
+ << " at non-trivial condition '" << *Val
+ << "' == " << *LoopCond << "\n"
+ << ". Cost too high.\n");
return false;
}
if (hasBranchDivergence &&
getAnalysis<DivergenceAnalysis>().isDivergent(LoopCond)) {
- DEBUG(dbgs() << "NOT unswitching loop %"
- << currentLoop->getHeader()->getName()
- << " at non-trivial condition '" << *Val
- << "' == " << *LoopCond << "\n"
- << ". Condition is divergent.\n");
+ LLVM_DEBUG(dbgs() << "NOT unswitching loop %"
+ << currentLoop->getHeader()->getName()
+ << " at non-trivial condition '" << *Val
+ << "' == " << *LoopCond << "\n"
+ << ". Condition is divergent.\n");
return false;
}
@@ -970,11 +970,11 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val,
BasicBlock *ExitBlock,
TerminatorInst *TI) {
- DEBUG(dbgs() << "loop-unswitch: Trivial-Unswitch loop %"
- << loopHeader->getName() << " [" << L->getBlocks().size()
- << " blocks] in Function "
- << L->getHeader()->getParent()->getName() << " on cond: " << *Val
- << " == " << *Cond << "\n");
+ LLVM_DEBUG(dbgs() << "loop-unswitch: Trivial-Unswitch loop %"
+ << loopHeader->getName() << " [" << L->getBlocks().size()
+ << " blocks] in Function "
+ << L->getHeader()->getParent()->getName()
+ << " on cond: " << *Val << " == " << *Cond << "\n");
// First step, split the preheader, so that we know that there is a safe place
// to insert the conditional branch. We will change loopPreheader to have a
@@ -1196,10 +1196,10 @@ void LoopUnswitch::SplitExitEdges(Loop *L,
void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
Loop *L, TerminatorInst *TI) {
Function *F = loopHeader->getParent();
- DEBUG(dbgs() << "loop-unswitch: Unswitching loop %"
- << loopHeader->getName() << " [" << L->getBlocks().size()
- << " blocks] in Function " << F->getName()
- << " when '" << *Val << "' == " << *LIC << "\n");
+ LLVM_DEBUG(dbgs() << "loop-unswitch: Unswitching loop %"
+ << loopHeader->getName() << " [" << L->getBlocks().size()
+ << " blocks] in Function " << F->getName() << " when '"
+ << *Val << "' == " << *LIC << "\n");
if (auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>())
SEWP->getSE().forgetLoop(L);
@@ -1355,7 +1355,7 @@ static void RemoveFromWorklist(Instruction *I,
static void ReplaceUsesOfWith(Instruction *I, Value *V,
std::vector<Instruction*> &Worklist,
Loop *L, LPPassManager *LPM) {
- DEBUG(dbgs() << "Replace with '" << *V << "': " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "Replace with '" << *V << "': " << *I << "\n");
// Add uses to the worklist, which may be dead now.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
@@ -1524,7 +1524,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
// Simple DCE.
if (isInstructionTriviallyDead(I)) {
- DEBUG(dbgs() << "Remove dead instruction '" << *I << "\n");
+ LLVM_DEBUG(dbgs() << "Remove dead instruction '" << *I << "\n");
// Add uses to the worklist, which may be dead now.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
@@ -1557,8 +1557,8 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
if (!SinglePred) continue; // Nothing to do.
assert(SinglePred == Pred && "CFG broken");
- DEBUG(dbgs() << "Merging blocks: " << Pred->getName() << " <- "
- << Succ->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Merging blocks: " << Pred->getName() << " <- "
+ << Succ->getName() << "\n");
// Resolve any single entry PHI nodes in Succ.
while (PHINode *PN = dyn_cast<PHINode>(Succ->begin()))
diff --git a/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/lib/Transforms/Scalar/LoopVersioningLICM.cpp
index e0e2c1938aa8..06e86081e8a0 100644
--- a/lib/Transforms/Scalar/LoopVersioningLICM.cpp
+++ b/lib/Transforms/Scalar/LoopVersioningLICM.cpp
@@ -246,48 +246,47 @@ private:
bool LoopVersioningLICM::legalLoopStructure() {
// Loop must be in loop simplify form.
if (!CurLoop->isLoopSimplifyForm()) {
- DEBUG(
- dbgs() << " loop is not in loop-simplify form.\n");
+ LLVM_DEBUG(dbgs() << " loop is not in loop-simplify form.\n");
return false;
}
// Loop should be innermost loop, if not return false.
if (!CurLoop->getSubLoops().empty()) {
- DEBUG(dbgs() << " loop is not innermost\n");
+ LLVM_DEBUG(dbgs() << " loop is not innermost\n");
return false;
}
// Loop should have a single backedge, if not return false.
if (CurLoop->getNumBackEdges() != 1) {
- DEBUG(dbgs() << " loop has multiple backedges\n");
+ LLVM_DEBUG(dbgs() << " loop has multiple backedges\n");
return false;
}
// Loop must have a single exiting block, if not return false.
if (!CurLoop->getExitingBlock()) {
- DEBUG(dbgs() << " loop has multiple exiting block\n");
+ LLVM_DEBUG(dbgs() << " loop has multiple exiting block\n");
return false;
}
// We only handle bottom-tested loops, i.e. loops in which the condition is
// checked at the end of each iteration. With that we can assume that all
// instructions in the loop are executed the same number of times.
if (CurLoop->getExitingBlock() != CurLoop->getLoopLatch()) {
- DEBUG(dbgs() << " loop is not bottom tested\n");
+ LLVM_DEBUG(dbgs() << " loop is not bottom tested\n");
return false;
}
// Parallel loops must not have aliasing loop-invariant memory accesses.
// Hence we don't need to version anything in this case.
if (CurLoop->isAnnotatedParallel()) {
- DEBUG(dbgs() << " Parallel loop is not worth versioning\n");
+ LLVM_DEBUG(dbgs() << " Parallel loop is not worth versioning\n");
return false;
}
// Loop depth greater than LoopDepthThreshold is not allowed
if (CurLoop->getLoopDepth() > LoopDepthThreshold) {
- DEBUG(dbgs() << " loop depth is more then threshold\n");
+ LLVM_DEBUG(dbgs() << " loop depth is more then threshold\n");
return false;
}
// We need to be able to compute the loop trip count in order
// to generate the bound checks.
const SCEV *ExitCount = SE->getBackedgeTakenCount(CurLoop);
if (ExitCount == SE->getCouldNotCompute()) {
- DEBUG(dbgs() << " loop does not has trip count\n");
+ LLVM_DEBUG(dbgs() << " loop does not has trip count\n");
return false;
}
return true;
@@ -335,18 +334,18 @@ bool LoopVersioningLICM::legalLoopMemoryAccesses() {
}
// Ensure all memory accesses are of the same type.
if (!TypeSafety) {
- DEBUG(dbgs() << " Alias tracker type safety failed!\n");
+ LLVM_DEBUG(dbgs() << " Alias tracker type safety failed!\n");
return false;
}
// Ensure the loop body is not read-only.
if (!HasMod) {
- DEBUG(dbgs() << " No memory modified in loop body\n");
+ LLVM_DEBUG(dbgs() << " No memory modified in loop body\n");
return false;
}
// Make sure the alias set has a may-alias case.
// If there is no memory aliasing ambiguity, return false.
if (!HasMayAlias) {
- DEBUG(dbgs() << " No ambiguity in memory access.\n");
+ LLVM_DEBUG(dbgs() << " No ambiguity in memory access.\n");
return false;
}
return true;
@@ -362,12 +361,12 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
assert(I != nullptr && "Null instruction found!");
// Check function call safety
if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
- DEBUG(dbgs() << " Unsafe call site found.\n");
+ LLVM_DEBUG(dbgs() << " Unsafe call site found.\n");
return false;
}
// Avoid loops with the possibility of throwing
if (I->mayThrow()) {
- DEBUG(dbgs() << " May throw instruction found in loop body\n");
+ LLVM_DEBUG(dbgs() << " May throw instruction found in loop body\n");
return false;
}
// If the current instruction is a load instruction
@@ -375,7 +374,7 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
if (I->mayReadFromMemory()) {
LoadInst *Ld = dyn_cast<LoadInst>(I);
if (!Ld || !Ld->isSimple()) {
- DEBUG(dbgs() << " Found a non-simple load.\n");
+ LLVM_DEBUG(dbgs() << " Found a non-simple load.\n");
return false;
}
LoadAndStoreCounter++;
@@ -389,7 +388,7 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
else if (I->mayWriteToMemory()) {
StoreInst *St = dyn_cast<StoreInst>(I);
if (!St || !St->isSimple()) {
- DEBUG(dbgs() << " Found a non-simple store.\n");
+ LLVM_DEBUG(dbgs() << " Found a non-simple store.\n");
return false;
}
LoadAndStoreCounter++;
@@ -428,13 +427,14 @@ bool LoopVersioningLICM::legalLoopInstructions() {
LAI = &LAA->getInfo(CurLoop);
// Check LoopAccessInfo for need of runtime check.
if (LAI->getRuntimePointerChecking()->getChecks().empty()) {
- DEBUG(dbgs() << " LAA: Runtime check not found !!\n");
+ LLVM_DEBUG(dbgs() << " LAA: Runtime check not found !!\n");
return false;
}
// Number of runtime checks should be less than RuntimeMemoryCheckThreshold
if (LAI->getNumRuntimePointerChecks() >
VectorizerParams::RuntimeMemoryCheckThreshold) {
- DEBUG(dbgs() << " LAA: Runtime checks are more than threshold !!\n");
+ LLVM_DEBUG(
+ dbgs() << " LAA: Runtime checks are more than threshold !!\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "RuntimeCheck",
CurLoop->getStartLoc(),
@@ -448,23 +448,25 @@ bool LoopVersioningLICM::legalLoopInstructions() {
}
// Loop should have at least one invariant load or store instruction.
if (!InvariantCounter) {
- DEBUG(dbgs() << " Invariant not found !!\n");
+ LLVM_DEBUG(dbgs() << " Invariant not found !!\n");
return false;
}
// Read-only loops are not allowed.
if (IsReadOnlyLoop) {
- DEBUG(dbgs() << " Found a read-only loop!\n");
+ LLVM_DEBUG(dbgs() << " Found a read-only loop!\n");
return false;
}
// Profitability check:
// Check that the invariant access percentage meets the threshold.
if (InvariantCounter * 100 < InvariantThreshold * LoadAndStoreCounter) {
- DEBUG(dbgs()
- << " Invariant load & store are less then defined threshold\n");
- DEBUG(dbgs() << " Invariant loads & stores: "
- << ((InvariantCounter * 100) / LoadAndStoreCounter) << "%\n");
- DEBUG(dbgs() << " Invariant loads & store threshold: "
- << InvariantThreshold << "%\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Invariant load & store are less then defined threshold\n");
+ LLVM_DEBUG(dbgs() << " Invariant loads & stores: "
+ << ((InvariantCounter * 100) / LoadAndStoreCounter)
+ << "%\n");
+ LLVM_DEBUG(dbgs() << " Invariant loads & store threshold: "
+ << InvariantThreshold << "%\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "InvariantThreshold",
CurLoop->getStartLoc(),
@@ -497,16 +499,16 @@ bool LoopVersioningLICM::isLoopAlreadyVisited() {
/// Return true if legal else returns false.
bool LoopVersioningLICM::isLegalForVersioning() {
using namespace ore;
- DEBUG(dbgs() << "Loop: " << *CurLoop);
+ LLVM_DEBUG(dbgs() << "Loop: " << *CurLoop);
// Make sure we are not revisiting the same loop.
if (isLoopAlreadyVisited()) {
- DEBUG(
+ LLVM_DEBUG(
dbgs() << " Revisiting loop in LoopVersioningLICM not allowed.\n\n");
return false;
}
// Check loop structure legality.
if (!legalLoopStructure()) {
- DEBUG(
+ LLVM_DEBUG(
dbgs() << " Loop structure not suitable for LoopVersioningLICM\n\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "IllegalLoopStruct",
@@ -518,14 +520,16 @@ bool LoopVersioningLICM::isLegalForVersioning() {
}
// Check loop instruction legality.
if (!legalLoopInstructions()) {
- DEBUG(dbgs()
- << " Loop instructions not suitable for LoopVersioningLICM\n\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Loop instructions not suitable for LoopVersioningLICM\n\n");
return false;
}
// Check loop memory access legality.
if (!legalLoopMemoryAccesses()) {
- DEBUG(dbgs()
- << " Loop memory access not suitable for LoopVersioningLICM\n\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Loop memory access not suitable for LoopVersioningLICM\n\n");
ORE->emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "IllegalLoopMemoryAccess",
CurLoop->getStartLoc(),
@@ -535,7 +539,7 @@ bool LoopVersioningLICM::isLegalForVersioning() {
return false;
}
// Loop versioning is feasible, return true.
- DEBUG(dbgs() << " Loop Versioning found to be beneficial\n\n");
+ LLVM_DEBUG(dbgs() << " Loop Versioning found to be beneficial\n\n");
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "IsLegalForVersioning",
CurLoop->getStartLoc(), CurLoop->getHeader())
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index e21d09e65a1f..437c70745d31 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -479,10 +479,10 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
AMemSet =
Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
- DEBUG(dbgs() << "Replace stores:\n";
- for (Instruction *SI : Range.TheStores)
- dbgs() << *SI << '\n';
- dbgs() << "With: " << *AMemSet << '\n');
+ LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
+ : Range.TheStores) dbgs()
+ << *SI << '\n';
+ dbgs() << "With: " << *AMemSet << '\n');
if (!Range.TheStores.empty())
AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
@@ -603,7 +603,7 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
// We made it, we need to lift
for (auto *I : llvm::reverse(ToLift)) {
- DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
+ LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
I->moveBefore(P);
}
@@ -680,8 +680,8 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
LI->getPointerOperand(), findLoadAlignment(DL, LI), Size,
SI->isVolatile());
- DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
- << " => " << *M << "\n");
+ LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
+ << *M << "\n");
MD->removeInstruction(SI);
SI->eraseFromParent();
@@ -770,7 +770,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
Size, Align, SI->isVolatile());
- DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
+ LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
MD->removeInstruction(SI);
SI->eraseFromParent();
@@ -1294,8 +1294,8 @@ bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
MemoryLocation::getForSource(M)))
return false;
- DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
- << "\n");
+ LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
+ << "\n");
// If not, then we know we can transform this.
Type *ArgTys[3] = { M->getRawDest()->getType(),
@@ -1377,9 +1377,9 @@ bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
"tmpcast", CS.getInstruction());
- DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
- << " " << *MDep << "\n"
- << " " << *CS.getInstruction() << "\n");
+ LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
+ << " " << *MDep << "\n"
+ << " " << *CS.getInstruction() << "\n");
// Otherwise we're good! Update the byval argument.
CS.setArgument(ArgNo, TmpCast);
diff --git a/lib/Transforms/Scalar/MergeICmps.cpp b/lib/Transforms/Scalar/MergeICmps.cpp
index bb1b8a531105..57dd2292dfb5 100644
--- a/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/lib/Transforms/Scalar/MergeICmps.cpp
@@ -76,25 +76,25 @@ struct BCEAtom {
BCEAtom visitICmpLoadOperand(Value *const Val) {
BCEAtom Result;
if (auto *const LoadI = dyn_cast<LoadInst>(Val)) {
- DEBUG(dbgs() << "load\n");
+ LLVM_DEBUG(dbgs() << "load\n");
if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {
- DEBUG(dbgs() << "used outside of block\n");
+ LLVM_DEBUG(dbgs() << "used outside of block\n");
return {};
}
if (LoadI->isVolatile()) {
- DEBUG(dbgs() << "volatile\n");
+ LLVM_DEBUG(dbgs() << "volatile\n");
return {};
}
Value *const Addr = LoadI->getOperand(0);
if (auto *const GEP = dyn_cast<GetElementPtrInst>(Addr)) {
- DEBUG(dbgs() << "GEP\n");
+ LLVM_DEBUG(dbgs() << "GEP\n");
if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {
- DEBUG(dbgs() << "used outside of block\n");
+ LLVM_DEBUG(dbgs() << "used outside of block\n");
return {};
}
const auto &DL = GEP->getModule()->getDataLayout();
if (!isDereferenceablePointer(GEP, DL)) {
- DEBUG(dbgs() << "not dereferenceable\n");
+ LLVM_DEBUG(dbgs() << "not dereferenceable\n");
// We need to make sure that we can do the comparisons in any order, so we
// require the memory to be unconditionally dereferenceable.
return {};
@@ -251,13 +251,13 @@ BCECmpBlock visitICmp(const ICmpInst *const CmpI,
// If there are any other uses of the comparison, we cannot merge it with
// other comparisons as we would create an orphan use of the value.
if (!CmpI->hasOneUse()) {
- DEBUG(dbgs() << "cmp has several uses\n");
+ LLVM_DEBUG(dbgs() << "cmp has several uses\n");
return {};
}
if (CmpI->getPredicate() == ExpectedPredicate) {
- DEBUG(dbgs() << "cmp "
- << (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
- << "\n");
+ LLVM_DEBUG(dbgs() << "cmp "
+ << (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
+ << "\n");
auto Lhs = visitICmpLoadOperand(CmpI->getOperand(0));
if (!Lhs.Base()) return {};
auto Rhs = visitICmpLoadOperand(CmpI->getOperand(1));
@@ -275,7 +275,7 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
if (Block->empty()) return {};
auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
if (!BranchI) return {};
- DEBUG(dbgs() << "branch\n");
+ LLVM_DEBUG(dbgs() << "branch\n");
if (BranchI->isUnconditional()) {
// In this case, we expect an incoming value which is the result of the
// comparison. This is the last link in the chain of comparisons (note
@@ -283,7 +283,7 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
// can be reordered).
auto *const CmpI = dyn_cast<ICmpInst>(Val);
if (!CmpI) return {};
- DEBUG(dbgs() << "icmp\n");
+ LLVM_DEBUG(dbgs() << "icmp\n");
auto Result = visitICmp(CmpI, ICmpInst::ICMP_EQ);
Result.CmpI = CmpI;
Result.BranchI = BranchI;
@@ -292,12 +292,12 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
// In this case, we expect a constant incoming value (the comparison is
// chained).
const auto *const Const = dyn_cast<ConstantInt>(Val);
- DEBUG(dbgs() << "const\n");
+ LLVM_DEBUG(dbgs() << "const\n");
if (!Const->isZero()) return {};
- DEBUG(dbgs() << "false\n");
+ LLVM_DEBUG(dbgs() << "false\n");
auto *const CmpI = dyn_cast<ICmpInst>(BranchI->getCondition());
if (!CmpI) return {};
- DEBUG(dbgs() << "icmp\n");
+ LLVM_DEBUG(dbgs() << "icmp\n");
assert(BranchI->getNumSuccessors() == 2 && "expecting a cond branch");
BasicBlock *const FalseBlock = BranchI->getSuccessor(1);
auto Result = visitICmp(
@@ -311,12 +311,13 @@ BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
static inline void enqueueBlock(std::vector<BCECmpBlock> &Comparisons,
BCECmpBlock &Comparison) {
- DEBUG(dbgs() << "Block '" << Comparison.BB->getName() << "': Found cmp of "
- << Comparison.SizeBits() << " bits between "
- << Comparison.Lhs().Base() << " + " << Comparison.Lhs().Offset
- << " and " << Comparison.Rhs().Base() << " + "
- << Comparison.Rhs().Offset << "\n");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Block '" << Comparison.BB->getName()
+ << "': Found cmp of " << Comparison.SizeBits()
+ << " bits between " << Comparison.Lhs().Base() << " + "
+ << Comparison.Lhs().Offset << " and "
+ << Comparison.Rhs().Base() << " + "
+ << Comparison.Rhs().Offset << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
Comparisons.push_back(Comparison);
}
@@ -367,12 +368,12 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi)
Block, Phi.getParent());
Comparison.BB = Block;
if (!Comparison.IsValid()) {
- DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n");
+ LLVM_DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n");
return;
}
if (Comparison.doesOtherWork()) {
- DEBUG(dbgs() << "block '" << Comparison.BB->getName()
- << "' does extra work besides compare\n");
+ LLVM_DEBUG(dbgs() << "block '" << Comparison.BB->getName()
+ << "' does extra work besides compare\n");
if (Comparisons.empty()) {
// This is the initial block in the chain, in case this block does other
// work, we can try to split the block and move the irrelevant
@@ -388,13 +389,15 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi)
//
// NOTE: we only handle block with single predecessor for now.
if (Comparison.canSplit()) {
- DEBUG(dbgs() << "Split initial block '" << Comparison.BB->getName()
- << "' that does extra work besides compare\n");
+ LLVM_DEBUG(dbgs()
+ << "Split initial block '" << Comparison.BB->getName()
+ << "' that does extra work besides compare\n");
Comparison.RequireSplit = true;
enqueueBlock(Comparisons, Comparison);
} else {
- DEBUG(dbgs() << "ignoring initial block '" << Comparison.BB->getName()
- << "' that does extra work besides compare\n");
+ LLVM_DEBUG(dbgs()
+ << "ignoring initial block '" << Comparison.BB->getName()
+ << "' that does extra work besides compare\n");
}
continue;
}
@@ -428,7 +431,7 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi)
// It is possible we have no suitable comparison to merge.
if (Comparisons.empty()) {
- DEBUG(dbgs() << "chain with no BCE basic blocks, no merge\n");
+ LLVM_DEBUG(dbgs() << "chain with no BCE basic blocks, no merge\n");
return;
}
EntryBlock_ = Comparisons[0].BB;
@@ -549,7 +552,7 @@ void BCECmpChain::mergeComparisons(ArrayRef<BCECmpBlock> Comparisons,
if (C != Comparisons.end())
C->split(EntryBlock_);
- DEBUG(dbgs() << "Merging " << Comparisons.size() << " comparisons\n");
+ LLVM_DEBUG(dbgs() << "Merging " << Comparisons.size() << " comparisons\n");
const auto TotalSize =
std::accumulate(Comparisons.begin(), Comparisons.end(), 0,
[](int Size, const BCECmpBlock &C) {
@@ -594,17 +597,17 @@ void BCECmpChain::mergeComparisons(ArrayRef<BCECmpBlock> Comparisons,
} else {
assert(Comparisons.size() == 1);
// There are no blocks to merge, but we still need to update the branches.
- DEBUG(dbgs() << "Only one comparison, updating branches\n");
+ LLVM_DEBUG(dbgs() << "Only one comparison, updating branches\n");
if (NextBBInChain) {
if (FirstComparison.BranchI->isConditional()) {
- DEBUG(dbgs() << "conditional -> conditional\n");
+ LLVM_DEBUG(dbgs() << "conditional -> conditional\n");
// Just update the "true" target, the "false" target should already be
// the phi block.
assert(FirstComparison.BranchI->getSuccessor(1) == Phi.getParent());
FirstComparison.BranchI->setSuccessor(0, NextBBInChain);
Phi.addIncoming(ConstantInt::getFalse(Context), BB);
} else {
- DEBUG(dbgs() << "unconditional -> conditional\n");
+ LLVM_DEBUG(dbgs() << "unconditional -> conditional\n");
// Replace the unconditional branch by a conditional one.
FirstComparison.BranchI->eraseFromParent();
IRBuilder<> Builder(BB);
@@ -614,14 +617,14 @@ void BCECmpChain::mergeComparisons(ArrayRef<BCECmpBlock> Comparisons,
}
} else {
if (FirstComparison.BranchI->isConditional()) {
- DEBUG(dbgs() << "conditional -> unconditional\n");
+ LLVM_DEBUG(dbgs() << "conditional -> unconditional\n");
// Replace the conditional branch by an unconditional one.
FirstComparison.BranchI->eraseFromParent();
IRBuilder<> Builder(BB);
Builder.CreateBr(Phi.getParent());
Phi.addIncoming(FirstComparison.CmpI, BB);
} else {
- DEBUG(dbgs() << "unconditional -> unconditional\n");
+ LLVM_DEBUG(dbgs() << "unconditional -> unconditional\n");
Phi.addIncoming(FirstComparison.CmpI, BB);
}
}
@@ -639,22 +642,22 @@ std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
if (CurBlock->hasAddressTaken()) {
// Somebody is jumping to the block through an address, all bets are
// off.
- DEBUG(dbgs() << "skip: block " << BlockIndex
- << " has its address taken\n");
+ LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
+ << " has its address taken\n");
return {};
}
Blocks[BlockIndex] = CurBlock;
auto *SinglePredecessor = CurBlock->getSinglePredecessor();
if (!SinglePredecessor) {
// The block has two or more predecessors.
- DEBUG(dbgs() << "skip: block " << BlockIndex
- << " has two or more predecessors\n");
+ LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
+ << " has two or more predecessors\n");
return {};
}
if (Phi.getBasicBlockIndex(SinglePredecessor) < 0) {
// The block does not link back to the phi.
- DEBUG(dbgs() << "skip: block " << BlockIndex
- << " does not link back to the phi\n");
+ LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
+ << " does not link back to the phi\n");
return {};
}
CurBlock = SinglePredecessor;
@@ -664,9 +667,9 @@ std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
}
bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) {
- DEBUG(dbgs() << "processPhi()\n");
+ LLVM_DEBUG(dbgs() << "processPhi()\n");
if (Phi.getNumIncomingValues() <= 1) {
- DEBUG(dbgs() << "skip: only one incoming value in phi\n");
+ LLVM_DEBUG(dbgs() << "skip: only one incoming value in phi\n");
return false;
}
// We are looking for something that has the following structure:
@@ -690,7 +693,7 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) {
if (isa<ConstantInt>(Phi.getIncomingValue(I))) continue;
if (LastBlock) {
// There are several non-constant values.
- DEBUG(dbgs() << "skip: several non-constant values\n");
+ LLVM_DEBUG(dbgs() << "skip: several non-constant values\n");
return false;
}
if (!isa<ICmpInst>(Phi.getIncomingValue(I)) ||
@@ -701,7 +704,7 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) {
// producing block more than once.
//
// This is an uncommon case, so we bail.
- DEBUG(
+ LLVM_DEBUG(
dbgs()
<< "skip: non-constant value not from cmp or not from last block.\n");
return false;
@@ -710,11 +713,11 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) {
}
if (!LastBlock) {
// There is no non-constant block.
- DEBUG(dbgs() << "skip: no non-constant block\n");
+ LLVM_DEBUG(dbgs() << "skip: no non-constant block\n");
return false;
}
if (LastBlock->getSingleSuccessor() != Phi.getParent()) {
- DEBUG(dbgs() << "skip: last block non-phi successor\n");
+ LLVM_DEBUG(dbgs() << "skip: last block non-phi successor\n");
return false;
}
@@ -724,7 +727,7 @@ bool processPhi(PHINode &Phi, const TargetLibraryInfo *const TLI) {
BCECmpChain CmpChain(Blocks, Phi);
if (CmpChain.size() < 2) {
- DEBUG(dbgs() << "skip: only one compare block\n");
+ LLVM_DEBUG(dbgs() << "skip: only one compare block\n");
return false;
}
@@ -759,7 +762,7 @@ class MergeICmps : public FunctionPass {
PreservedAnalyses MergeICmps::runImpl(Function &F, const TargetLibraryInfo *TLI,
const TargetTransformInfo *TTI) {
- DEBUG(dbgs() << "MergeICmpsPass: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "MergeICmpsPass: " << F.getName() << "\n");
// We only try merging comparisons if the target wants to expand memcmp later.
// The rationale is to avoid turning small chains into memcmp calls.
diff --git a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index 07d451447580..72ab175deca7 100644
--- a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -180,7 +180,7 @@ bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start,
///
StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,
StoreInst *Store0) {
- DEBUG(dbgs() << "can Sink? : "; Store0->dump(); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "can Sink? : "; Store0->dump(); dbgs() << "\n");
BasicBlock *BB0 = Store0->getParent();
for (Instruction &Inst : reverse(*BB1)) {
auto *Store1 = dyn_cast<StoreInst>(&Inst);
@@ -229,9 +229,9 @@ bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,
if (A0 && A1 && A0->isIdenticalTo(A1) && A0->hasOneUse() &&
(A0->getParent() == S0->getParent()) && A1->hasOneUse() &&
(A1->getParent() == S1->getParent()) && isa<GetElementPtrInst>(A0)) {
- DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump();
- dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n";
- dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump();
+ dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n";
+ dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n");
// Hoist the instruction.
BasicBlock::iterator InsertPt = BB->getFirstInsertionPt();
// Intersect optional metadata.
@@ -313,7 +313,7 @@ bool MergedLoadStoreMotion::mergeStores(BasicBlock *T) {
break;
RBI = Pred0->rbegin();
RBE = Pred0->rend();
- DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump());
+ LLVM_DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump());
}
}
return MergedStores;
@@ -323,7 +323,7 @@ bool MergedLoadStoreMotion::run(Function &F, AliasAnalysis &AA) {
this->AA = &AA;
bool Changed = false;
- DEBUG(dbgs() << "Instruction Merger\n");
+ LLVM_DEBUG(dbgs() << "Instruction Merger\n");
// Merge unconditional branches, allowing PRE to catch more
// optimization opportunities.
diff --git a/lib/Transforms/Scalar/NewGVN.cpp b/lib/Transforms/Scalar/NewGVN.cpp
index 369f59ec3c39..0cf9979b40aa 100644
--- a/lib/Transforms/Scalar/NewGVN.cpp
+++ b/lib/Transforms/Scalar/NewGVN.cpp
@@ -221,13 +221,13 @@ private:
Components.resize(Components.size() + 1);
auto &Component = Components.back();
Component.insert(I);
- DEBUG(dbgs() << "Component root is " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n");
InComponent.insert(I);
ValueToComponent[I] = ComponentID;
// Pop a component off the stack and label it.
while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) {
auto *Member = Stack.back();
- DEBUG(dbgs() << "Component member is " << *Member << "\n");
+ LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n");
Component.insert(Member);
InComponent.insert(Member);
ValueToComponent[Member] = ComponentID;
@@ -1068,8 +1068,8 @@ const Expression *NewGVN::checkSimplificationResults(Expression *E,
return nullptr;
if (auto *C = dyn_cast<Constant>(V)) {
if (I)
- DEBUG(dbgs() << "Simplified " << *I << " to "
- << " constant " << *C << "\n");
+ LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
+ << " constant " << *C << "\n");
NumGVNOpsSimplified++;
assert(isa<BasicExpression>(E) &&
"We should always have had a basic expression here");
@@ -1077,8 +1077,8 @@ const Expression *NewGVN::checkSimplificationResults(Expression *E,
return createConstantExpression(C);
} else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
if (I)
- DEBUG(dbgs() << "Simplified " << *I << " to "
- << " variable " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
+ << " variable " << *V << "\n");
deleteExpression(E);
return createVariableExpression(V);
}
@@ -1101,8 +1101,8 @@ const Expression *NewGVN::checkSimplificationResults(Expression *E,
}
if (I)
- DEBUG(dbgs() << "Simplified " << *I << " to "
- << " expression " << *CC->getDefiningExpr() << "\n");
+ LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
+ << " expression " << *CC->getDefiningExpr() << "\n");
NumGVNOpsSimplified++;
deleteExpression(E);
return CC->getDefiningExpr();
@@ -1422,8 +1422,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
if (Offset >= 0) {
if (auto *C = dyn_cast<Constant>(
lookupOperandLeader(DepSI->getValueOperand()))) {
- DEBUG(dbgs() << "Coercing load from store " << *DepSI << " to constant "
- << *C << "\n");
+ LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI
+ << " to constant " << *C << "\n");
return createConstantExpression(
getConstantStoreValueForLoad(C, Offset, LoadType, DL));
}
@@ -1438,8 +1438,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI)))
if (auto *PossibleConstant =
getConstantLoadValueForLoad(C, Offset, LoadType, DL)) {
- DEBUG(dbgs() << "Coercing load from load " << *LI << " to constant "
- << *PossibleConstant << "\n");
+ LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI
+ << " to constant " << *PossibleConstant << "\n");
return createConstantExpression(PossibleConstant);
}
}
@@ -1448,8 +1448,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
if (Offset >= 0) {
if (auto *PossibleConstant =
getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) {
- DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
- << " to constant " << *PossibleConstant << "\n");
+ LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
+ << " to constant " << *PossibleConstant << "\n");
return createConstantExpression(PossibleConstant);
}
}
@@ -1530,7 +1530,7 @@ NewGVN::performSymbolicPredicateInfoEvaluation(Instruction *I) const {
if (!PI)
return nullptr;
- DEBUG(dbgs() << "Found predicate info from instruction !\n");
+ LLVM_DEBUG(dbgs() << "Found predicate info from instruction !\n");
auto *PWC = dyn_cast<PredicateWithCondition>(PI);
if (!PWC)
@@ -1570,7 +1570,7 @@ NewGVN::performSymbolicPredicateInfoEvaluation(Instruction *I) const {
return nullptr;
if (CopyOf != Cmp->getOperand(0) && CopyOf != Cmp->getOperand(1)) {
- DEBUG(dbgs() << "Copy is not of any condition operands!\n");
+ LLVM_DEBUG(dbgs() << "Copy is not of any condition operands!\n");
return nullptr;
}
Value *FirstOp = lookupOperandLeader(Cmp->getOperand(0));
@@ -1653,10 +1653,11 @@ bool NewGVN::setMemoryClass(const MemoryAccess *From,
CongruenceClass *NewClass) {
assert(NewClass &&
"Every MemoryAccess should be getting mapped to a non-null class");
- DEBUG(dbgs() << "Setting " << *From);
- DEBUG(dbgs() << " equivalent to congruence class ");
- DEBUG(dbgs() << NewClass->getID() << " with current MemoryAccess leader ");
- DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n");
+ LLVM_DEBUG(dbgs() << "Setting " << *From);
+ LLVM_DEBUG(dbgs() << " equivalent to congruence class ");
+ LLVM_DEBUG(dbgs() << NewClass->getID()
+ << " with current MemoryAccess leader ");
+ LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n");
auto LookupResult = MemoryAccessToClass.find(From);
bool Changed = false;
@@ -1674,11 +1675,11 @@ bool NewGVN::setMemoryClass(const MemoryAccess *From,
OldClass->setMemoryLeader(nullptr);
} else {
OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
- DEBUG(dbgs() << "Memory class leader change for class "
- << OldClass->getID() << " to "
- << *OldClass->getMemoryLeader()
- << " due to removal of a memory member " << *From
- << "\n");
+ LLVM_DEBUG(dbgs() << "Memory class leader change for class "
+ << OldClass->getID() << " to "
+ << *OldClass->getMemoryLeader()
+ << " due to removal of a memory member " << *From
+ << "\n");
markMemoryLeaderChangeTouched(OldClass);
}
}
@@ -1754,12 +1755,13 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
// If it has undef at this point, it means there are no non-undef arguments,
// and thus, the value of the phi node must be undef.
if (HasUndef) {
- DEBUG(dbgs() << "PHI Node " << *I
- << " has no non-undef arguments, valuing it as undef\n");
+ LLVM_DEBUG(
+ dbgs() << "PHI Node " << *I
+ << " has no non-undef arguments, valuing it as undef\n");
return createConstantExpression(UndefValue::get(I->getType()));
}
- DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
+ LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
deleteExpression(E);
return createDeadExpression();
}
@@ -1798,8 +1800,8 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
InstrToDFSNum(AllSameValue) > InstrToDFSNum(I))
return E;
NumGVNPhisAllSame++;
- DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue
- << "\n");
+ LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue
+ << "\n");
deleteExpression(E);
return createVariableOrConstant(AllSameValue);
}
@@ -2092,7 +2094,7 @@ void NewGVN::markUsersTouched(Value *V) {
}
void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const {
- DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n");
+ LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n");
MemoryToUsers[To].insert(U);
}
@@ -2228,8 +2230,9 @@ void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
(isa<StoreInst>(I) && NewClass->getStoreCount() == 1));
NewClass->setMemoryLeader(InstMA);
// Mark it touched if we didn't just create a singleton
- DEBUG(dbgs() << "Memory class leader change for class " << NewClass->getID()
- << " due to new memory instruction becoming leader\n");
+ LLVM_DEBUG(dbgs() << "Memory class leader change for class "
+ << NewClass->getID()
+ << " due to new memory instruction becoming leader\n");
markMemoryLeaderChangeTouched(NewClass);
}
setMemoryClass(InstMA, NewClass);
@@ -2237,10 +2240,10 @@ void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
if (OldClass->getMemoryLeader() == InstMA) {
if (!OldClass->definesNoMemory()) {
OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
- DEBUG(dbgs() << "Memory class leader change for class "
- << OldClass->getID() << " to "
- << *OldClass->getMemoryLeader()
- << " due to removal of old leader " << *InstMA << "\n");
+ LLVM_DEBUG(dbgs() << "Memory class leader change for class "
+ << OldClass->getID() << " to "
+ << *OldClass->getMemoryLeader()
+ << " due to removal of old leader " << *InstMA << "\n");
markMemoryLeaderChangeTouched(OldClass);
} else
OldClass->setMemoryLeader(nullptr);
@@ -2277,9 +2280,10 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
NewClass->setStoredValue(SE->getStoredValue());
markValueLeaderChangeTouched(NewClass);
// Shift the new class leader to be the store
- DEBUG(dbgs() << "Changing leader of congruence class "
- << NewClass->getID() << " from " << *NewClass->getLeader()
- << " to " << *SI << " because store joined class\n");
+ LLVM_DEBUG(dbgs() << "Changing leader of congruence class "
+ << NewClass->getID() << " from "
+ << *NewClass->getLeader() << " to " << *SI
+ << " because store joined class\n");
// If we changed the leader, we have to mark it changed because we don't
// know what it will do to symbolic evaluation.
NewClass->setLeader(SI);
@@ -2299,8 +2303,8 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
// See if we destroyed the class or need to swap leaders.
if (OldClass->empty() && OldClass != TOPClass) {
if (OldClass->getDefiningExpr()) {
- DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr()
- << " from table\n");
+ LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr()
+ << " from table\n");
// We erase it as an exact expression to make sure we don't just erase an
// equivalent one.
auto Iter = ExpressionToClass.find_as(
@@ -2317,8 +2321,8 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
// When the leader changes, the value numbering of
// everything may change due to symbolization changes, so we need to
// reprocess.
- DEBUG(dbgs() << "Value class leader change for class " << OldClass->getID()
- << "\n");
+ LLVM_DEBUG(dbgs() << "Value class leader change for class "
+ << OldClass->getID() << "\n");
++NumGVNLeaderChanges;
// Destroy the stored value if there are no more stores to represent it.
// Note that this is basically clean up for the expression removal that
@@ -2381,12 +2385,14 @@ void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) {
"VariableExpression should have been handled already");
EClass = NewClass;
- DEBUG(dbgs() << "Created new congruence class for " << *I
- << " using expression " << *E << " at " << NewClass->getID()
- << " and leader " << *(NewClass->getLeader()));
+ LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I
+ << " using expression " << *E << " at "
+ << NewClass->getID() << " and leader "
+ << *(NewClass->getLeader()));
if (NewClass->getStoredValue())
- DEBUG(dbgs() << " and stored value " << *(NewClass->getStoredValue()));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " and stored value "
+ << *(NewClass->getStoredValue()));
+ LLVM_DEBUG(dbgs() << "\n");
} else {
EClass = lookupResult.first->second;
if (isa<ConstantExpression>(E))
@@ -2404,8 +2410,8 @@ void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) {
bool ClassChanged = IClass != EClass;
bool LeaderChanged = LeaderChanges.erase(I);
if (ClassChanged || LeaderChanged) {
- DEBUG(dbgs() << "New class " << EClass->getID() << " for expression " << *E
- << "\n");
+ LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression "
+ << *E << "\n");
if (ClassChanged) {
moveValueToNewCongruenceClass(I, E, IClass, EClass);
markPhiOfOpsChanged(E);
@@ -2443,13 +2449,15 @@ void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) {
if (ReachableEdges.insert({From, To}).second) {
// If this block wasn't reachable before, all instructions are touched.
if (ReachableBlocks.insert(To).second) {
- DEBUG(dbgs() << "Block " << getBlockName(To) << " marked reachable\n");
+ LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
+ << " marked reachable\n");
const auto &InstRange = BlockInstRange.lookup(To);
TouchedInstructions.set(InstRange.first, InstRange.second);
} else {
- DEBUG(dbgs() << "Block " << getBlockName(To)
- << " was reachable, but new edge {" << getBlockName(From)
- << "," << getBlockName(To) << "} to it found\n");
+ LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
+ << " was reachable, but new edge {"
+ << getBlockName(From) << "," << getBlockName(To)
+ << "} to it found\n");
// We've made an edge reachable to an existing block, which may
// impact predicates. Otherwise, only mark the phi nodes as touched, as
@@ -2496,12 +2504,12 @@ void NewGVN::processOutgoingEdges(TerminatorInst *TI, BasicBlock *B) {
BasicBlock *FalseSucc = BR->getSuccessor(1);
if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) {
if (CI->isOne()) {
- DEBUG(dbgs() << "Condition for Terminator " << *TI
- << " evaluated to true\n");
+ LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
+ << " evaluated to true\n");
updateReachableEdge(B, TrueSucc);
} else if (CI->isZero()) {
- DEBUG(dbgs() << "Condition for Terminator " << *TI
- << " evaluated to false\n");
+ LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
+ << " evaluated to false\n");
updateReachableEdge(B, FalseSucc);
}
} else {
@@ -2686,8 +2694,8 @@ Value *NewGVN::findLeaderForInst(Instruction *TransInst,
auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB);
if (!FoundVal) {
ExpressionToPhiOfOps[E].insert(OrigInst);
- DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst
- << " in block " << getBlockName(PredBB) << "\n");
+ LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst
+ << " in block " << getBlockName(PredBB) << "\n");
return nullptr;
}
if (auto *SI = dyn_cast<StoreInst>(FoundVal))
@@ -2736,15 +2744,16 @@ NewGVN::makePossiblePHIOfOps(Instruction *I,
auto *ValuePHI = RealToTemp.lookup(Op);
if (!ValuePHI)
continue;
- DEBUG(dbgs() << "Found possible dependent phi of ops\n");
+ LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n");
Op = ValuePHI;
}
OpPHI = cast<PHINode>(Op);
if (!SamePHIBlock) {
SamePHIBlock = getBlockForValue(OpPHI);
} else if (SamePHIBlock != getBlockForValue(OpPHI)) {
- DEBUG(dbgs()
- << "PHIs for operands are not all in the same block, aborting\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "PHIs for operands are not all in the same block, aborting\n");
return nullptr;
}
// No point in doing this for one-operand phis.
@@ -2812,25 +2821,26 @@ NewGVN::makePossiblePHIOfOps(Instruction *I,
}
Deps.insert(CurrentDeps.begin(), CurrentDeps.end());
} else {
- DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
- << getBlockName(PredBB)
- << " because the block is unreachable\n");
+ LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
+ << getBlockName(PredBB)
+ << " because the block is unreachable\n");
FoundVal = UndefValue::get(I->getType());
RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
}
PHIOps.push_back({FoundVal, PredBB});
- DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in "
- << getBlockName(PredBB) << "\n");
+ LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in "
+ << getBlockName(PredBB) << "\n");
}
for (auto Dep : Deps)
addAdditionalUsers(Dep, I);
sortPHIOps(PHIOps);
auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock);
if (isa<ConstantExpression>(E) || isa<VariableExpression>(E)) {
- DEBUG(dbgs()
- << "Not creating real PHI of ops because it simplified to existing "
- "value or constant\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Not creating real PHI of ops because it simplified to existing "
+ "value or constant\n");
return E;
}
auto *ValuePHI = RealToTemp.lookup(I);
@@ -2855,7 +2865,8 @@ NewGVN::makePossiblePHIOfOps(Instruction *I,
}
}
RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
- DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I
+ << "\n");
return E;
}
@@ -2927,8 +2938,9 @@ void NewGVN::initializeCongruenceClasses(Function &F) {
void NewGVN::cleanupTables() {
for (unsigned i = 0, e = CongruenceClasses.size(); i != e; ++i) {
- DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID()
- << " has " << CongruenceClasses[i]->size() << " members\n");
+ LLVM_DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID()
+ << " has " << CongruenceClasses[i]->size()
+ << " members\n");
// Make sure we delete the congruence class (probably worth switching to
// a unique_ptr at some point).
delete CongruenceClasses[i];
@@ -2998,7 +3010,7 @@ std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B,
// we change its DFS number so that it doesn't get value numbered.
if (isInstructionTriviallyDead(&I, TLI)) {
InstrDFS[&I] = 0;
- DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n");
+ LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n");
markInstructionForDeletion(&I);
continue;
}
@@ -3064,9 +3076,10 @@ void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) {
[&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; });
if (AllEqual)
- DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue << "\n");
+ LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue
+ << "\n");
else
- DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
+ LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
// If it's equal to something, it's in that class. Otherwise, it has to be in
// a class where it is the leader (other things may be equivalent to it, but
// it needs to start off in its own class, which means it must have been the
@@ -3085,7 +3098,7 @@ void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) {
// Value number a single instruction, symbolically evaluating, performing
// congruence finding, and updating mappings.
void NewGVN::valueNumberInstruction(Instruction *I) {
- DEBUG(dbgs() << "Processing instruction " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n");
if (!I->isTerminator()) {
const Expression *Symbolized = nullptr;
SmallPtrSet<Value *, 2> Visited;
@@ -3271,7 +3284,7 @@ void NewGVN::verifyMemoryCongruency() const {
// and redoing the iteration to see if anything changed.
void NewGVN::verifyIterationSettled(Function &F) {
#ifndef NDEBUG
- DEBUG(dbgs() << "Beginning iteration verification\n");
+ LLVM_DEBUG(dbgs() << "Beginning iteration verification\n");
if (DebugCounter::isCounterSet(VNCounter))
DebugCounter::setCounterValue(VNCounter, StartingVNCounter);
@@ -3389,9 +3402,9 @@ void NewGVN::iterateTouchedInstructions() {
// If it's not reachable, erase any touched instructions and move on.
if (!BlockReachable) {
TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second);
- DEBUG(dbgs() << "Skipping instructions in block "
- << getBlockName(CurrBlock)
- << " because it is unreachable\n");
+ LLVM_DEBUG(dbgs() << "Skipping instructions in block "
+ << getBlockName(CurrBlock)
+ << " because it is unreachable\n");
continue;
}
updateProcessedCount(CurrBlock);
@@ -3401,7 +3414,7 @@ void NewGVN::iterateTouchedInstructions() {
TouchedInstructions.reset(InstrNum);
if (auto *MP = dyn_cast<MemoryPhi>(V)) {
- DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n");
+ LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n");
valueNumberMemoryPhi(MP);
} else if (auto *I = dyn_cast<Instruction>(V)) {
valueNumberInstruction(I);
@@ -3471,8 +3484,8 @@ bool NewGVN::runGVN() {
// Initialize the touched instructions to include the entry block.
const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock());
TouchedInstructions.set(InstRange.first, InstRange.second);
- DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock())
- << " marked reachable\n");
+ LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock())
+ << " marked reachable\n");
ReachableBlocks.insert(&F.getEntryBlock());
iterateTouchedInstructions();
@@ -3497,8 +3510,8 @@ bool NewGVN::runGVN() {
};
for (auto &BB : make_filter_range(F, UnreachableBlockPred)) {
- DEBUG(dbgs() << "We believe block " << getBlockName(&BB)
- << " is unreachable\n");
+ LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB)
+ << " is unreachable\n");
deleteInstructionsInBlock(&BB);
Changed = true;
}
@@ -3720,7 +3733,7 @@ static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
}
void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
- DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
+ LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
++NumGVNBlocksDeleted;
// Delete the instructions backwards, as it has a reduced likelihood of having
@@ -3747,12 +3760,12 @@ void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
}
void NewGVN::markInstructionForDeletion(Instruction *I) {
- DEBUG(dbgs() << "Marking " << *I << " for deletion\n");
+ LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n");
InstructionsToErase.insert(I);
}
void NewGVN::replaceInstruction(Instruction *I, Value *V) {
- DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n");
patchAndReplaceAllUsesWith(I, V);
// We save the actual erasing to avoid invalidating memory
// dependencies until we are done with everything.
@@ -3878,9 +3891,10 @@ bool NewGVN::eliminateInstructions(Function &F) {
auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) {
for (auto &Operand : PHI->incoming_values())
if (!ReachableEdges.count({PHI->getIncomingBlock(Operand), BB})) {
- DEBUG(dbgs() << "Replacing incoming value of " << PHI << " for block "
- << getBlockName(PHI->getIncomingBlock(Operand))
- << " with undef due to it being unreachable\n");
+ LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI
+ << " for block "
+ << getBlockName(PHI->getIncomingBlock(Operand))
+ << " with undef due to it being unreachable\n");
Operand.set(UndefValue::get(PHI->getType()));
}
};
@@ -3912,7 +3926,8 @@ bool NewGVN::eliminateInstructions(Function &F) {
// Map to store the use counts
DenseMap<const Value *, unsigned int> UseCounts;
for (auto *CC : reverse(CongruenceClasses)) {
- DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID() << "\n");
+ LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID()
+ << "\n");
// Track the equivalent store info so we can decide whether to try
// dead store elimination.
SmallVector<ValueDFS, 8> PossibleDeadStores;
@@ -3950,8 +3965,8 @@ bool NewGVN::eliminateInstructions(Function &F) {
MembersLeft.insert(Member);
continue;
}
- DEBUG(dbgs() << "Found replacement " << *(Leader) << " for " << *Member
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for "
+ << *Member << "\n");
auto *I = cast<Instruction>(Member);
assert(Leader != I && "About to accidentally remove our leader");
replaceInstruction(I, Leader);
@@ -3991,24 +4006,24 @@ bool NewGVN::eliminateInstructions(Function &F) {
// remove from temp instruction list.
AllTempInstructions.erase(PN);
auto *DefBlock = getBlockForValue(Def);
- DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def
- << " into block "
- << getBlockName(getBlockForValue(Def)) << "\n");
+ LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def
+ << " into block "
+ << getBlockName(getBlockForValue(Def)) << "\n");
PN->insertBefore(&DefBlock->front());
Def = PN;
NumGVNPHIOfOpsEliminations++;
}
if (EliminationStack.empty()) {
- DEBUG(dbgs() << "Elimination Stack is empty\n");
+ LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n");
} else {
- DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
- << EliminationStack.dfs_back().first << ","
- << EliminationStack.dfs_back().second << ")\n");
+ LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
+ << EliminationStack.dfs_back().first << ","
+ << EliminationStack.dfs_back().second << ")\n");
}
- DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << ","
- << MemberDFSOut << ")\n");
+ LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << ","
+ << MemberDFSOut << ")\n");
// First, we see if we are out of scope or empty. If so,
// and there are equivalences, we try to replace the top of
// stack with equivalences (if it's on the stack, it must
@@ -4090,8 +4105,9 @@ bool NewGVN::eliminateInstructions(Function &F) {
// Don't replace our existing users with ourselves.
if (U->get() == DominatingLeader)
continue;
- DEBUG(dbgs() << "Found replacement " << *DominatingLeader << " for "
- << *U->get() << " in " << *(U->getUser()) << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Found replacement " << *DominatingLeader << " for "
+ << *U->get() << " in " << *(U->getUser()) << "\n");
// If we replaced something in an instruction, handle the patching of
// metadata. Skip this if we are replacing predicateinfo with its
@@ -4157,8 +4173,8 @@ bool NewGVN::eliminateInstructions(Function &F) {
(void)Leader;
assert(DT->dominates(Leader->getParent(), Member->getParent()));
// Member is dominated by Leader, and thus dead
- DEBUG(dbgs() << "Marking dead store " << *Member
- << " that is dominated by " << *Leader << "\n");
+ LLVM_DEBUG(dbgs() << "Marking dead store " << *Member
+ << " that is dominated by " << *Leader << "\n");
markInstructionForDeletion(Member);
CC->erase(Member);
++NumGVNDeadStores;
diff --git a/lib/Transforms/Scalar/PlaceSafepoints.cpp b/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 6b1ae2c3cab9..a06c424f82b8 100644
--- a/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -323,7 +323,7 @@ bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
// avoiding the runtime cost of the actual safepoint.
if (!AllBackedges) {
if (mustBeFiniteCountedLoop(L, SE, Pred)) {
- DEBUG(dbgs() << "skipping safepoint placement in finite loop\n");
+ LLVM_DEBUG(dbgs() << "skipping safepoint placement in finite loop\n");
FiniteExecution++;
continue;
}
@@ -332,7 +332,9 @@ bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
// Note: This is only semantically legal since we won't do any further
// IPO or inlining before the actual call insertion. If we hadn't, we
// might later lose this call safepoint.
- DEBUG(dbgs() << "skipping safepoint placement due to unconditional call\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "skipping safepoint placement due to unconditional call\n");
CallInLoop++;
continue;
}
@@ -348,7 +350,7 @@ bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
// variables) and branches to the true header
TerminatorInst *Term = Pred->getTerminator();
- DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term);
+ LLVM_DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term);
PollLocations.push_back(Term);
}
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 0d1d57d64868..bb9802a66ea1 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -169,8 +169,8 @@ void ReassociatePass::BuildRankMap(Function &F,
// Assign distinct ranks to function arguments.
for (auto &Arg : F.args()) {
ValueRankMap[&Arg] = ++Rank;
- DEBUG(dbgs() << "Calculated Rank[" << Arg.getName() << "] = " << Rank
- << "\n");
+ LLVM_DEBUG(dbgs() << "Calculated Rank[" << Arg.getName() << "] = " << Rank
+ << "\n");
}
// Traverse basic blocks in ReversePostOrder
@@ -210,7 +210,8 @@ unsigned ReassociatePass::getRank(Value *V) {
!BinaryOperator::isFNeg(I))
++Rank;
- DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank << "\n");
+ LLVM_DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank
+ << "\n");
return ValueRankMap[I] = Rank;
}
@@ -445,7 +446,7 @@ using RepeatedValue = std::pair<Value*, APInt>;
/// type and thus make the expression bigger.
static bool LinearizeExprTree(BinaryOperator *I,
SmallVectorImpl<RepeatedValue> &Ops) {
- DEBUG(dbgs() << "LINEARIZE: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "LINEARIZE: " << *I << '\n');
unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits();
unsigned Opcode = I->getOpcode();
assert(I->isAssociative() && I->isCommutative() &&
@@ -494,14 +495,14 @@ static bool LinearizeExprTree(BinaryOperator *I,
for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx) { // Visit operands.
Value *Op = I->getOperand(OpIdx);
APInt Weight = P.second; // Number of paths to this operand.
- DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n");
+ LLVM_DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n");
assert(!Op->use_empty() && "No uses, so how did we get to it?!");
// If this is a binary operation of the right kind with only one use then
// add its operands to the expression.
if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
assert(Visited.insert(Op).second && "Not first visit!");
- DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n");
+ LLVM_DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n");
Worklist.push_back(std::make_pair(BO, Weight));
continue;
}
@@ -514,7 +515,8 @@ static bool LinearizeExprTree(BinaryOperator *I,
if (!Op->hasOneUse()) {
// This value has uses not accounted for by the expression, so it is
// not safe to modify. Mark it as being a leaf.
- DEBUG(dbgs() << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n");
+ LLVM_DEBUG(dbgs()
+ << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n");
LeafOrder.push_back(Op);
Leaves[Op] = Weight;
continue;
@@ -540,7 +542,7 @@ static bool LinearizeExprTree(BinaryOperator *I,
// to the expression, then no longer consider it to be a leaf and add
// its operands to the expression.
if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
- DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n");
+ LLVM_DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n");
Worklist.push_back(std::make_pair(BO, It->second));
Leaves.erase(It);
continue;
@@ -573,9 +575,10 @@ static bool LinearizeExprTree(BinaryOperator *I,
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op))
if ((Opcode == Instruction::Mul && BinaryOperator::isNeg(BO)) ||
(Opcode == Instruction::FMul && BinaryOperator::isFNeg(BO))) {
- DEBUG(dbgs() << "MORPH LEAF: " << *Op << " (" << Weight << ") TO ");
+ LLVM_DEBUG(dbgs()
+ << "MORPH LEAF: " << *Op << " (" << Weight << ") TO ");
BO = LowerNegateToMultiply(BO);
- DEBUG(dbgs() << *BO << '\n');
+ LLVM_DEBUG(dbgs() << *BO << '\n');
Worklist.push_back(std::make_pair(BO, Weight));
Changed = true;
continue;
@@ -583,7 +586,7 @@ static bool LinearizeExprTree(BinaryOperator *I,
// Failed to morph into an expression of the right type. This really is
// a leaf.
- DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n");
+ LLVM_DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n");
assert(!isReassociableOp(Op, Opcode) && "Value was morphed?");
LeafOrder.push_back(Op);
Leaves[Op] = Weight;
@@ -675,9 +678,9 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I,
if (NewLHS == OldRHS && NewRHS == OldLHS) {
// The order of the operands was reversed. Swap them.
- DEBUG(dbgs() << "RA: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
Op->swapOperands();
- DEBUG(dbgs() << "TO: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
MadeChange = true;
++NumChanged;
break;
@@ -685,7 +688,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I,
// The new operation differs non-trivially from the original. Overwrite
// the old operands with the new ones.
- DEBUG(dbgs() << "RA: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
if (NewLHS != OldLHS) {
BinaryOperator *BO = isReassociableOp(OldLHS, Opcode);
if (BO && !NotRewritable.count(BO))
@@ -698,7 +701,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I,
NodesToRewrite.push_back(BO);
Op->setOperand(1, NewRHS);
}
- DEBUG(dbgs() << "TO: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
ExpressionChanged = Op;
MadeChange = true;
@@ -711,7 +714,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I,
// while the right-hand side will be the current element of Ops.
Value *NewRHS = Ops[i].Op;
if (NewRHS != Op->getOperand(1)) {
- DEBUG(dbgs() << "RA: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
if (NewRHS == Op->getOperand(0)) {
// The new right-hand side was already present as the left operand. If
// we are lucky then swapping the operands will sort out both of them.
@@ -724,7 +727,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I,
Op->setOperand(1, NewRHS);
ExpressionChanged = Op;
}
- DEBUG(dbgs() << "TO: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
MadeChange = true;
++NumChanged;
}
@@ -756,9 +759,9 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I,
NewOp = NodesToRewrite.pop_back_val();
}
- DEBUG(dbgs() << "RA: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
Op->setOperand(0, NewOp);
- DEBUG(dbgs() << "TO: " << *Op << '\n');
+ LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
ExpressionChanged = Op;
MadeChange = true;
++NumChanged;
@@ -941,7 +944,7 @@ static BinaryOperator *BreakUpSubtract(Instruction *Sub,
Sub->replaceAllUsesWith(New);
New->setDebugLoc(Sub->getDebugLoc());
- DEBUG(dbgs() << "Negated: " << *New << '\n');
+ LLVM_DEBUG(dbgs() << "Negated: " << *New << '\n');
return New;
}
@@ -1427,7 +1430,8 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I,
++NumFound;
} while (i != Ops.size() && Ops[i].Op == TheOp);
- DEBUG(dbgs() << "\nFACTORING [" << NumFound << "]: " << *TheOp << '\n');
+ LLVM_DEBUG(dbgs() << "\nFACTORING [" << NumFound << "]: " << *TheOp
+ << '\n');
++NumFactor;
// Insert a new multiply.
@@ -1565,7 +1569,8 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I,
// If any factor occurred more than one time, we can pull it out.
if (MaxOcc > 1) {
- DEBUG(dbgs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << '\n');
+ LLVM_DEBUG(dbgs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal
+ << '\n');
++NumFactor;
// Create a new instruction that uses the MaxOccVal twice. If we don't do
@@ -1888,7 +1893,7 @@ void ReassociatePass::RecursivelyEraseDeadInsts(Instruction *I,
/// Zap the given instruction, adding interesting operands to the work list.
void ReassociatePass::EraseInst(Instruction *I) {
assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!");
- DEBUG(dbgs() << "Erasing dead inst: "; I->dump());
+ LLVM_DEBUG(dbgs() << "Erasing dead inst: "; I->dump());
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
// Erase the dead instruction.
@@ -2139,7 +2144,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) {
ValueEntry(getRank(E.first), E.first));
}
- DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n');
// Now that we have linearized the tree to a list and have gathered all of
// the operands and their ranks, sort the operands by their rank. Use a
@@ -2157,7 +2162,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) {
return;
// This expression tree simplified to something that isn't a tree;
// eliminate it.
- DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n');
+ LLVM_DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n');
I->replaceAllUsesWith(V);
if (Instruction *VI = dyn_cast<Instruction>(V))
if (I->getDebugLoc())
@@ -2188,7 +2193,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) {
}
}
- DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n');
if (Ops.size() == 1) {
if (Ops[0].Op == I)
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index bf851ef2e71b..342ba9bc1451 100644
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -616,8 +616,8 @@ static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
Value *&Cached = Cache[I];
if (!Cached) {
Cached = findBaseDefiningValue(I).BDV;
- DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
- << Cached->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
+ << Cached->getName() << "\n");
}
assert(Cache[I] != nullptr);
return Cached;
@@ -848,9 +848,9 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
}
#ifndef NDEBUG
- DEBUG(dbgs() << "States after initialization:\n");
+ LLVM_DEBUG(dbgs() << "States after initialization:\n");
for (auto Pair : States) {
- DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
+ LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
}
#endif
@@ -923,9 +923,9 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
}
#ifndef NDEBUG
- DEBUG(dbgs() << "States after meet iteration:\n");
+ LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
for (auto Pair : States) {
- DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
+ LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
}
#endif
@@ -1124,10 +1124,11 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
assert(BDV && Base);
assert(!isKnownBaseResult(BDV) && "why did it get added?");
- DEBUG(dbgs() << "Updating base value cache"
- << " for: " << BDV->getName() << " from: "
- << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
- << " to: " << Base->getName() << "\n");
+ LLVM_DEBUG(
+ dbgs() << "Updating base value cache"
+ << " for: " << BDV->getName() << " from: "
+ << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
+ << " to: " << Base->getName() << "\n");
if (Cache.count(BDV)) {
assert(isKnownBaseResult(Base) &&
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 5a6697fd9fef..27c51f07f1f7 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -259,7 +259,7 @@ public:
bool MarkBlockExecutable(BasicBlock *BB) {
if (!BBExecutable.insert(BB).second)
return false;
- DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
BBWorkList.push_back(BB); // Add the block to the work list!
return true;
}
@@ -415,7 +415,7 @@ private:
// the users of the instruction are updated later.
void markConstant(LatticeVal &IV, Value *V, Constant *C) {
if (!IV.markConstant(C)) return;
- DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
+ LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
pushToWorkList(IV, V);
}
@@ -428,7 +428,7 @@ private:
assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
LatticeVal &IV = ValueState[V];
IV.markForcedConstant(C);
- DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n');
+ LLVM_DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n');
pushToWorkList(IV, V);
}
@@ -438,11 +438,10 @@ private:
void markOverdefined(LatticeVal &IV, Value *V) {
if (!IV.markOverdefined()) return;
- DEBUG(dbgs() << "markOverdefined: ";
- if (auto *F = dyn_cast<Function>(V))
- dbgs() << "Function '" << F->getName() << "'\n";
- else
- dbgs() << *V << '\n');
+ LLVM_DEBUG(dbgs() << "markOverdefined: ";
+ if (auto *F = dyn_cast<Function>(V)) dbgs()
+ << "Function '" << F->getName() << "'\n";
+ else dbgs() << *V << '\n');
// Only instructions go on the work list
pushToWorkList(IV, V);
}
@@ -540,8 +539,8 @@ private:
// If the destination is already executable, we just made an *edge*
// feasible that wasn't before. Revisit the PHI nodes in the block
// because they have potentially new operands.
- DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
- << " -> " << Dest->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
+ << " -> " << Dest->getName() << '\n');
for (PHINode &PN : Dest->phis())
visitPHINode(PN);
@@ -612,7 +611,7 @@ private:
void visitInstruction(Instruction &I) {
// All the instructions we don't do any special handling for just
// go to overdefined.
- DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
+ LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
markOverdefined(&I);
}
};
@@ -699,7 +698,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
return;
}
- DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
+ LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
@@ -759,7 +758,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
return Addr->getBasicBlock() == To;
}
- DEBUG(dbgs() << "Unknown terminator instruction: " << *TI << '\n');
+ LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << *TI << '\n');
llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
@@ -1260,7 +1259,7 @@ void SCCPSolver::Solve() {
while (!OverdefinedInstWorkList.empty()) {
Value *I = OverdefinedInstWorkList.pop_back_val();
- DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');
// "I" got into the work list because it either made the transition from
// bottom to constant, or to overdefined.
@@ -1278,7 +1277,7 @@ void SCCPSolver::Solve() {
while (!InstWorkList.empty()) {
Value *I = InstWorkList.pop_back_val();
- DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
+ LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
// "I" got into the work list because it made the transition from undef to
// constant.
@@ -1298,7 +1297,7 @@ void SCCPSolver::Solve() {
BasicBlock *BB = BBWorkList.back();
BBWorkList.pop_back();
- DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
+ LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
// Notify all instructions in this basic block that they are newly
// executable.
@@ -1645,9 +1644,9 @@ static bool tryToReplaceWithConstantRange(SCCPSolver &Solver, Value *V) {
Constant *C = A.getCompare(Icmp->getPredicate(), Icmp->getType(), B);
if (C) {
Icmp->replaceAllUsesWith(C);
- DEBUG(dbgs() << "Replacing " << *Icmp << " with " << *C
- << ", because of range information " << A << " " << B
- << "\n");
+ LLVM_DEBUG(dbgs() << "Replacing " << *Icmp << " with " << *C
+ << ", because of range information " << A << " " << B
+ << "\n");
Icmp->eraseFromParent();
Changed = true;
}
@@ -1699,12 +1698,12 @@ static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
if (F)
Solver.AddMustTailCallee(F);
- DEBUG(dbgs() << " Can\'t treat the result of musttail call : " << *CI
- << " as a constant\n");
+ LLVM_DEBUG(dbgs() << " Can\'t treat the result of musttail call : " << *CI
+ << " as a constant\n");
return false;
}
- DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
+ LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
// Replaces all of the uses of a variable with uses of the constant.
V->replaceAllUsesWith(Const);
@@ -1715,7 +1714,7 @@ static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
// and return true if the function was modified.
static bool runSCCP(Function &F, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
- DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
+ LLVM_DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
SCCPSolver Solver(DL, TLI);
// Mark the first block of the function as being executable.
@@ -1729,7 +1728,7 @@ static bool runSCCP(Function &F, const DataLayout &DL,
bool ResolvedUndefs = true;
while (ResolvedUndefs) {
Solver.Solve();
- DEBUG(dbgs() << "RESOLVING UNDEFs\n");
+ LLVM_DEBUG(dbgs() << "RESOLVING UNDEFs\n");
ResolvedUndefs = Solver.ResolvedUndefsIn(F);
}
@@ -1741,7 +1740,7 @@ static bool runSCCP(Function &F, const DataLayout &DL,
for (BasicBlock &BB : F) {
if (!Solver.isBlockExecutable(&BB)) {
- DEBUG(dbgs() << " BasicBlock Dead:" << BB);
+ LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << BB);
++NumDeadBlocks;
NumInstRemoved += removeAllNonTerminatorAndEHPadInstructions(&BB);
@@ -1837,15 +1836,15 @@ static void findReturnsToZap(Function &F,
// There is a non-removable musttail call site of this function. Zapping
// returns is not allowed.
if (Solver.isMustTailCallee(&F)) {
- DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
- << " due to present musttail call of it\n");
+ LLVM_DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
+ << " due to present musttail call of it\n");
return;
}
for (BasicBlock &BB : F) {
if (CallInst *CI = BB.getTerminatingMustTailCall()) {
- DEBUG(dbgs() << "Can't zap return of the block due to present "
- << "musttail call : " << *CI << "\n");
+ LLVM_DEBUG(dbgs() << "Can't zap return of the block due to present "
+ << "musttail call : " << *CI << "\n");
(void)CI;
return;
}
@@ -1900,7 +1899,7 @@ bool llvm::runIPSCCP(Module &M, const DataLayout &DL,
while (ResolvedUndefs) {
Solver.Solve();
- DEBUG(dbgs() << "RESOLVING UNDEFS\n");
+ LLVM_DEBUG(dbgs() << "RESOLVING UNDEFS\n");
ResolvedUndefs = false;
for (Function &F : M)
ResolvedUndefs |= Solver.ResolvedUndefsIn(F);
@@ -1930,7 +1929,7 @@ bool llvm::runIPSCCP(Module &M, const DataLayout &DL,
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (!Solver.isBlockExecutable(&*BB)) {
- DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
+ LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
++NumDeadBlocks;
NumInstRemoved +=
@@ -2028,7 +2027,8 @@ bool llvm::runIPSCCP(Module &M, const DataLayout &DL,
GlobalVariable *GV = I->first;
assert(!I->second.isOverdefined() &&
"Overdefined values should have been taken out of the map!");
- DEBUG(dbgs() << "Found that GV '" << GV->getName() << "' is constant!\n");
+ LLVM_DEBUG(dbgs() << "Found that GV '" << GV->getName()
+ << "' is constant!\n");
while (!GV->use_empty()) {
StoreInst *SI = cast<StoreInst>(GV->user_back());
SI->eraseFromParent();
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index b4200da99cc6..2db08b729725 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -683,11 +683,12 @@ private:
// Completely skip uses which have a zero size or start either before or
// past the end of the allocation.
if (Size == 0 || Offset.uge(AllocSize)) {
- DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
- << " which has zero size or starts outside of the "
- << AllocSize << " byte alloca:\n"
- << " alloca: " << AS.AI << "\n"
- << " use: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
+ << Offset
+ << " which has zero size or starts outside of the "
+ << AllocSize << " byte alloca:\n"
+ << " alloca: " << AS.AI << "\n"
+ << " use: " << I << "\n");
return markAsDead(I);
}
@@ -702,10 +703,11 @@ private:
// them, and so have to record at least the information here.
assert(AllocSize >= BeginOffset); // Established above.
if (Size > AllocSize - BeginOffset) {
- DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
- << " to remain within the " << AllocSize << " byte alloca:\n"
- << " alloca: " << AS.AI << "\n"
- << " use: " << I << "\n");
+ LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
+ << Offset << " to remain within the " << AllocSize
+ << " byte alloca:\n"
+ << " alloca: " << AS.AI << "\n"
+ << " use: " << I << "\n");
EndOffset = AllocSize;
}
@@ -805,11 +807,11 @@ private:
// FIXME: We should instead consider the pointer to have escaped if this
// function is being instrumented for addressing bugs or race conditions.
if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
- DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
- << " which extends past the end of the " << AllocSize
- << " byte alloca:\n"
- << " alloca: " << AS.AI << "\n"
- << " use: " << SI << "\n");
+ LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
+ << Offset << " which extends past the end of the "
+ << AllocSize << " byte alloca:\n"
+ << " alloca: " << AS.AI << "\n"
+ << " use: " << SI << "\n");
return markAsDead(SI);
}
@@ -1236,7 +1238,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
}
static void speculatePHINodeLoads(PHINode &PN) {
- DEBUG(dbgs() << " original: " << PN << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
IRBuilderTy PHIBuilder(&PN);
@@ -1274,7 +1276,7 @@ static void speculatePHINodeLoads(PHINode &PN) {
NewPN->addIncoming(Load, Pred);
}
- DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
+ LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
PN.eraseFromParent();
}
@@ -1314,7 +1316,7 @@ static bool isSafeSelectToSpeculate(SelectInst &SI) {
}
static void speculateSelectInstLoads(SelectInst &SI) {
- DEBUG(dbgs() << " original: " << SI << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
IRBuilderTy IRB(&SI);
Value *TV = SI.getTrueValue();
@@ -1345,7 +1347,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
LI->getName() + ".sroa.speculated");
- DEBUG(dbgs() << " speculated to: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n");
LI->replaceAllUsesWith(V);
LI->eraseFromParent();
}
@@ -2068,7 +2070,7 @@ static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
IntegerType *Ty, uint64_t Offset,
const Twine &Name) {
- DEBUG(dbgs() << " start: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
IntegerType *IntTy = cast<IntegerType>(V->getType());
assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
"Element extends past full value");
@@ -2077,13 +2079,13 @@ static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
if (ShAmt) {
V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
- DEBUG(dbgs() << " shifted: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
}
assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
"Cannot extract to a larger integer!");
if (Ty != IntTy) {
V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
- DEBUG(dbgs() << " trunced: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n");
}
return V;
}
@@ -2094,10 +2096,10 @@ static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
IntegerType *Ty = cast<IntegerType>(V->getType());
assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
"Cannot insert a larger integer!");
- DEBUG(dbgs() << " start: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
if (Ty != IntTy) {
V = IRB.CreateZExt(V, IntTy, Name + ".ext");
- DEBUG(dbgs() << " extended: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " extended: " << *V << "\n");
}
assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
"Element store outside of alloca store");
@@ -2106,15 +2108,15 @@ static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
if (ShAmt) {
V = IRB.CreateShl(V, ShAmt, Name + ".shift");
- DEBUG(dbgs() << " shifted: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
}
if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
- DEBUG(dbgs() << " masked: " << *Old << "\n");
+ LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n");
V = IRB.CreateOr(Old, V, Name + ".insert");
- DEBUG(dbgs() << " inserted: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n");
}
return V;
}
@@ -2131,7 +2133,7 @@ static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
if (NumElements == 1) {
V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
Name + ".extract");
- DEBUG(dbgs() << " extract: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " extract: " << *V << "\n");
return V;
}
@@ -2141,7 +2143,7 @@ static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
Mask.push_back(IRB.getInt32(i));
V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(Mask), Name + ".extract");
- DEBUG(dbgs() << " shuffle: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
return V;
}
@@ -2155,7 +2157,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
// Single element to insert.
V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
Name + ".insert");
- DEBUG(dbgs() << " insert: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
return V;
}
@@ -2180,7 +2182,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(Mask), Name + ".expand");
- DEBUG(dbgs() << " shuffle: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
Mask.clear();
for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
@@ -2188,7 +2190,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
- DEBUG(dbgs() << " blend: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
return V;
}
@@ -2291,9 +2293,9 @@ public:
IsSplittable = I->isSplittable();
IsSplit =
BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
- DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : ""));
- DEBUG(AS.printSlice(dbgs(), I, ""));
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : ""));
+ LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
+ LLVM_DEBUG(dbgs() << "\n");
// Compute the intersecting offset range.
assert(BeginOffset < NewAllocaEndOffset);
@@ -2323,7 +2325,7 @@ private:
// Every instruction which can end up as a user must have a rewrite rule.
bool visitInstruction(Instruction &I) {
- DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
+ LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
llvm_unreachable("No rewrite rule for this instruction!");
}
@@ -2427,7 +2429,7 @@ private:
}
bool visitLoadInst(LoadInst &LI) {
- DEBUG(dbgs() << " original: " << LI << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
@@ -2527,7 +2529,7 @@ private:
Pass.DeadInsts.insert(&LI);
deleteIfTriviallyDead(OldOp);
- DEBUG(dbgs() << " to: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *V << "\n");
return !LI.isVolatile() && !IsPtrAdjusted;
}
@@ -2554,7 +2556,7 @@ private:
Store->setAAMetadata(AATags);
Pass.DeadInsts.insert(&SI);
- DEBUG(dbgs() << " to: " << *Store << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
return true;
}
@@ -2575,12 +2577,12 @@ private:
if (AATags)
Store->setAAMetadata(AATags);
Pass.DeadInsts.insert(&SI);
- DEBUG(dbgs() << " to: " << *Store << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
return true;
}
bool visitStoreInst(StoreInst &SI) {
- DEBUG(dbgs() << " original: " << SI << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
Value *OldOp = SI.getOperand(1);
assert(OldOp == OldPtr);
@@ -2648,7 +2650,7 @@ private:
Pass.DeadInsts.insert(&SI);
deleteIfTriviallyDead(OldOp);
- DEBUG(dbgs() << " to: " << *NewSI << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n");
return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
}
@@ -2682,12 +2684,12 @@ private:
/// Compute a vector splat for a given element value.
Value *getVectorSplat(Value *V, unsigned NumElements) {
V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
- DEBUG(dbgs() << " splat: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << " splat: " << *V << "\n");
return V;
}
bool visitMemSetInst(MemSetInst &II) {
- DEBUG(dbgs() << " original: " << II << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getRawDest() == OldPtr);
AAMDNodes AATags;
@@ -2726,7 +2728,7 @@ private:
getSliceAlign(), II.isVolatile());
if (AATags)
New->setAAMetadata(AATags);
- DEBUG(dbgs() << " to: " << *New << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
@@ -2792,7 +2794,7 @@ private:
II.isVolatile());
if (AATags)
New->setAAMetadata(AATags);
- DEBUG(dbgs() << " to: " << *New << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return !II.isVolatile();
}
@@ -2800,7 +2802,7 @@ private:
// Rewriting of memory transfer instructions can be a bit tricky. We break
// them into two categories: split intrinsics and unsplit intrinsics.
- DEBUG(dbgs() << " original: " << II << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << II << "\n");
AAMDNodes AATags;
II.getAAMetadata(AATags);
@@ -2829,7 +2831,7 @@ private:
II.setSourceAlignment(SliceAlign);
}
- DEBUG(dbgs() << " to: " << II << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << II << "\n");
deleteIfTriviallyDead(OldPtr);
return false;
}
@@ -2912,7 +2914,7 @@ private:
Size, II.isVolatile());
if (AATags)
New->setAAMetadata(AATags);
- DEBUG(dbgs() << " to: " << *New << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
@@ -2984,14 +2986,14 @@ private:
IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
if (AATags)
Store->setAAMetadata(AATags);
- DEBUG(dbgs() << " to: " << *Store << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
return !II.isVolatile();
}
bool visitIntrinsicInst(IntrinsicInst &II) {
assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end);
- DEBUG(dbgs() << " original: " << II << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getArgOperand(1) == OldPtr);
// Record this instruction for deletion.
@@ -3019,13 +3021,13 @@ private:
New = IRB.CreateLifetimeEnd(Ptr, Size);
(void)New;
- DEBUG(dbgs() << " to: " << *New << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
return true;
}
bool visitPHINode(PHINode &PN) {
- DEBUG(dbgs() << " original: " << PN << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
@@ -3044,7 +3046,7 @@ private:
// Replace the operands which were using the old pointer.
std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
- DEBUG(dbgs() << " to: " << PN << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << PN << "\n");
deleteIfTriviallyDead(OldPtr);
// PHIs can't be promoted on their own, but often can be speculated. We
@@ -3055,7 +3057,7 @@ private:
}
bool visitSelectInst(SelectInst &SI) {
- DEBUG(dbgs() << " original: " << SI << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
"Pointer isn't an operand!");
assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
@@ -3068,7 +3070,7 @@ private:
if (SI.getOperand(2) == OldPtr)
SI.setOperand(2, NewPtr);
- DEBUG(dbgs() << " to: " << SI << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << SI << "\n");
deleteIfTriviallyDead(OldPtr);
// Selects can't be promoted on their own, but often can be speculated. We
@@ -3104,7 +3106,7 @@ public:
/// Rewrite loads and stores through a pointer and all pointers derived from
/// it.
bool rewrite(Instruction &I) {
- DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
+ LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
enqueueUsers(I);
bool Changed = false;
while (!Queue.empty()) {
@@ -3218,7 +3220,7 @@ private:
if (AATags)
Load->setAAMetadata(AATags);
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
- DEBUG(dbgs() << " to: " << *Load << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *Load << "\n");
}
};
@@ -3228,7 +3230,7 @@ private:
return false;
// We have an aggregate being loaded, split it apart.
- DEBUG(dbgs() << " original: " << LI << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
AAMDNodes AATags;
LI.getAAMetadata(AATags);
LoadOpSplitter Splitter(&LI, *U, AATags);
@@ -3259,7 +3261,7 @@ private:
StoreInst *Store = IRB.CreateStore(ExtractValue, InBoundsGEP);
if (AATags)
Store->setAAMetadata(AATags);
- DEBUG(dbgs() << " to: " << *Store << "\n");
+ LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
}
};
@@ -3271,7 +3273,7 @@ private:
return false;
// We have an aggregate being stored, split it apart.
- DEBUG(dbgs() << " original: " << SI << "\n");
+ LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
AAMDNodes AATags;
SI.getAAMetadata(AATags);
StoreOpSplitter Splitter(&SI, *U, AATags);
@@ -3470,7 +3472,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
///
/// \returns true if any changes are made.
bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
- DEBUG(dbgs() << "Pre-splitting loads and stores\n");
+ LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
// Track the loads and stores which are candidates for pre-splitting here, in
// the order they first appear during the partition scan. These give stable
@@ -3502,7 +3504,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
// maybe it would make it more principled?
SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
- DEBUG(dbgs() << " Searching for candidate loads and stores\n");
+ LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
for (auto &P : AS.partitions()) {
for (Slice &S : P) {
Instruction *I = cast<Instruction>(S.getUse()->getUser());
@@ -3557,7 +3559,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
}
// Record the initial split.
- DEBUG(dbgs() << " Candidate: " << *I << "\n");
+ LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n");
auto &Offsets = SplitOffsetsMap[I];
assert(Offsets.Splits.empty() &&
"Should not have splits the first time we see an instruction!");
@@ -3617,10 +3619,11 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
if (LoadOffsets.Splits == StoreOffsets.Splits)
return false;
- DEBUG(dbgs()
- << " Mismatched splits for load and store:\n"
- << " " << *LI << "\n"
- << " " << *SI << "\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Mismatched splits for load and store:\n"
+ << " " << *LI << "\n"
+ << " " << *SI << "\n");
// We've found a store and load that we need to split
// with mismatched relative splits. Just give up on them
@@ -3693,7 +3696,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
IRB.SetInsertPoint(LI);
- DEBUG(dbgs() << " Splitting load: " << *LI << "\n");
+ LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n");
uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
int Idx = 0, Size = Offsets.Splits.size();
@@ -3718,9 +3721,9 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
&PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
/*IsSplittable*/ false));
- DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
- << ", " << NewSlices.back().endOffset() << "): " << *PLoad
- << "\n");
+ LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
+ << ", " << NewSlices.back().endOffset()
+ << "): " << *PLoad << "\n");
// See if we've handled all the splits.
if (Idx >= Size)
@@ -3740,14 +3743,15 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
StoreInst *SI = cast<StoreInst>(LU);
if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
DeferredStores = true;
- DEBUG(dbgs() << " Deferred splitting of store: " << *SI << "\n");
+ LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI
+ << "\n");
continue;
}
Value *StoreBasePtr = SI->getPointerOperand();
IRB.SetInsertPoint(SI);
- DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n");
+ LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n");
for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
LoadInst *PLoad = SplitLoads[Idx];
@@ -3763,7 +3767,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
PartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
- DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n");
+ LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n");
}
// We want to immediately iterate on any allocas impacted by splitting
@@ -3812,7 +3816,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
Value *LoadBasePtr = LI->getPointerOperand();
Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());
- DEBUG(dbgs() << " Splitting store: " << *SI << "\n");
+ LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n");
// Check whether we have an already split load.
auto SplitLoadsMapI = SplitLoadsMap.find(LI);
@@ -3822,7 +3826,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
"Too few split loads for the number of splits in the store!");
} else {
- DEBUG(dbgs() << " of load: " << *LI << "\n");
+ LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n");
}
uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
@@ -3862,11 +3866,11 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
&PStore->getOperandUse(PStore->getPointerOperandIndex()),
/*IsSplittable*/ false));
- DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
- << ", " << NewSlices.back().endOffset() << "): " << *PStore
- << "\n");
+ LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
+ << ", " << NewSlices.back().endOffset()
+ << "): " << *PStore << "\n");
if (!SplitLoads) {
- DEBUG(dbgs() << " of split load: " << *PLoad << "\n");
+ LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n");
}
// See if we've finished all the splits.
@@ -3921,10 +3925,10 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
// sequence.
AS.insert(NewSlices);
- DEBUG(dbgs() << " Pre-split slices:\n");
+ LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
#ifndef NDEBUG
for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
- DEBUG(AS.print(dbgs(), I, " "));
+ LLVM_DEBUG(AS.print(dbgs(), I, " "));
#endif
// Finally, don't try to promote any allocas that now require re-splitting.
@@ -4008,9 +4012,9 @@ AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
++NumNewAllocas;
}
- DEBUG(dbgs() << "Rewriting alloca partition "
- << "[" << P.beginOffset() << "," << P.endOffset()
- << ") to: " << *NewAI << "\n");
+ LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
+ << "[" << P.beginOffset() << "," << P.endOffset()
+ << ") to: " << *NewAI << "\n");
// Track the high watermark on the worklist as it is only relevant for
// promoted allocas. We will reset it to this point if the alloca is not in
@@ -4269,7 +4273,7 @@ void SROA::clobberUse(Use &U) {
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
- DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
+ LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
++NumAllocasAnalyzed;
// Special case dead allocas, as they're trivial.
@@ -4293,7 +4297,7 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
// Build the slices using a recursive instruction-visiting builder.
AllocaSlices AS(DL, AI);
- DEBUG(AS.print(dbgs()));
+ LLVM_DEBUG(AS.print(dbgs()));
if (AS.isEscaped())
return Changed;
@@ -4321,11 +4325,11 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
Changed |= splitAlloca(AI, AS);
- DEBUG(dbgs() << " Speculating PHIs\n");
+ LLVM_DEBUG(dbgs() << " Speculating PHIs\n");
while (!SpeculatablePHIs.empty())
speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
- DEBUG(dbgs() << " Speculating Selects\n");
+ LLVM_DEBUG(dbgs() << " Speculating Selects\n");
while (!SpeculatableSelects.empty())
speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
@@ -4346,7 +4350,7 @@ bool SROA::deleteDeadInstructions(
bool Changed = false;
while (!DeadInsts.empty()) {
Instruction *I = DeadInsts.pop_back_val();
- DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
// If the instruction is an alloca, find the possible dbg.declare connected
// to it, and remove it too. We must do this before calling RAUW or we will
@@ -4385,7 +4389,7 @@ bool SROA::promoteAllocas(Function &F) {
NumPromoted += PromotableAllocas.size();
- DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
+ LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
PromoteMemToReg(PromotableAllocas, *DT, AC);
PromotableAllocas.clear();
return true;
@@ -4393,7 +4397,7 @@ bool SROA::promoteAllocas(Function &F) {
PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
AssumptionCache &RunAC) {
- DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
+ LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
DT = &RunDT;
AC = &RunAC;
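
For context (this aside is not part of the patch): every hunk above rewrites the same macro, and its gating behaviour can be summarised in a short sketch. The reportAlloca helper is hypothetical and "sroa" stands in for whatever DEBUG_TYPE the surrounding file defines; LLVM_DEBUG and dbgs() come from llvm/Support/Debug.h.

#define DEBUG_TYPE "sroa" // matched against -debug-only=<type> at run time
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper mirroring the tracing style seen in the hunks above.
static void reportAlloca(const llvm::AllocaInst &AI) {
  // In a release (NDEBUG) build the macro expands to nothing, so the whole
  // streaming expression is compiled out; in an asserts build it still only
  // prints when -debug or -debug-only=sroa is passed to the tool.
  LLVM_DEBUG(llvm::dbgs() << "SROA alloca: " << AI << "\n");
}

Run against an asserts-built opt with -debug-only=sroa, such a call would emit the message; without the flag it stays silent.
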
diff --git a/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index 1cd61bfffc21..09b91781e9d2 100644
--- a/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -184,7 +184,7 @@ static void rewritePHINodesForExitAndUnswitchedBlocks(BasicBlock &ExitBB,
static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT,
LoopInfo &LI) {
assert(BI.isConditional() && "Can only unswitch a conditional branch!");
- DEBUG(dbgs() << " Trying to unswitch branch: " << BI << "\n");
+ LLVM_DEBUG(dbgs() << " Trying to unswitch branch: " << BI << "\n");
Value *LoopCond = BI.getCondition();
@@ -212,8 +212,8 @@ static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT,
if (!areLoopExitPHIsLoopInvariant(L, *ParentBB, *LoopExitBB))
return false;
- DEBUG(dbgs() << " unswitching trivial branch when: " << CondVal
- << " == " << LoopCond << "\n");
+ LLVM_DEBUG(dbgs() << " unswitching trivial branch when: " << CondVal
+ << " == " << LoopCond << "\n");
// Split the preheader, so that we know that there is a safe place to insert
// the conditional branch. We will change the preheader to have a conditional
@@ -292,7 +292,7 @@ static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT,
/// branch. Still more cleanup can be done with some simplify-cfg like pass.
static bool unswitchTrivialSwitch(Loop &L, SwitchInst &SI, DominatorTree &DT,
LoopInfo &LI) {
- DEBUG(dbgs() << " Trying to unswitch switch: " << SI << "\n");
+ LLVM_DEBUG(dbgs() << " Trying to unswitch switch: " << SI << "\n");
Value *LoopCond = SI.getCondition();
// If this isn't switching on an invariant condition, we can't unswitch it.
@@ -316,7 +316,7 @@ static bool unswitchTrivialSwitch(Loop &L, SwitchInst &SI, DominatorTree &DT,
else if (ExitCaseIndices.empty())
return false;
- DEBUG(dbgs() << " unswitching trivial cases...\n");
+ LLVM_DEBUG(dbgs() << " unswitching trivial cases...\n");
SmallVector<std::pair<ConstantInt *, BasicBlock *>, 4> ExitCases;
ExitCases.reserve(ExitCaseIndices.size());
@@ -1798,8 +1798,9 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC,
if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
return Changed;
- DEBUG(dbgs() << "Considering " << UnswitchCandidates.size()
- << " non-trivial loop invariant conditions for unswitching.\n");
+ LLVM_DEBUG(
+ dbgs() << "Considering " << UnswitchCandidates.size()
+ << " non-trivial loop invariant conditions for unswitching.\n");
// Unswitching these terminators will require duplicating parts of the loop,
// so we need to be able to model that cost. Compute the ephemeral
@@ -1835,7 +1836,7 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC,
assert(LoopCost >= 0 && "Must not have negative loop costs!");
BBCostMap[BB] = Cost;
}
- DEBUG(dbgs() << " Total loop cost: " << LoopCost << "\n");
+ LLVM_DEBUG(dbgs() << " Total loop cost: " << LoopCost << "\n");
// Now we find the best candidate by searching for the one with the following
// properties in order:
@@ -1889,8 +1890,8 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC,
int BestUnswitchCost;
for (TerminatorInst *CandidateTI : UnswitchCandidates) {
int CandidateCost = ComputeUnswitchedCost(CandidateTI);
- DEBUG(dbgs() << " Computed cost of " << CandidateCost
- << " for unswitch candidate: " << *CandidateTI << "\n");
+ LLVM_DEBUG(dbgs() << " Computed cost of " << CandidateCost
+ << " for unswitch candidate: " << *CandidateTI << "\n");
if (!BestUnswitchTI || CandidateCost < BestUnswitchCost) {
BestUnswitchTI = CandidateTI;
BestUnswitchCost = CandidateCost;
@@ -1898,14 +1899,14 @@ unswitchLoop(Loop &L, DominatorTree &DT, LoopInfo &LI, AssumptionCache &AC,
}
if (BestUnswitchCost < UnswitchThreshold) {
- DEBUG(dbgs() << " Trying to unswitch non-trivial (cost = "
- << BestUnswitchCost << ") branch: " << *BestUnswitchTI
- << "\n");
+ LLVM_DEBUG(dbgs() << " Trying to unswitch non-trivial (cost = "
+ << BestUnswitchCost << ") branch: " << *BestUnswitchTI
+ << "\n");
Changed |= unswitchInvariantBranch(L, cast<BranchInst>(*BestUnswitchTI), DT,
LI, AC, NonTrivialUnswitchCB);
} else {
- DEBUG(dbgs() << "Cannot unswitch, lowest cost found: " << BestUnswitchCost
- << "\n");
+ LLVM_DEBUG(dbgs() << "Cannot unswitch, lowest cost found: "
+ << BestUnswitchCost << "\n");
}
return Changed;
@@ -1917,7 +1918,8 @@ PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM,
Function &F = *L.getHeader()->getParent();
(void)F;
- DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L << "\n");
+ LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << L
+ << "\n");
// Save the current loop name in a variable so that we can report it even
// after it has been deleted.
@@ -1977,7 +1979,8 @@ bool SimpleLoopUnswitchLegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) {
Function &F = *L->getHeader()->getParent();
- DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << *L << "\n");
+ LLVM_DEBUG(dbgs() << "Unswitching loop in " << F.getName() << ": " << *L
+ << "\n");
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 811762880493..ca6b93e0b4a9 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -187,11 +187,9 @@ static bool SinkInstruction(Instruction *Inst,
if (!SuccToSinkTo)
return false;
- DEBUG(dbgs() << "Sink" << *Inst << " (";
- Inst->getParent()->printAsOperand(dbgs(), false);
- dbgs() << " -> ";
- SuccToSinkTo->printAsOperand(dbgs(), false);
- dbgs() << ")\n");
+ LLVM_DEBUG(dbgs() << "Sink" << *Inst << " (";
+ Inst->getParent()->printAsOperand(dbgs(), false); dbgs() << " -> ";
+ SuccToSinkTo->printAsOperand(dbgs(), false); dbgs() << ")\n");
// Move the instruction.
Inst->moveBefore(&*SuccToSinkTo->getFirstInsertionPt());
@@ -244,7 +242,7 @@ static bool iterativelySinkInstructions(Function &F, DominatorTree &DT,
do {
MadeChange = false;
- DEBUG(dbgs() << "Sinking iteration " << NumSinkIter << "\n");
+ LLVM_DEBUG(dbgs() << "Sinking iteration " << NumSinkIter << "\n");
// Process all basic blocks.
for (BasicBlock &I : F)
MadeChange |= ProcessBlock(I, DT, LI, AA);
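
The Sink.cpp hunk just above keeps several statements inside one macro invocation. A rough, hypothetical equivalent of that shape (traceSink and the "sink-example" debug type are illustrative, not taken from the patch):

#define DEBUG_TYPE "sink-example"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical tracer: everything between the parentheses, including the
// side-effecting printAsOperand() calls, is skipped unless debug output for
// this type is enabled.
static void traceSink(const llvm::Instruction &I, const llvm::BasicBlock &To) {
  LLVM_DEBUG(llvm::dbgs() << "Sink" << I << " (";
             I.getParent()->printAsOperand(llvm::dbgs(), false);
             llvm::dbgs() << " -> "; To.printAsOperand(llvm::dbgs(), false);
             llvm::dbgs() << ")\n");
}
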
diff --git a/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp b/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp
index 23156d5a4d83..3b0971804e6b 100644
--- a/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp
+++ b/lib/Transforms/Scalar/SpeculateAroundPHIs.cpp
@@ -64,7 +64,7 @@ isSafeToSpeculatePHIUsers(PHINode &PN, DominatorTree &DT,
// block. We should consider using actual post-dominance here in the
// future.
if (UI->getParent() != PhiBB) {
- DEBUG(dbgs() << " Unsafe: use in a different BB: " << *UI << "\n");
+ LLVM_DEBUG(dbgs() << " Unsafe: use in a different BB: " << *UI << "\n");
return false;
}
@@ -75,7 +75,7 @@ isSafeToSpeculatePHIUsers(PHINode &PN, DominatorTree &DT,
// probably change this to do at least a limited scan of the intervening
// instructions and allow handling stores in easily proven safe cases.
if (mayBeMemoryDependent(*UI)) {
- DEBUG(dbgs() << " Unsafe: can't speculate use: " << *UI << "\n");
+ LLVM_DEBUG(dbgs() << " Unsafe: can't speculate use: " << *UI << "\n");
return false;
}
@@ -126,8 +126,8 @@ isSafeToSpeculatePHIUsers(PHINode &PN, DominatorTree &DT,
// If when we directly test whether this is safe it fails, bail.
if (UnsafeSet.count(OpI) || ParentBB != PhiBB ||
mayBeMemoryDependent(*OpI)) {
- DEBUG(dbgs() << " Unsafe: can't speculate transitive use: " << *OpI
- << "\n");
+ LLVM_DEBUG(dbgs() << " Unsafe: can't speculate transitive use: "
+ << *OpI << "\n");
// Record the stack of instructions which reach this node as unsafe
// so we prune subsequent searches.
UnsafeSet.insert(OpI);
@@ -229,7 +229,7 @@ static bool isSafeAndProfitableToSpeculateAroundPHI(
NonFreeMat |= MatCost != TTI.TCC_Free;
}
if (!NonFreeMat) {
- DEBUG(dbgs() << " Free: " << PN << "\n");
+ LLVM_DEBUG(dbgs() << " Free: " << PN << "\n");
// No profit in free materialization.
return false;
}
@@ -237,7 +237,7 @@ static bool isSafeAndProfitableToSpeculateAroundPHI(
// Now check that the uses of this PHI can actually be speculated,
// otherwise we'll still have to materialize the PHI value.
if (!isSafeToSpeculatePHIUsers(PN, DT, PotentialSpecSet, UnsafeSet)) {
- DEBUG(dbgs() << " Unsafe PHI: " << PN << "\n");
+ LLVM_DEBUG(dbgs() << " Unsafe PHI: " << PN << "\n");
return false;
}
@@ -288,9 +288,13 @@ static bool isSafeAndProfitableToSpeculateAroundPHI(
// just bail. We're only interested in cases where folding the incoming
// constants is at least break-even on all paths.
if (FoldedCost > MatCost) {
- DEBUG(dbgs() << " Not profitable to fold imm: " << *IncomingC << "\n"
- " Materializing cost: " << MatCost << "\n"
- " Accumulated folded cost: " << FoldedCost << "\n");
+ LLVM_DEBUG(dbgs() << " Not profitable to fold imm: " << *IncomingC
+ << "\n"
+ " Materializing cost: "
+ << MatCost
+ << "\n"
+ " Accumulated folded cost: "
+ << FoldedCost << "\n");
return false;
}
}
@@ -310,8 +314,8 @@ static bool isSafeAndProfitableToSpeculateAroundPHI(
"less than its materialized cost, "
"the sum must be as well.");
- DEBUG(dbgs() << " Cost savings " << (TotalMatCost - TotalFoldedCost)
- << ": " << PN << "\n");
+ LLVM_DEBUG(dbgs() << " Cost savings " << (TotalMatCost - TotalFoldedCost)
+ << ": " << PN << "\n");
CostSavingsMap[&PN] = TotalMatCost - TotalFoldedCost;
return true;
}
@@ -489,9 +493,13 @@ findProfitablePHIs(ArrayRef<PHINode *> PNs,
// and zero out the cost of everything it depends on.
int CostSavings = CostSavingsMap.find(PN)->second;
if (SpecCost > CostSavings) {
- DEBUG(dbgs() << " Not profitable, speculation cost: " << *PN << "\n"
- " Cost savings: " << CostSavings << "\n"
- " Speculation cost: " << SpecCost << "\n");
+ LLVM_DEBUG(dbgs() << " Not profitable, speculation cost: " << *PN
+ << "\n"
+ " Cost savings: "
+ << CostSavings
+ << "\n"
+ " Speculation cost: "
+ << SpecCost << "\n");
continue;
}
@@ -545,7 +553,7 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs,
SmallPtrSetImpl<Instruction *> &PotentialSpecSet,
SmallSetVector<BasicBlock *, 16> &PredSet,
DominatorTree &DT) {
- DEBUG(dbgs() << " Speculating around " << SpecPNs.size() << " PHIs!\n");
+ LLVM_DEBUG(dbgs() << " Speculating around " << SpecPNs.size() << " PHIs!\n");
NumPHIsSpeculated += SpecPNs.size();
// Split any critical edges so that we have a block to hoist into.
@@ -558,8 +566,8 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs,
CriticalEdgeSplittingOptions(&DT).setMergeIdenticalEdges());
if (NewPredBB) {
++NumEdgesSplit;
- DEBUG(dbgs() << " Split critical edge from: " << PredBB->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << " Split critical edge from: " << PredBB->getName()
+ << "\n");
SpecPreds.push_back(NewPredBB);
} else {
assert(PredBB->getSingleSuccessor() == ParentBB &&
@@ -593,8 +601,9 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs,
int NumSpecInsts = SpecList.size() * SpecPreds.size();
int NumRedundantInsts = NumSpecInsts - SpecList.size();
- DEBUG(dbgs() << " Inserting " << NumSpecInsts << " speculated instructions, "
- << NumRedundantInsts << " redundancies\n");
+ LLVM_DEBUG(dbgs() << " Inserting " << NumSpecInsts
+ << " speculated instructions, " << NumRedundantInsts
+ << " redundancies\n");
NumSpeculatedInstructions += NumSpecInsts;
NumNewRedundantInstructions += NumRedundantInsts;
@@ -716,7 +725,7 @@ static void speculatePHIs(ArrayRef<PHINode *> SpecPNs,
/// true when at least some speculation occurs.
static bool tryToSpeculatePHIs(SmallVectorImpl<PHINode *> &PNs,
DominatorTree &DT, TargetTransformInfo &TTI) {
- DEBUG(dbgs() << "Evaluating phi nodes for speculation:\n");
+ LLVM_DEBUG(dbgs() << "Evaluating phi nodes for speculation:\n");
// Savings in cost from speculating around a PHI node.
SmallDenseMap<PHINode *, int, 16> CostSavingsMap;
@@ -745,7 +754,7 @@ static bool tryToSpeculatePHIs(SmallVectorImpl<PHINode *> &PNs,
PNs.end());
// If no PHIs were profitable, skip.
if (PNs.empty()) {
- DEBUG(dbgs() << " No safe and profitable PHIs found!\n");
+ LLVM_DEBUG(dbgs() << " No safe and profitable PHIs found!\n");
return false;
}
@@ -763,13 +772,13 @@ static bool tryToSpeculatePHIs(SmallVectorImpl<PHINode *> &PNs,
// differently.
if (isa<IndirectBrInst>(PredBB->getTerminator()) ||
isa<InvokeInst>(PredBB->getTerminator())) {
- DEBUG(dbgs() << " Invalid: predecessor terminator: " << PredBB->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << " Invalid: predecessor terminator: "
+ << PredBB->getName() << "\n");
return false;
}
}
if (PredSet.size() < 2) {
- DEBUG(dbgs() << " Unimportant: phi with only one predecessor\n");
+ LLVM_DEBUG(dbgs() << " Unimportant: phi with only one predecessor\n");
return false;
}
diff --git a/lib/Transforms/Scalar/SpeculativeExecution.cpp b/lib/Transforms/Scalar/SpeculativeExecution.cpp
index a7c308b59877..488c4df2b191 100644
--- a/lib/Transforms/Scalar/SpeculativeExecution.cpp
+++ b/lib/Transforms/Scalar/SpeculativeExecution.cpp
@@ -151,8 +151,8 @@ namespace llvm {
bool SpeculativeExecutionPass::runImpl(Function &F, TargetTransformInfo *TTI) {
if (OnlyIfDivergentTarget && !TTI->hasBranchDivergence()) {
- DEBUG(dbgs() << "Not running SpeculativeExecution because "
- "TTI->hasBranchDivergence() is false.\n");
+ LLVM_DEBUG(dbgs() << "Not running SpeculativeExecution because "
+ "TTI->hasBranchDivergence() is false.\n");
return false;
}
diff --git a/lib/Transforms/Scalar/StructurizeCFG.cpp b/lib/Transforms/Scalar/StructurizeCFG.cpp
index 25816f70c569..f0ddc30beff2 100644
--- a/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -489,10 +489,10 @@ void StructurizeCFG::collectInfos() {
Visited.clear();
for (RegionNode *RN : reverse(Order)) {
- DEBUG(dbgs() << "Visiting: "
- << (RN->isSubRegion() ? "SubRegion with entry: " : "")
- << RN->getEntry()->getName() << " Loop Depth: "
- << LI->getLoopDepth(RN->getEntry()) << "\n");
+ LLVM_DEBUG(dbgs() << "Visiting: "
+ << (RN->isSubRegion() ? "SubRegion with entry: " : "")
+ << RN->getEntry()->getName() << " Loop Depth: "
+ << LI->getLoopDepth(RN->getEntry()) << "\n");
// Analyze all the conditions leading to a node
gatherPredicates(RN);
@@ -901,8 +901,8 @@ static bool hasOnlyUniformBranches(Region *R, unsigned UniformMDKindID,
if (!DA.isUniform(Br))
return false;
- DEBUG(dbgs() << "BB: " << Br->getParent()->getName()
- << " has uniform terminator\n");
+ LLVM_DEBUG(dbgs() << "BB: " << Br->getParent()->getName()
+ << " has uniform terminator\n");
} else {
// Explicitly refuse to treat regions as uniform if they have non-uniform
// subregions. We cannot rely on DivergenceAnalysis for branches in
@@ -943,7 +943,8 @@ bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
DA = &getAnalysis<DivergenceAnalysis>();
if (hasOnlyUniformBranches(R, UniformMDKindID, *DA)) {
- DEBUG(dbgs() << "Skipping region with uniform control flow: " << *R << '\n');
+ LLVM_DEBUG(dbgs() << "Skipping region with uniform control flow: " << *R
+ << '\n');
// Mark all direct child block terminators as having been treated as
// uniform. To account for a possible future in which non-uniform
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 37ea4375a4c2..f8cd6c17a5a6 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -302,7 +302,7 @@ static bool markTails(Function &F, bool &AllCallsAreTailCalls,
if (Visited[CI->getParent()] != ESCAPED) {
// If the escape point was part way through the block, calls after the
// escape point wouldn't have been put into DeferredTails.
- DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
+ LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
CI->setTailCall();
Modified = true;
} else {
@@ -699,8 +699,8 @@ static bool foldReturnAndProcessPred(
BranchInst *BI = UncondBranchPreds.pop_back_val();
BasicBlock *Pred = BI->getParent();
if (CallInst *CI = findTRECandidate(BI, CannotTailCallElimCallsMarkedTail, TTI)){
- DEBUG(dbgs() << "FOLDING: " << *BB
- << "INTO UNCOND BRANCH PRED: " << *Pred);
+ LLVM_DEBUG(dbgs() << "FOLDING: " << *BB
+ << "INTO UNCOND BRANCH PRED: " << *Pred);
ReturnInst *RI = FoldReturnIntoUncondBranch(Ret, BB, Pred);
// Cleanup: if all predecessors of BB have been eliminated by
diff --git a/lib/Transforms/Utils/AddDiscriminators.cpp b/lib/Transforms/Utils/AddDiscriminators.cpp
index 9a4996e5475d..e3ef42362223 100644
--- a/lib/Transforms/Utils/AddDiscriminators.cpp
+++ b/lib/Transforms/Utils/AddDiscriminators.cpp
@@ -210,9 +210,9 @@ static bool addDiscriminators(Function &F) {
// it in 1 byte ULEB128 representation.
unsigned Discriminator = R.second ? ++LDM[L] : LDM[L];
I.setDebugLoc(DIL->setBaseDiscriminator(Discriminator));
- DEBUG(dbgs() << DIL->getFilename() << ":" << DIL->getLine() << ":"
- << DIL->getColumn() << ":" << Discriminator << " " << I
- << "\n");
+ LLVM_DEBUG(dbgs() << DIL->getFilename() << ":" << DIL->getLine() << ":"
+ << DIL->getColumn() << ":" << Discriminator << " " << I
+ << "\n");
Changed = true;
}
}
diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp
index 3d7149b49bc0..f31dab9f96af 100644
--- a/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/lib/Transforms/Utils/CodeExtractor.cpp
@@ -205,7 +205,7 @@ buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
// Make sure that the first block is not a landing pad.
if (BB == Result.front()) {
if (BB->isEHPad()) {
- DEBUG(dbgs() << "The first block cannot be an unwind block\n");
+ LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n");
return {};
}
continue;
@@ -215,8 +215,9 @@ buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
// the subgraph which is being extracted.
for (auto *PBB : predecessors(BB))
if (!Result.count(PBB)) {
- DEBUG(dbgs() << "No blocks in this region may have entries from "
- "outside the region except for the first block!\n");
+ LLVM_DEBUG(
+ dbgs() << "No blocks in this region may have entries from "
+ "outside the region except for the first block!\n");
return {};
}
}
@@ -623,8 +624,8 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
BasicBlock *newHeader,
Function *oldFunction,
Module *M) {
- DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
- DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");
+ LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
+ LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");
// This function returns unsigned, outputs will go back by reference.
switch (NumExitBlocks) {
@@ -638,20 +639,20 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
// Add the types of the input values to the function's argument list
for (Value *value : inputs) {
- DEBUG(dbgs() << "value used in func: " << *value << "\n");
+ LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n");
paramTy.push_back(value->getType());
}
// Add the types of the output values to the function's argument list.
for (Value *output : outputs) {
- DEBUG(dbgs() << "instr used in func: " << *output << "\n");
+ LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n");
if (AggregateArgs)
paramTy.push_back(output->getType());
else
paramTy.push_back(PointerType::getUnqual(output->getType()));
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "Function type: " << *RetTy << " f(";
for (Type *i : paramTy)
dbgs() << *i << ", ";
@@ -1277,7 +1278,7 @@ Function *CodeExtractor::extractCodeRegion() {
}
}
- DEBUG(if (verifyFunction(*newFunction))
- report_fatal_error("verifyFunction failed!"));
+ LLVM_DEBUG(if (verifyFunction(*newFunction))
+ report_fatal_error("verifyFunction failed!"));
return newFunction;
}
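
The CodeExtractor hunks above also use the braced form, which passes a whole compound statement to the macro. A minimal sketch with a hypothetical dumpParams helper and an illustrative debug type:

#define DEBUG_TYPE "extract-example"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

// Hypothetical helper: the braced block lets the trace carry its own loop,
// and the entire block disappears in release (NDEBUG) builds.
static void dumpParams(const std::vector<llvm::Type *> &ParamTy) {
  LLVM_DEBUG({
    llvm::dbgs() << "params: (";
    for (llvm::Type *T : ParamTy)
      llvm::dbgs() << *T << ", ";
    llvm::dbgs() << ")\n";
  });
}
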
diff --git a/lib/Transforms/Utils/CtorUtils.cpp b/lib/Transforms/Utils/CtorUtils.cpp
index 82b67c293102..9a0240144d08 100644
--- a/lib/Transforms/Utils/CtorUtils.cpp
+++ b/lib/Transforms/Utils/CtorUtils.cpp
@@ -138,7 +138,7 @@ bool optimizeGlobalCtorsList(Module &M,
if (!F)
continue;
- DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n");
+ LLVM_DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n");
// We cannot simplify external ctor functions.
if (F->empty())
diff --git a/lib/Transforms/Utils/Evaluator.cpp b/lib/Transforms/Utils/Evaluator.cpp
index cb5280992fbe..05a318a3f22b 100644
--- a/lib/Transforms/Utils/Evaluator.cpp
+++ b/lib/Transforms/Utils/Evaluator.cpp
@@ -226,22 +226,23 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
while (true) {
Constant *InstResult = nullptr;
- DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
+ LLVM_DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
if (!SI->isSimple()) {
- DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n");
+ LLVM_DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n");
return false; // no volatile/atomic accesses.
}
Constant *Ptr = getVal(SI->getOperand(1));
if (auto *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI)) {
- DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
+ LLVM_DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
Ptr = FoldedPtr;
- DEBUG(dbgs() << "; To: " << *Ptr << "\n");
+ LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
if (!isSimpleEnoughPointerToCommit(Ptr)) {
// If this is too complex for us to commit, reject it.
- DEBUG(dbgs() << "Pointer is too complex for us to evaluate store.");
+ LLVM_DEBUG(
+ dbgs() << "Pointer is too complex for us to evaluate store.");
return false;
}
@@ -250,14 +251,15 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// If this might be too difficult for the backend to handle (e.g. the addr
// of one global variable divided by another) then we can't commit it.
if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
- DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val
- << "\n");
+ LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. "
+ << *Val << "\n");
return false;
}
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
if (CE->getOpcode() == Instruction::BitCast) {
- DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n");
+ LLVM_DEBUG(dbgs()
+ << "Attempting to resolve bitcast on constant ptr.\n");
// If we're evaluating a store through a bitcast, then we need
// to pull the bitcast off the pointer type and push it onto the
// stored value.
@@ -287,14 +289,14 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
} else {
- DEBUG(dbgs() << "Failed to bitcast constant ptr, can not "
- "evaluate.\n");
+ LLVM_DEBUG(dbgs() << "Failed to bitcast constant ptr, can not "
+ "evaluate.\n");
return false;
}
}
Val = NewVal;
- DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n");
}
}
@@ -303,37 +305,37 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
InstResult = ConstantExpr::get(BO->getOpcode(),
getVal(BO->getOperand(0)),
getVal(BO->getOperand(1)));
- DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: "
+ << *InstResult << "\n");
} else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
InstResult = ConstantExpr::getCompare(CI->getPredicate(),
getVal(CI->getOperand(0)),
getVal(CI->getOperand(1)));
- DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult
+ << "\n");
} else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
InstResult = ConstantExpr::getCast(CI->getOpcode(),
getVal(CI->getOperand(0)),
CI->getType());
- DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult
+ << "\n");
} else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
getVal(SI->getOperand(1)),
getVal(SI->getOperand(2)));
- DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult
+ << "\n");
} else if (auto *EVI = dyn_cast<ExtractValueInst>(CurInst)) {
InstResult = ConstantExpr::getExtractValue(
getVal(EVI->getAggregateOperand()), EVI->getIndices());
- DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: " << *InstResult
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: "
+ << *InstResult << "\n");
} else if (auto *IVI = dyn_cast<InsertValueInst>(CurInst)) {
InstResult = ConstantExpr::getInsertValue(
getVal(IVI->getAggregateOperand()),
getVal(IVI->getInsertedValueOperand()), IVI->getIndices());
- DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: " << *InstResult
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: "
+ << *InstResult << "\n");
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
Constant *P = getVal(GEP->getOperand(0));
SmallVector<Constant*, 8> GEPOps;
@@ -343,31 +345,33 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
InstResult =
ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), P, GEPOps,
cast<GEPOperator>(GEP)->isInBounds());
- DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult << "\n");
} else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
if (!LI->isSimple()) {
- DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n");
+ LLVM_DEBUG(
+ dbgs() << "Found a Load! Not a simple load, can not evaluate.\n");
return false; // no volatile/atomic accesses.
}
Constant *Ptr = getVal(LI->getOperand(0));
if (auto *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI)) {
Ptr = FoldedPtr;
- DEBUG(dbgs() << "Found a constant pointer expression, constant "
- "folding: " << *Ptr << "\n");
+ LLVM_DEBUG(dbgs() << "Found a constant pointer expression, constant "
+ "folding: "
+ << *Ptr << "\n");
}
InstResult = ComputeLoadResult(Ptr);
if (!InstResult) {
- DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load."
- "\n");
+ LLVM_DEBUG(
+ dbgs() << "Failed to compute load result. Can not evaluate load."
+ "\n");
return false; // Could not evaluate load.
}
- DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
+ LLVM_DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
if (AI->isArrayAllocation()) {
- DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
+ LLVM_DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
return false; // Cannot handle array allocs.
}
Type *Ty = AI->getAllocatedType();
@@ -375,28 +379,28 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty),
AI->getName()));
InstResult = AllocaTmps.back().get();
- DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
+ LLVM_DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
} else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
CallSite CS(&*CurInst);
// Debug info can safely be ignored here.
if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
- DEBUG(dbgs() << "Ignoring debug info.\n");
+ LLVM_DEBUG(dbgs() << "Ignoring debug info.\n");
++CurInst;
continue;
}
// Cannot handle inline asm.
if (isa<InlineAsm>(CS.getCalledValue())) {
- DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
+ LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
return false;
}
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
if (MSI->isVolatile()) {
- DEBUG(dbgs() << "Can not optimize a volatile memset " <<
- "intrinsic.\n");
+ LLVM_DEBUG(dbgs() << "Can not optimize a volatile memset "
+ << "intrinsic.\n");
return false;
}
Constant *Ptr = getVal(MSI->getDest());
@@ -404,7 +408,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *DestVal = ComputeLoadResult(getVal(Ptr));
if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
// This memset is a no-op.
- DEBUG(dbgs() << "Ignoring no-op memset.\n");
+ LLVM_DEBUG(dbgs() << "Ignoring no-op memset.\n");
++CurInst;
continue;
}
@@ -412,7 +416,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end) {
- DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
+ LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
++CurInst;
continue;
}
@@ -421,7 +425,8 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// We don't insert an entry into Values, as it doesn't have a
// meaningful return value.
if (!II->use_empty()) {
- DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n");
+ LLVM_DEBUG(dbgs()
+ << "Found unused invariant_start. Can't evaluate.\n");
return false;
}
ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
@@ -433,34 +438,35 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Size->getValue().getLimitedValue() >=
DL.getTypeStoreSize(ElemTy)) {
Invariants.insert(GV);
- DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
- << "\n");
+ LLVM_DEBUG(dbgs() << "Found a global var that is an invariant: "
+ << *GV << "\n");
} else {
- DEBUG(dbgs() << "Found a global var, but can not treat it as an "
- "invariant.\n");
+ LLVM_DEBUG(dbgs()
+ << "Found a global var, but can not treat it as an "
+ "invariant.\n");
}
}
// Continue even if we do nothing.
++CurInst;
continue;
} else if (II->getIntrinsicID() == Intrinsic::assume) {
- DEBUG(dbgs() << "Skipping assume intrinsic.\n");
+ LLVM_DEBUG(dbgs() << "Skipping assume intrinsic.\n");
++CurInst;
continue;
} else if (II->getIntrinsicID() == Intrinsic::sideeffect) {
- DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n");
+ LLVM_DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n");
++CurInst;
continue;
}
- DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n");
+ LLVM_DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n");
return false;
}
// Resolve function pointers.
Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
if (!Callee || Callee->isInterposable()) {
- DEBUG(dbgs() << "Can not resolve function pointer.\n");
+ LLVM_DEBUG(dbgs() << "Can not resolve function pointer.\n");
return false; // Cannot resolve.
}
@@ -472,15 +478,15 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// If this is a function we can constant fold, do it.
if (Constant *C = ConstantFoldCall(CS, Callee, Formals, TLI)) {
InstResult = C;
- DEBUG(dbgs() << "Constant folded function call. Result: " <<
- *InstResult << "\n");
+ LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
+ << *InstResult << "\n");
} else {
- DEBUG(dbgs() << "Can not constant fold function call.\n");
+ LLVM_DEBUG(dbgs() << "Can not constant fold function call.\n");
return false;
}
} else {
if (Callee->getFunctionType()->isVarArg()) {
- DEBUG(dbgs() << "Can not constant fold vararg function call.\n");
+ LLVM_DEBUG(dbgs() << "Can not constant fold vararg function call.\n");
return false;
}
@@ -488,21 +494,22 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// Execute the call, if successful, use the return value.
ValueStack.emplace_back();
if (!EvaluateFunction(Callee, RetVal, Formals)) {
- DEBUG(dbgs() << "Failed to evaluate function.\n");
+ LLVM_DEBUG(dbgs() << "Failed to evaluate function.\n");
return false;
}
ValueStack.pop_back();
InstResult = RetVal;
if (InstResult) {
- DEBUG(dbgs() << "Successfully evaluated function. Result: "
- << *InstResult << "\n\n");
+ LLVM_DEBUG(dbgs() << "Successfully evaluated function. Result: "
+ << *InstResult << "\n\n");
} else {
- DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n");
+ LLVM_DEBUG(dbgs()
+ << "Successfully evaluated function. Result: 0\n\n");
}
}
} else if (isa<TerminatorInst>(CurInst)) {
- DEBUG(dbgs() << "Found a terminator instruction.\n");
+ LLVM_DEBUG(dbgs() << "Found a terminator instruction.\n");
if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
if (BI->isUnconditional()) {
@@ -529,17 +536,18 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
NextBB = nullptr;
} else {
// invoke, unwind, resume, unreachable.
- DEBUG(dbgs() << "Can not handle terminator.");
+ LLVM_DEBUG(dbgs() << "Can not handle terminator.");
return false; // Cannot handle this terminator.
}
// We succeeded at evaluating this block!
- DEBUG(dbgs() << "Successfully evaluated block.\n");
+ LLVM_DEBUG(dbgs() << "Successfully evaluated block.\n");
return true;
} else {
// Did not know how to evaluate this!
- DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction."
- "\n");
+ LLVM_DEBUG(
+ dbgs() << "Failed to evaluate block due to unhandled instruction."
+ "\n");
return false;
}
@@ -553,7 +561,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// If we just processed an invoke, we finished evaluating the block.
if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
NextBB = II->getNormalDest();
- DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
+ LLVM_DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
return true;
}
@@ -592,7 +600,7 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
while (true) {
BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
- DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
+ LLVM_DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
if (!EvaluateBlock(CurInst, NextBB))
return false;
diff --git a/lib/Transforms/Utils/FlattenCFG.cpp b/lib/Transforms/Utils/FlattenCFG.cpp
index a1adc31e4992..1b8bb05fc91b 100644
--- a/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/lib/Transforms/Utils/FlattenCFG.cpp
@@ -312,7 +312,7 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder) {
new UnreachableInst(CB->getContext(), CB);
} while (Iteration);
- DEBUG(dbgs() << "Use parallel and/or in:\n" << *FirstCondBlock);
+ LLVM_DEBUG(dbgs() << "Use parallel and/or in:\n" << *FirstCondBlock);
return true;
}
@@ -469,7 +469,7 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
// Remove \param SecondEntryBlock
SecondEntryBlock->dropAllReferences();
SecondEntryBlock->eraseFromParent();
- DEBUG(dbgs() << "If conditions merged into:\n" << *FirstEntryBlock);
+ LLVM_DEBUG(dbgs() << "If conditions merged into:\n" << *FirstEntryBlock);
return true;
}
diff --git a/lib/Transforms/Utils/FunctionComparator.cpp b/lib/Transforms/Utils/FunctionComparator.cpp
index 75539428b688..aada9f542bd8 100644
--- a/lib/Transforms/Utils/FunctionComparator.cpp
+++ b/lib/Transforms/Utils/FunctionComparator.cpp
@@ -377,7 +377,7 @@ int FunctionComparator::cmpConstants(const Constant *L,
}
}
default: // Unknown constant, abort.
- DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
+ LLVM_DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
llvm_unreachable("Constant ValueID not recognized.");
return -1;
}
diff --git a/lib/Transforms/Utils/LibCallsShrinkWrap.cpp b/lib/Transforms/Utils/LibCallsShrinkWrap.cpp
index 80fb9cb1aae9..9832a6f24e1f 100644
--- a/lib/Transforms/Utils/LibCallsShrinkWrap.cpp
+++ b/lib/Transforms/Utils/LibCallsShrinkWrap.cpp
@@ -79,11 +79,11 @@ public:
bool perform() {
bool Changed = false;
for (auto &CI : WorkList) {
- DEBUG(dbgs() << "CDCE calls: " << CI->getCalledFunction()->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << "CDCE calls: " << CI->getCalledFunction()->getName()
+ << "\n");
if (perform(CI)) {
Changed = true;
- DEBUG(dbgs() << "Transformed\n");
+ LLVM_DEBUG(dbgs() << "Transformed\n");
}
}
return Changed;
@@ -421,7 +421,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI,
const LibFunc &Func) {
// FIXME: LibFunc_powf and powl TBD.
if (Func != LibFunc_pow) {
- DEBUG(dbgs() << "Not handled powf() and powl()\n");
+ LLVM_DEBUG(dbgs() << "Not handled powf() and powl()\n");
return nullptr;
}
@@ -433,7 +433,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI,
if (ConstantFP *CF = dyn_cast<ConstantFP>(Base)) {
double D = CF->getValueAPF().convertToDouble();
if (D < 1.0f || D > APInt::getMaxValue(8).getZExtValue()) {
- DEBUG(dbgs() << "Not handled pow(): constant base out of range\n");
+ LLVM_DEBUG(dbgs() << "Not handled pow(): constant base out of range\n");
return nullptr;
}
@@ -447,7 +447,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI,
// If the Base value coming from an integer type.
Instruction *I = dyn_cast<Instruction>(Base);
if (!I) {
- DEBUG(dbgs() << "Not handled pow(): FP type base\n");
+ LLVM_DEBUG(dbgs() << "Not handled pow(): FP type base\n");
return nullptr;
}
unsigned Opcode = I->getOpcode();
@@ -461,7 +461,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI,
else if (BW == 32)
UpperV = 32.0f;
else {
- DEBUG(dbgs() << "Not handled pow(): type too wide\n");
+ LLVM_DEBUG(dbgs() << "Not handled pow(): type too wide\n");
return nullptr;
}
@@ -477,7 +477,7 @@ Value *LibCallsShrinkWrap::generateCondForPow(CallInst *CI,
Value *Cond0 = BBBuilder.CreateFCmp(CmpInst::FCMP_OLE, Base, V0);
return BBBuilder.CreateOr(Cond0, Cond);
}
- DEBUG(dbgs() << "Not handled pow(): base not from integer convert\n");
+ LLVM_DEBUG(dbgs() << "Not handled pow(): base not from integer convert\n");
return nullptr;
}
@@ -496,9 +496,9 @@ void LibCallsShrinkWrap::shrinkWrapCI(CallInst *CI, Value *Cond) {
SuccBB->setName("cdce.end");
CI->removeFromParent();
CallBB->getInstList().insert(CallBB->getFirstInsertionPt(), CI);
- DEBUG(dbgs() << "== Basic Block After ==");
- DEBUG(dbgs() << *CallBB->getSinglePredecessor() << *CallBB
- << *CallBB->getSingleSuccessor() << "\n");
+ LLVM_DEBUG(dbgs() << "== Basic Block After ==");
+ LLVM_DEBUG(dbgs() << *CallBB->getSinglePredecessor() << *CallBB
+ << *CallBB->getSingleSuccessor() << "\n");
}
// Perform the transformation to a single candidate.
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index 75847cc783ab..5eaeccb5a298 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -740,8 +740,8 @@ static bool CanMergeValues(Value *First, Value *Second) {
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
- DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
- << Succ->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
+ << Succ->getName() << "\n");
// Shortcut, if there is only a single predecessor it must be BB and merging
// is always safe
if (Succ->getSinglePredecessor()) return true;
@@ -764,10 +764,11 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
if (BBPreds.count(IBB) &&
!CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
PN->getIncomingValue(PI))) {
- DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
- << Succ->getName() << " is conflicting with "
- << BBPN->getName() << " with regard to common predecessor "
- << IBB->getName() << "\n");
+ LLVM_DEBUG(dbgs()
+ << "Can't fold, phi node " << PN->getName() << " in "
+ << Succ->getName() << " is conflicting with "
+ << BBPN->getName() << " with regard to common predecessor "
+ << IBB->getName() << "\n");
return false;
}
}
@@ -780,9 +781,10 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
BasicBlock *IBB = PN->getIncomingBlock(PI);
if (BBPreds.count(IBB) &&
!CanMergeValues(Val, PN->getIncomingValue(PI))) {
- DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
- << Succ->getName() << " is conflicting with regard to common "
- << "predecessor " << IBB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
+ << " in " << Succ->getName()
+ << " is conflicting with regard to common "
+ << "predecessor " << IBB->getName() << "\n");
return false;
}
}
@@ -970,7 +972,7 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
}
}
- DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
+ LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
std::vector<DominatorTree::UpdateType> Updates;
if (DDT) {
@@ -1530,7 +1532,7 @@ void llvm::salvageDebugInfo(Instruction &I) {
DIExpression::prependOpcodes(DIExpr, Ops, DIExpression::WithStackValue);
DII->setOperand(0, wrapMD(I.getOperand(0)));
DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
- DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
+ LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
};
auto applyOffset = [&](DbgInfoIntrinsic *DII, uint64_t Offset) {
@@ -1553,7 +1555,7 @@ void llvm::salvageDebugInfo(Instruction &I) {
MetadataAsValue *CastSrc = wrapMD(I.getOperand(0));
for (auto *DII : DbgUsers) {
DII->setOperand(0, CastSrc);
- DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
+ LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
}
} else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
unsigned BitWidth =
@@ -1620,7 +1622,7 @@ void llvm::salvageDebugInfo(Instruction &I) {
DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
DII->setOperand(0, AddrMD);
DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
- DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
+ LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
}
}
}
@@ -2083,8 +2085,8 @@ static unsigned replaceDominatedUsesWith(Value *From, Value *To,
if (!Dominates(Root, U))
continue;
U.set(To);
- DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
- << *To << " in " << *U << "\n");
+ LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
+ << "' as " << *To << " in " << *U << "\n");
++Count;
}
return Count;
diff --git a/lib/Transforms/Utils/LoopRotationUtils.cpp b/lib/Transforms/Utils/LoopRotationUtils.cpp
index e178c99b456d..1678781a41e4 100644
--- a/lib/Transforms/Utils/LoopRotationUtils.cpp
+++ b/lib/Transforms/Utils/LoopRotationUtils.cpp
@@ -235,15 +235,16 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
CodeMetrics Metrics;
Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues);
if (Metrics.notDuplicatable) {
- DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
- << " instructions: ";
- L->dump());
+ LLVM_DEBUG(
+ dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
+ << " instructions: ";
+ L->dump());
return false;
}
if (Metrics.convergent) {
- DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
- "instructions: ";
- L->dump());
+ LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
+ "instructions: ";
+ L->dump());
return false;
}
if (Metrics.NumInsts > MaxHeaderSize)
@@ -266,7 +267,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
if (SE)
SE->forgetTopmostLoop(L);
- DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
+ LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
// Find new Loop header. NewHeader is a Header's one and only successor
// that is inside loop. Header's other successor is outside the
@@ -477,7 +478,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// emitted code isn't too gross in this common case.
MergeBlockIntoPredecessor(OrigHeader, DT, LI);
- DEBUG(dbgs() << "LoopRotation: into "; L->dump());
+ LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());
++NumRotated;
return true;
@@ -580,8 +581,8 @@ bool LoopRotate::simplifyLoopLatch(Loop *L) {
if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
return false;
- DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
- << LastExit->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
+ << LastExit->getName() << "\n");
// Hoist the instructions from Latch into LastExit.
LastExit->getInstList().splice(BI->getIterator(), Latch->getInstList(),
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index d70fc4ac028f..b33f4f8a8107 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -141,8 +141,8 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, DominatorTree *DT,
if (!PreheaderBB)
return nullptr;
- DEBUG(dbgs() << "LoopSimplify: Creating pre-header "
- << PreheaderBB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "LoopSimplify: Creating pre-header "
+ << PreheaderBB->getName() << "\n");
// Make sure that NewBB is put someplace intelligent, which doesn't mess up
// code layout too horribly.
@@ -242,7 +242,7 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
OuterLoopPreds.push_back(PN->getIncomingBlock(i));
}
}
- DEBUG(dbgs() << "LoopSimplify: Splitting out a new outer loop\n");
+ LLVM_DEBUG(dbgs() << "LoopSimplify: Splitting out a new outer loop\n");
// If ScalarEvolution is around and knows anything about values in
// this loop, tell it to forget them, because we're about to
@@ -371,8 +371,8 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
BranchInst *BETerminator = BranchInst::Create(Header, BEBlock);
BETerminator->setDebugLoc(Header->getFirstNonPHI()->getDebugLoc());
- DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block "
- << BEBlock->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block "
+ << BEBlock->getName() << "\n");
// Move the new backedge block to right after the last backedge block.
Function::iterator InsertPos = ++BackedgeBlocks.back()->getIterator();
@@ -484,8 +484,8 @@ ReprocessLoop:
// Delete each unique out-of-loop (and thus dead) predecessor.
for (BasicBlock *P : BadPreds) {
- DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor "
- << P->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor "
+ << P->getName() << "\n");
// Zap the dead pred's terminator and replace it with unreachable.
TerminatorInst *TI = P->getTerminator();
@@ -504,8 +504,9 @@ ReprocessLoop:
if (BI->isConditional()) {
if (UndefValue *Cond = dyn_cast<UndefValue>(BI->getCondition())) {
- DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in "
- << ExitingBlock->getName() << "\n");
+ LLVM_DEBUG(dbgs()
+ << "LoopSimplify: Resolving \"br i1 undef\" to exit in "
+ << ExitingBlock->getName() << "\n");
BI->setCondition(ConstantInt::get(Cond->getType(),
!L->contains(BI->getSuccessor(0))));
@@ -641,8 +642,8 @@ ReprocessLoop:
// Success. The block is now dead, so remove it from the loop,
// update the dominator tree and delete it.
- DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block "
- << ExitingBlock->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block "
+ << ExitingBlock->getName() << "\n");
assert(pred_begin(ExitingBlock) == pred_end(ExitingBlock));
Changed = true;
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index 980f0f73ddf7..ad50d9601eca 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -110,7 +110,7 @@ foldBlockIntoPredecessor(BasicBlock *BB, LoopInfo *LI, ScalarEvolution *SE,
if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
return nullptr;
- DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);
+ LLVM_DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);
// Resolve any PHI nodes at the start of the block. They are all
// guaranteed to have exactly one entry if they exist, unless there are
@@ -297,19 +297,19 @@ LoopUnrollResult llvm::UnrollLoop(
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
- DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
+ LLVM_DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
return LoopUnrollResult::Unmodified;
}
BasicBlock *LatchBlock = L->getLoopLatch();
if (!LatchBlock) {
- DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
+ LLVM_DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
return LoopUnrollResult::Unmodified;
}
// Loops with indirectbr cannot be cloned.
if (!L->isSafeToClone()) {
- DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
+ LLVM_DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
return LoopUnrollResult::Unmodified;
}
@@ -322,8 +322,9 @@ LoopUnrollResult llvm::UnrollLoop(
if (!BI || BI->isUnconditional()) {
// The loop-rotate pass can be helpful to avoid this in many cases.
- DEBUG(dbgs() <<
- " Can't unroll; loop not terminated by a conditional branch.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << " Can't unroll; loop not terminated by a conditional branch.\n");
return LoopUnrollResult::Unmodified;
}
@@ -332,22 +333,22 @@ LoopUnrollResult llvm::UnrollLoop(
};
if (!CheckSuccessors(0, 1) && !CheckSuccessors(1, 0)) {
- DEBUG(dbgs() << "Can't unroll; only loops with one conditional latch"
- " exiting the loop can be unrolled\n");
+ LLVM_DEBUG(dbgs() << "Can't unroll; only loops with one conditional latch"
+ " exiting the loop can be unrolled\n");
return LoopUnrollResult::Unmodified;
}
if (Header->hasAddressTaken()) {
// The loop-rotate pass can be helpful to avoid this in many cases.
- DEBUG(dbgs() <<
- " Won't unroll loop: address of header block is taken.\n");
+ LLVM_DEBUG(
+ dbgs() << " Won't unroll loop: address of header block is taken.\n");
return LoopUnrollResult::Unmodified;
}
if (TripCount != 0)
- DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
+ LLVM_DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
if (TripMultiple != 1)
- DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n");
+ LLVM_DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n");
// Effectively "DCE" unrolled iterations that are beyond the tripcount
// and will never be executed.
@@ -356,7 +357,7 @@ LoopUnrollResult llvm::UnrollLoop(
// Don't enter the unroll code if there is nothing to do.
if (TripCount == 0 && Count < 2 && PeelCount == 0) {
- DEBUG(dbgs() << "Won't unroll; almost nothing to do\n");
+ LLVM_DEBUG(dbgs() << "Won't unroll; almost nothing to do\n");
return LoopUnrollResult::Unmodified;
}
@@ -407,7 +408,7 @@ LoopUnrollResult llvm::UnrollLoop(
// Loops containing convergent instructions must have a count that divides
// their TripMultiple.
- DEBUG(
+ LLVM_DEBUG(
{
bool HasConvergent = false;
for (auto &BB : L->blocks())
@@ -430,9 +431,8 @@ LoopUnrollResult llvm::UnrollLoop(
if (Force)
RuntimeTripCount = false;
else {
- DEBUG(
- dbgs() << "Wont unroll; remainder loop could not be generated"
- "when assuming runtime trip count\n");
+      LLVM_DEBUG(dbgs() << "Won't unroll; remainder loop could not be generated"
+                           " when assuming runtime trip count\n");
return LoopUnrollResult::Unmodified;
}
}
@@ -451,8 +451,8 @@ LoopUnrollResult llvm::UnrollLoop(
using namespace ore;
// Report the unrolling decision.
if (CompletelyUnroll) {
- DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
- << " with trip count " << TripCount << "!\n");
+ LLVM_DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
+ << " with trip count " << TripCount << "!\n");
if (ORE)
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
@@ -461,8 +461,8 @@ LoopUnrollResult llvm::UnrollLoop(
<< NV("UnrollCount", TripCount) << " iterations";
});
} else if (PeelCount) {
- DEBUG(dbgs() << "PEELING loop %" << Header->getName()
- << " with iteration count " << PeelCount << "!\n");
+ LLVM_DEBUG(dbgs() << "PEELING loop %" << Header->getName()
+ << " with iteration count " << PeelCount << "!\n");
if (ORE)
ORE->emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
@@ -478,29 +478,29 @@ LoopUnrollResult llvm::UnrollLoop(
<< NV("UnrollCount", Count);
};
- DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
- << " by " << Count);
+ LLVM_DEBUG(dbgs() << "UNROLLING loop %" << Header->getName() << " by "
+ << Count);
if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
- DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
+ LLVM_DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
if (ORE)
ORE->emit([&]() {
return DiagBuilder() << " with a breakout at trip "
<< NV("BreakoutTrip", BreakoutTrip);
});
} else if (TripMultiple != 1) {
- DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
+ LLVM_DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
if (ORE)
ORE->emit([&]() {
return DiagBuilder() << " with " << NV("TripMultiple", TripMultiple)
<< " trips per branch";
});
} else if (RuntimeTripCount) {
- DEBUG(dbgs() << " with run-time trip count");
+ LLVM_DEBUG(dbgs() << " with run-time trip count");
if (ORE)
ORE->emit(
[&]() { return DiagBuilder() << " with run-time trip count"; });
}
- DEBUG(dbgs() << "!\n");
+ LLVM_DEBUG(dbgs() << "!\n");
}
// We are going to make changes to this loop. SCEV may be keeping cached info
diff --git a/lib/Transforms/Utils/LoopUnrollPeel.cpp b/lib/Transforms/Utils/LoopUnrollPeel.cpp
index 96d9acddb1af..13794c53f24b 100644
--- a/lib/Transforms/Utils/LoopUnrollPeel.cpp
+++ b/lib/Transforms/Utils/LoopUnrollPeel.cpp
@@ -253,8 +253,8 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize,
// If the user provided a peel count, use that.
bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
if (UserPeelCount) {
- DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
- << " iterations.\n");
+ LLVM_DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
+ << " iterations.\n");
UP.PeelCount = UnrollForcePeelCount;
return;
}
@@ -298,8 +298,9 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize,
DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
// Consider max peel count limitation.
assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
- DEBUG(dbgs() << "Peel " << DesiredPeelCount << " iteration(s) to turn"
- << " some Phis into invariants.\n");
+ LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCount
+ << " iteration(s) to turn"
+ << " some Phis into invariants.\n");
UP.PeelCount = DesiredPeelCount;
return;
}
@@ -320,20 +321,22 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize,
if (!PeelCount)
return;
- DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
- << "\n");
+ LLVM_DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
+ << "\n");
if (*PeelCount) {
if ((*PeelCount <= UnrollPeelMaxCount) &&
(LoopSize * (*PeelCount + 1) <= UP.Threshold)) {
- DEBUG(dbgs() << "Peeling first " << *PeelCount << " iterations.\n");
+ LLVM_DEBUG(dbgs() << "Peeling first " << *PeelCount
+ << " iterations.\n");
UP.PeelCount = *PeelCount;
return;
}
- DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
- DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
- DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1) << "\n");
- DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n");
+ LLVM_DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
+ LLVM_DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
+ LLVM_DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1)
+ << "\n");
+ LLVM_DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n");
}
}
}
diff --git a/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 0db7c0467dc3..1b0e3a72c2de 100644
--- a/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -418,8 +418,9 @@ canSafelyUnrollMultiExitLoop(Loop *L, SmallVectorImpl<BasicBlock *> &OtherExits,
// UnrollRuntimeMultiExit is true. This will need updating the logic in
// connectEpilog/connectProlog.
if (!LatchExit->getSinglePredecessor()) {
- DEBUG(dbgs() << "Bailout for multi-exit handling when latch exit has >1 "
- "predecessor.\n");
+ LLVM_DEBUG(
+ dbgs() << "Bailout for multi-exit handling when latch exit has >1 "
+ "predecessor.\n");
return false;
}
// FIXME: We bail out of multi-exit unrolling when epilog loop is generated
@@ -528,14 +529,14 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
LoopInfo *LI, ScalarEvolution *SE,
DominatorTree *DT, AssumptionCache *AC,
bool PreserveLCSSA) {
- DEBUG(dbgs() << "Trying runtime unrolling on Loop: \n");
- DEBUG(L->dump());
- DEBUG(UseEpilogRemainder ? dbgs() << "Using epilog remainder.\n" :
- dbgs() << "Using prolog remainder.\n");
+ LLVM_DEBUG(dbgs() << "Trying runtime unrolling on Loop: \n");
+ LLVM_DEBUG(L->dump());
+ LLVM_DEBUG(UseEpilogRemainder ? dbgs() << "Using epilog remainder.\n"
+ : dbgs() << "Using prolog remainder.\n");
// Make sure the loop is in canonical form.
if (!L->isLoopSimplifyForm()) {
- DEBUG(dbgs() << "Not in simplify form!\n");
+ LLVM_DEBUG(dbgs() << "Not in simplify form!\n");
return false;
}
@@ -561,7 +562,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
// Support only single exit and exiting block unless multi-exit loop unrolling is enabled.
if (!isMultiExitUnrollingEnabled &&
(!L->getExitingBlock() || OtherExits.size())) {
- DEBUG(
+ LLVM_DEBUG(
dbgs()
<< "Multiple exit/exiting blocks in loop and multi-exit unrolling not "
"enabled!\n");
@@ -581,7 +582,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
const SCEV *BECountSC = SE->getExitCount(L, Latch);
if (isa<SCEVCouldNotCompute>(BECountSC) ||
!BECountSC->getType()->isIntegerTy()) {
- DEBUG(dbgs() << "Could not compute exit block SCEV\n");
+ LLVM_DEBUG(dbgs() << "Could not compute exit block SCEV\n");
return false;
}
@@ -591,7 +592,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
const SCEV *TripCountSC =
SE->getAddExpr(BECountSC, SE->getConstant(BECountSC->getType(), 1));
if (isa<SCEVCouldNotCompute>(TripCountSC)) {
- DEBUG(dbgs() << "Could not compute trip count SCEV.\n");
+ LLVM_DEBUG(dbgs() << "Could not compute trip count SCEV.\n");
return false;
}
@@ -601,15 +602,16 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
SCEVExpander Expander(*SE, DL, "loop-unroll");
if (!AllowExpensiveTripCount &&
Expander.isHighCostExpansion(TripCountSC, L, PreHeaderBR)) {
- DEBUG(dbgs() << "High cost for expanding trip count scev!\n");
+ LLVM_DEBUG(dbgs() << "High cost for expanding trip count scev!\n");
return false;
}
// This constraint lets us deal with an overflowing trip count easily; see the
// comment on ModVal below.
if (Log2_32(Count) > BEWidth) {
- DEBUG(dbgs()
- << "Count failed constraint on overflow trip count calculation.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "Count failed constraint on overflow trip count calculation.\n");
return false;
}
@@ -896,7 +898,7 @@ bool llvm::UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
}
if (remainderLoop && UnrollRemainder) {
- DEBUG(dbgs() << "Unrolling remainder loop\n");
+ LLVM_DEBUG(dbgs() << "Unrolling remainder loop\n");
UnrollLoop(remainderLoop, /*Count*/ Count - 1, /*TripCount*/ Count - 1,
/*Force*/ false, /*AllowRuntime*/ false,
/*AllowExpensiveTripCount*/ false, /*PreserveCondBr*/ true,
diff --git a/lib/Transforms/Utils/LoopUtils.cpp b/lib/Transforms/Utils/LoopUtils.cpp
index cec34b09f208..46af120a428b 100644
--- a/lib/Transforms/Utils/LoopUtils.cpp
+++ b/lib/Transforms/Utils/LoopUtils.cpp
@@ -555,47 +555,48 @@ bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
if (AddReductionVar(Phi, RK_IntegerAdd, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_IntegerMult, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_IntegerOr, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_IntegerAnd, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_IntegerXor, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_IntegerMinMax, TheLoop, HasFunNoNaNAttr, RedDes,
DB, AC, DT)) {
- DEBUG(dbgs() << "Found a MINMAX reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found a MINMAX reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_FloatMult, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_FloatAdd, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
return true;
}
if (AddReductionVar(Phi, RK_FloatMinMax, TheLoop, HasFunNoNaNAttr, RedDes, DB,
AC, DT)) {
- DEBUG(dbgs() << "Found an float MINMAX reduction PHI." << *Phi << "\n");
+    LLVM_DEBUG(dbgs() << "Found a float MINMAX reduction PHI." << *Phi
+                      << "\n");
return true;
}
// Not a reduction of known type.
@@ -1052,7 +1053,7 @@ bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
AR = PSE.getAsAddRec(Phi);
if (!AR) {
- DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
+ LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
return false;
}
@@ -1086,14 +1087,15 @@ bool InductionDescriptor::isInductionPHI(
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
if (!AR) {
- DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
+ LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
return false;
}
if (AR->getLoop() != TheLoop) {
// FIXME: We should treat this as a uniform. Unfortunately, we
// don't currently know how to handled uniform PHIs.
- DEBUG(dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
return false;
}
@@ -1174,11 +1176,12 @@ bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
BB, InLoopPredecessors, ".loopexit", DT, LI, PreserveLCSSA);
if (!NewExitBB)
- DEBUG(dbgs() << "WARNING: Can't create a dedicated exit block for loop: "
- << *L << "\n");
+ LLVM_DEBUG(
+ dbgs() << "WARNING: Can't create a dedicated exit block for loop: "
+ << *L << "\n");
else
- DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
- << NewExitBB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
+ << NewExitBB->getName() << "\n");
return true;
};
diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp
index 8643ad8cb00e..441c5fd8b5af 100644
--- a/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/lib/Transforms/Utils/LowerSwitch.cpp
@@ -242,14 +242,13 @@ LowerSwitch::switchConvert(CaseItr Begin, CaseItr End, ConstantInt *LowerBound,
unsigned Mid = Size / 2;
std::vector<CaseRange> LHS(Begin, Begin + Mid);
- DEBUG(dbgs() << "LHS: " << LHS << "\n");
+ LLVM_DEBUG(dbgs() << "LHS: " << LHS << "\n");
std::vector<CaseRange> RHS(Begin + Mid, End);
- DEBUG(dbgs() << "RHS: " << RHS << "\n");
+ LLVM_DEBUG(dbgs() << "RHS: " << RHS << "\n");
CaseRange &Pivot = *(Begin + Mid);
- DEBUG(dbgs() << "Pivot ==> "
- << Pivot.Low->getValue()
- << " -" << Pivot.High->getValue() << "\n");
+ LLVM_DEBUG(dbgs() << "Pivot ==> " << Pivot.Low->getValue() << " -"
+ << Pivot.High->getValue() << "\n");
// NewLowerBound here should never be the integer minimal value.
// This is because it is computed from a case range that is never
@@ -271,20 +270,14 @@ LowerSwitch::switchConvert(CaseItr Begin, CaseItr End, ConstantInt *LowerBound,
NewUpperBound = LHS.back().High;
}
- DEBUG(dbgs() << "LHS Bounds ==> ";
- if (LowerBound) {
- dbgs() << LowerBound->getSExtValue();
- } else {
- dbgs() << "NONE";
- }
- dbgs() << " - " << NewUpperBound->getSExtValue() << "\n";
- dbgs() << "RHS Bounds ==> ";
- dbgs() << NewLowerBound->getSExtValue() << " - ";
- if (UpperBound) {
- dbgs() << UpperBound->getSExtValue() << "\n";
- } else {
- dbgs() << "NONE\n";
- });
+ LLVM_DEBUG(dbgs() << "LHS Bounds ==> "; if (LowerBound) {
+ dbgs() << LowerBound->getSExtValue();
+ } else { dbgs() << "NONE"; } dbgs() << " - "
+ << NewUpperBound->getSExtValue() << "\n";
+ dbgs() << "RHS Bounds ==> ";
+ dbgs() << NewLowerBound->getSExtValue() << " - "; if (UpperBound) {
+ dbgs() << UpperBound->getSExtValue() << "\n";
+ } else { dbgs() << "NONE\n"; });
// Create a new node that checks if the value is < pivot. Go to the
// left branch if it is and right branch if not.
@@ -440,9 +433,9 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI,
// Prepare cases vector.
CaseVector Cases;
unsigned numCmps = Clusterify(Cases, SI);
- DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
- << ". Total compares: " << numCmps << "\n");
- DEBUG(dbgs() << "Cases: " << Cases << "\n");
+ LLVM_DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
+ << ". Total compares: " << numCmps << "\n");
+ LLVM_DEBUG(dbgs() << "Cases: " << Cases << "\n");
(void)numCmps;
ConstantInt *LowerBound = nullptr;
diff --git a/lib/Transforms/Utils/PredicateInfo.cpp b/lib/Transforms/Utils/PredicateInfo.cpp
index 62235895a1ad..6da3c7a90b61 100644
--- a/lib/Transforms/Utils/PredicateInfo.cpp
+++ b/lib/Transforms/Utils/PredicateInfo.cpp
@@ -625,15 +625,15 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
// we want to.
bool PossibleCopy = VD.PInfo != nullptr;
if (RenameStack.empty()) {
- DEBUG(dbgs() << "Rename Stack is empty\n");
+ LLVM_DEBUG(dbgs() << "Rename Stack is empty\n");
} else {
- DEBUG(dbgs() << "Rename Stack Top DFS numbers are ("
- << RenameStack.back().DFSIn << ","
- << RenameStack.back().DFSOut << ")\n");
+ LLVM_DEBUG(dbgs() << "Rename Stack Top DFS numbers are ("
+ << RenameStack.back().DFSIn << ","
+ << RenameStack.back().DFSOut << ")\n");
}
- DEBUG(dbgs() << "Current DFS numbers are (" << VD.DFSIn << ","
- << VD.DFSOut << ")\n");
+ LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << VD.DFSIn << ","
+ << VD.DFSOut << ")\n");
bool ShouldPush = (VD.Def || PossibleCopy);
bool OutOfScope = !stackIsInScope(RenameStack, VD);
@@ -652,7 +652,7 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
if (VD.Def || PossibleCopy)
continue;
if (!DebugCounter::shouldExecute(RenameCounter)) {
- DEBUG(dbgs() << "Skipping execution due to debug counter\n");
+ LLVM_DEBUG(dbgs() << "Skipping execution due to debug counter\n");
continue;
}
ValueDFS &Result = RenameStack.back();
@@ -663,8 +663,9 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
if (!Result.Def)
Result.Def = materializeStack(Counter, RenameStack, Op);
- DEBUG(dbgs() << "Found replacement " << *Result.Def << " for "
- << *VD.U->get() << " in " << *(VD.U->getUser()) << "\n");
+ LLVM_DEBUG(dbgs() << "Found replacement " << *Result.Def << " for "
+ << *VD.U->get() << " in " << *(VD.U->getUser())
+ << "\n");
assert(DT.dominates(cast<Instruction>(Result.Def), *VD.U) &&
"Predicateinfo def should have dominated this use");
VD.U->set(Result.Def);
diff --git a/lib/Transforms/Utils/SSAUpdater.cpp b/lib/Transforms/Utils/SSAUpdater.cpp
index b2231d68a301..ca184ed7c4e3 100644
--- a/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/lib/Transforms/Utils/SSAUpdater.cpp
@@ -178,7 +178,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
// If the client wants to know about all new instructions, tell it.
if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);
- DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n");
+ LLVM_DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n");
return InsertedPHI;
}
diff --git a/lib/Transforms/Utils/SSAUpdaterBulk.cpp b/lib/Transforms/Utils/SSAUpdaterBulk.cpp
index 61ceb9c92778..397bac2940a4 100644
--- a/lib/Transforms/Utils/SSAUpdaterBulk.cpp
+++ b/lib/Transforms/Utils/SSAUpdaterBulk.cpp
@@ -40,8 +40,8 @@ static BasicBlock *getUserBB(Use *U) {
/// AddAvailableValue or AddUse calls.
unsigned SSAUpdaterBulk::AddVariable(StringRef Name, Type *Ty) {
unsigned Var = Rewrites.size();
- DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": initialized with Ty = " << *Ty
- << ", Name = " << Name << "\n");
+ LLVM_DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": initialized with Ty = "
+ << *Ty << ", Name = " << Name << "\n");
RewriteInfo RI(Name, Ty);
Rewrites.push_back(RI);
return Var;
@@ -51,8 +51,9 @@ unsigned SSAUpdaterBulk::AddVariable(StringRef Name, Type *Ty) {
/// specified value.
void SSAUpdaterBulk::AddAvailableValue(unsigned Var, BasicBlock *BB, Value *V) {
assert(Var < Rewrites.size() && "Variable not found!");
- DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": added new available value"
- << *V << " in " << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "SSAUpdater: Var=" << Var
+                    << ": added new available value " << *V << " in "
+ << BB->getName() << "\n");
Rewrites[Var].Defines[BB] = V;
}
@@ -60,8 +61,8 @@ void SSAUpdaterBulk::AddAvailableValue(unsigned Var, BasicBlock *BB, Value *V) {
/// rewritten value when RewriteAllUses is called.
void SSAUpdaterBulk::AddUse(unsigned Var, Use *U) {
assert(Var < Rewrites.size() && "Variable not found!");
- DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": added a use" << *U->get()
- << " in " << getUserBB(U)->getName() << "\n");
+  LLVM_DEBUG(dbgs() << "SSAUpdater: Var=" << Var << ": added a use " << *U->get()
+ << " in " << getUserBB(U)->getName() << "\n");
Rewrites[Var].Uses.push_back(U);
}
@@ -134,7 +135,8 @@ void SSAUpdaterBulk::RewriteAllUses(DominatorTree *DT,
// this set for computing iterated dominance frontier (IDF).
// The IDF blocks are the blocks where we need to insert new phi-nodes.
ForwardIDFCalculator IDF(*DT);
- DEBUG(dbgs() << "SSAUpdater: rewriting " << R.Uses.size() << " use(s)\n");
+ LLVM_DEBUG(dbgs() << "SSAUpdater: rewriting " << R.Uses.size()
+ << " use(s)\n");
SmallPtrSet<BasicBlock *, 2> DefBlocks;
for (auto &Def : R.Defines)
@@ -181,8 +183,8 @@ void SSAUpdaterBulk::RewriteAllUses(DominatorTree *DT,
// Notify that users of the existing value that it is being replaced.
if (OldVal != V && OldVal->hasValueHandle())
ValueHandleBase::ValueIsRAUWd(OldVal, V);
- DEBUG(dbgs() << "SSAUpdater: replacing " << *OldVal << " with " << *V
- << "\n");
+ LLVM_DEBUG(dbgs() << "SSAUpdater: replacing " << *OldVal << " with " << *V
+ << "\n");
U->set(V);
}
}
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 28d2606e9ac8..833e13f890d5 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -845,9 +845,9 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor(
// Remove PHI node entries for the dead edge.
ThisCases[0].Dest->removePredecessor(TI->getParent());
- DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
- << "Through successor TI: " << *TI << "Leaving: " << *NI
- << "\n");
+ LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
+ << "Through successor TI: " << *TI << "Leaving: " << *NI
+ << "\n");
EraseTerminatorInstAndDCECond(TI);
return true;
@@ -859,8 +859,8 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor(
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
DeadCases.insert(PredCases[i].Value);
- DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
- << "Through successor TI: " << *TI);
+ LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
+ << "Through successor TI: " << *TI);
// Collect branch weights into a vector.
SmallVector<uint32_t, 8> Weights;
@@ -886,7 +886,7 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor(
if (HasWeight && Weights.size() >= 2)
setBranchWeights(SI, Weights);
- DEBUG(dbgs() << "Leaving: " << *TI << "\n");
+ LLVM_DEBUG(dbgs() << "Leaving: " << *TI << "\n");
return true;
}
@@ -927,9 +927,9 @@ bool SimplifyCFGOpt::SimplifyEqualityComparisonWithOnlyPredecessor(
Instruction *NI = Builder.CreateBr(TheRealDest);
(void)NI;
- DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
- << "Through successor TI: " << *TI << "Leaving: " << *NI
- << "\n");
+ LLVM_DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
+ << "Through successor TI: " << *TI << "Leaving: " << *NI
+ << "\n");
EraseTerminatorInstAndDCECond(TI);
return true;
@@ -1739,7 +1739,8 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) {
LockstepReverseIterator LRI(UnconditionalPreds);
while (LRI.isValid() &&
canSinkInstructions(*LRI, PHIOperands)) {
- DEBUG(dbgs() << "SINK: instruction can be sunk: " << *(*LRI)[0] << "\n");
+ LLVM_DEBUG(dbgs() << "SINK: instruction can be sunk: " << *(*LRI)[0]
+ << "\n");
InstructionsToSink.insert((*LRI).begin(), (*LRI).end());
++ScanIdx;
--LRI;
@@ -1751,7 +1752,7 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) {
for (auto *V : PHIOperands[I])
if (InstructionsToSink.count(V) == 0)
++NumPHIdValues;
- DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n");
+ LLVM_DEBUG(dbgs() << "SINK: #phid values: " << NumPHIdValues << "\n");
unsigned NumPHIInsts = NumPHIdValues / UnconditionalPreds.size();
if ((NumPHIdValues % UnconditionalPreds.size()) != 0)
NumPHIInsts++;
@@ -1779,7 +1780,7 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) {
if (!Profitable)
return false;
- DEBUG(dbgs() << "SINK: Splitting edge\n");
+ LLVM_DEBUG(dbgs() << "SINK: Splitting edge\n");
// We have a conditional edge and we're going to sink some instructions.
// Insert a new block postdominating all blocks we're going to sink from.
if (!SplitBlockPredecessors(BB, UnconditionalPreds, ".sink.split"))
@@ -1801,16 +1802,17 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) {
// and never actually sink it which means we produce more PHIs than intended.
// This is unlikely in practice though.
for (unsigned SinkIdx = 0; SinkIdx != ScanIdx; ++SinkIdx) {
- DEBUG(dbgs() << "SINK: Sink: "
- << *UnconditionalPreds[0]->getTerminator()->getPrevNode()
- << "\n");
+ LLVM_DEBUG(dbgs() << "SINK: Sink: "
+ << *UnconditionalPreds[0]->getTerminator()->getPrevNode()
+ << "\n");
// Because we've sunk every instruction in turn, the current instruction to
// sink is always at index 0.
LRI.reset();
if (!ProfitableToSinkInstruction(LRI)) {
// Too many PHIs would be created.
- DEBUG(dbgs() << "SINK: stopping here, too many PHIs would be created!\n");
+ LLVM_DEBUG(
+ dbgs() << "SINK: stopping here, too many PHIs would be created!\n");
break;
}
@@ -2053,7 +2055,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB,
return false;
// If we get here, we can hoist the instruction and if-convert.
- DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";);
+ LLVM_DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *ThenBB << "\n";);
// Insert a select of the value of the speculated store.
if (SpeculatedStoreValue) {
@@ -2359,8 +2361,9 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetTransformInfo &TTI,
}
}
- DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond << " T: "
- << IfTrue->getName() << " F: " << IfFalse->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond
+ << " T: " << IfTrue->getName()
+ << " F: " << IfFalse->getName() << "\n");
// If we can still promote the PHI nodes after this gauntlet of tests,
// do all of the PHI's now.
@@ -2484,9 +2487,9 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
(void)RI;
- DEBUG(dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:"
- << "\n " << *BI << "NewRet = " << *RI
- << "TRUEBLOCK: " << *TrueSucc << "FALSEBLOCK: " << *FalseSucc);
+ LLVM_DEBUG(dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:"
+ << "\n " << *BI << "NewRet = " << *RI << "TRUEBLOCK: "
+ << *TrueSucc << "FALSEBLOCK: " << *FalseSucc);
EraseTerminatorInstAndDCECond(BI);
@@ -2659,7 +2662,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold) {
continue;
}
- DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB);
+ LLVM_DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB);
IRBuilder<> Builder(PBI);
// If we need to invert the condition in the pred block to match, do so now.
@@ -3282,8 +3285,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
// Finally, if everything is ok, fold the branches to logical ops.
BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1);
- DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent()
- << "AND: " << *BI->getParent());
+ LLVM_DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent()
+ << "AND: " << *BI->getParent());
// If OtherDest *is* BB, then BB is a basic block with a single conditional
// branch in it, where one edge (OtherDest) goes back to itself but the other
@@ -3301,7 +3304,7 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
OtherDest = InfLoopBlock;
}
- DEBUG(dbgs() << *PBI->getParent()->getParent());
+ LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent());
// BI may have other predecessors. Because of this, we leave
// it alone, but modify PBI.
@@ -3385,8 +3388,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI,
}
}
- DEBUG(dbgs() << "INTO: " << *PBI->getParent());
- DEBUG(dbgs() << *PBI->getParent()->getParent());
+ LLVM_DEBUG(dbgs() << "INTO: " << *PBI->getParent());
+ LLVM_DEBUG(dbgs() << *PBI->getParent()->getParent());
// This basic block is probably dead. We know it has at least
// one fewer predecessor.
@@ -3686,9 +3689,9 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder,
BasicBlock *BB = BI->getParent();
- DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size()
- << " cases into SWITCH. BB is:\n"
- << *BB);
+ LLVM_DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size()
+ << " cases into SWITCH. BB is:\n"
+ << *BB);
// If there are any extra values that couldn't be folded into the switch
// then we evaluate them with an explicit branch first. Split the block
@@ -3711,8 +3714,8 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder,
// for the edge we just added.
AddPredecessorToBlock(EdgeBB, BB, NewBB);
- DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase
- << "\nEXTRABB = " << *BB);
+ LLVM_DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase
+ << "\nEXTRABB = " << *BB);
BB = NewBB;
}
@@ -3743,7 +3746,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, IRBuilder<> &Builder,
// Erase the old branch instruction.
EraseTerminatorInstAndDCECond(BI);
- DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n');
+ LLVM_DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n');
return true;
}
@@ -4071,8 +4074,8 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) {
if (!UncondBranchPreds.empty() && DupRet) {
while (!UncondBranchPreds.empty()) {
BasicBlock *Pred = UncondBranchPreds.pop_back_val();
- DEBUG(dbgs() << "FOLDING: " << *BB
- << "INTO UNCOND BRANCH PRED: " << *Pred);
+ LLVM_DEBUG(dbgs() << "FOLDING: " << *BB
+ << "INTO UNCOND BRANCH PRED: " << *Pred);
(void)FoldReturnIntoUncondBranch(RI, BB, Pred);
}
@@ -4396,7 +4399,8 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC,
if (Known.Zero.intersects(CaseVal) || !Known.One.isSubsetOf(CaseVal) ||
(CaseVal.getMinSignedBits() > MaxSignificantBitsInCond)) {
DeadCases.push_back(Case.getCaseValue());
- DEBUG(dbgs() << "SimplifyCFG: switch case " << CaseVal << " is dead.\n");
+ LLVM_DEBUG(dbgs() << "SimplifyCFG: switch case " << CaseVal
+ << " is dead.\n");
}
}
@@ -4412,7 +4416,7 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC,
if (HasDefault && DeadCases.empty() &&
NumUnknownBits < 64 /* avoid overflow */ &&
SI->getNumCases() == (1ULL << NumUnknownBits)) {
- DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n");
+ LLVM_DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n");
BasicBlock *NewDefault =
SplitBlockPredecessors(SI->getDefaultDest(), SI->getParent(), "");
SI->setDefaultDest(&*NewDefault);
@@ -5996,7 +6000,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
// or that just have themself as a predecessor. These are unreachable.
if ((pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) ||
BB->getSinglePredecessor() == BB) {
- DEBUG(dbgs() << "Removing BB: \n" << *BB);
+ LLVM_DEBUG(dbgs() << "Removing BB: \n" << *BB);
DeleteDeadBlock(BB);
return true;
}
diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp
index ad1faea0a7ae..a31403907ff1 100644
--- a/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -147,8 +147,8 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
if (SE->getSCEV(UseInst) != FoldedExpr)
return nullptr;
- DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
- << " -> " << *UseInst << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
+ << " -> " << *UseInst << '\n');
UseInst->setOperand(OperIdx, IVSrc);
assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper");
@@ -221,7 +221,7 @@ bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp,
// for now.
return false;
- DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
ICmp->setPredicate(InvariantPredicate);
ICmp->setOperand(0, NewLHS);
ICmp->setOperand(1, NewRHS);
@@ -252,11 +252,11 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) {
if (SE->isKnownPredicate(Pred, S, X)) {
ICmp->replaceAllUsesWith(ConstantInt::getTrue(ICmp->getContext()));
DeadInsts.emplace_back(ICmp);
- DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
} else if (SE->isKnownPredicate(ICmpInst::getInversePredicate(Pred), S, X)) {
ICmp->replaceAllUsesWith(ConstantInt::getFalse(ICmp->getContext()));
DeadInsts.emplace_back(ICmp);
- DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
} else if (makeIVComparisonInvariant(ICmp, IVOperand)) {
// fallthrough to end of function
} else if (ICmpInst::isSigned(OriginalPred) &&
@@ -267,7 +267,8 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) {
// we turn the instruction's predicate to its unsigned version. Note that
// we cannot rely on Pred here unless we check if we have swapped it.
assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
- DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp
+ << '\n');
ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
} else
return;
@@ -293,7 +294,7 @@ bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
SDiv->getName() + ".udiv", SDiv);
UDiv->setIsExact(SDiv->isExact());
SDiv->replaceAllUsesWith(UDiv);
- DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
++NumSimplifiedSDiv;
Changed = true;
DeadInsts.push_back(SDiv);
@@ -309,7 +310,7 @@ void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D,
Rem->getName() + ".urem", Rem);
Rem->replaceAllUsesWith(URem);
- DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
++NumSimplifiedSRem;
Changed = true;
DeadInsts.emplace_back(Rem);
@@ -318,7 +319,7 @@ void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
// i % n --> i if i is in [0,n).
void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) {
Rem->replaceAllUsesWith(Rem->getOperand(0));
- DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
++NumElimRem;
Changed = true;
DeadInsts.emplace_back(Rem);
@@ -332,7 +333,7 @@ void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) {
SelectInst *Sel =
SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem);
Rem->replaceAllUsesWith(Sel);
- DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
++NumElimRem;
Changed = true;
DeadInsts.emplace_back(Rem);
@@ -548,8 +549,8 @@ bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) {
auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP);
I->replaceAllUsesWith(Invariant);
- DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
- << " with loop invariant: " << *S << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
+ << " with loop invariant: " << *S << '\n');
++NumFoldedUser;
Changed = true;
DeadInsts.emplace_back(I);
@@ -589,7 +590,7 @@ bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
return false;
- DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');
+ LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');
UseInst->replaceAllUsesWith(IVOperand);
++NumElimIdentity;
diff --git a/lib/Transforms/Utils/SplitModule.cpp b/lib/Transforms/Utils/SplitModule.cpp
index 39a4e565c2e2..f8d758c54983 100644
--- a/lib/Transforms/Utils/SplitModule.cpp
+++ b/lib/Transforms/Utils/SplitModule.cpp
@@ -101,7 +101,8 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
// At this point module should have the proper mix of globals and locals.
// As we attempt to partition this module, we must not change any
// locals to globals.
- DEBUG(dbgs() << "Partition module with (" << M->size() << ")functions\n");
+ LLVM_DEBUG(dbgs() << "Partition module with (" << M->size()
+                    << ") functions\n");
ClusterMapType GVtoClusterMap;
ComdatMembersType ComdatMembers;
@@ -194,16 +195,17 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
unsigned CurrentClusterSize = BalancinQueue.top().second;
BalancinQueue.pop();
- DEBUG(dbgs() << "Root[" << CurrentClusterID << "] cluster_size(" << I.first
- << ") ----> " << I.second->getData()->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Root[" << CurrentClusterID << "] cluster_size("
+ << I.first << ") ----> " << I.second->getData()->getName()
+ << "\n");
for (ClusterMapType::member_iterator MI =
GVtoClusterMap.findLeader(I.second);
MI != GVtoClusterMap.member_end(); ++MI) {
if (!Visited.insert(*MI).second)
continue;
- DEBUG(dbgs() << "----> " << (*MI)->getName()
- << ((*MI)->hasLocalLinkage() ? " l " : " e ") << "\n");
+ LLVM_DEBUG(dbgs() << "----> " << (*MI)->getName()
+ << ((*MI)->hasLocalLinkage() ? " l " : " e ") << "\n");
Visited.insert(*MI);
ClusterIDMap[*MI] = CurrentClusterID;
CurrentClusterSize++;
diff --git a/lib/Transforms/Utils/VNCoercion.cpp b/lib/Transforms/Utils/VNCoercion.cpp
index c3feea6a0a41..5f71bdcf5973 100644
--- a/lib/Transforms/Utils/VNCoercion.cpp
+++ b/lib/Transforms/Utils/VNCoercion.cpp
@@ -389,8 +389,8 @@ Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
NewLoad->takeName(SrcVal);
NewLoad->setAlignment(SrcVal->getAlignment());
- DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
- DEBUG(dbgs() << "TO: " << *NewLoad << "\n");
+ LLVM_DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
+ LLVM_DEBUG(dbgs() << "TO: " << *NewLoad << "\n");
// Replace uses of the original load with the wider load. On a big endian
// system, we need to shift down to get the relevant bits.
diff --git a/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index c1d3f9255150..a6acf3e558a2 100644
--- a/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -510,7 +510,7 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
SmallVector<Instruction *, 16> ChainInstrs;
bool IsLoadChain = isa<LoadInst>(Chain[0]);
- DEBUG({
+ LLVM_DEBUG({
for (Instruction *I : Chain) {
if (IsLoadChain)
assert(isa<LoadInst>(I) &&
@@ -532,11 +532,12 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
Intrinsic::sideeffect) {
// Ignore llvm.sideeffect calls.
} else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
- DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I << '\n');
+ LLVM_DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I
+ << '\n');
break;
} else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) {
- DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
- << '\n');
+ LLVM_DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
+ << '\n');
break;
}
}
@@ -588,7 +589,7 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
MemoryLocation::get(ChainInstr))) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "LSV: Found alias:\n"
" Aliasing instruction and pointer:\n"
<< " " << *MemInstr << '\n'
@@ -744,7 +745,7 @@ bool Vectorizer::vectorizeChains(InstrListMap &Map) {
if (Size < 2)
continue;
- DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");
+ LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");
// Process the stores in chunks of 64.
for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
@@ -758,7 +759,8 @@ bool Vectorizer::vectorizeChains(InstrListMap &Map) {
}
bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
- DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() << " instructions.\n");
+ LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size()
+ << " instructions.\n");
SmallVector<int, 16> Heads, Tails;
int ConsecutiveChain[64];
@@ -894,14 +896,14 @@ bool Vectorizer::vectorizeStoreChain(
// vector factor, break it into two pieces.
unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
- DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
- " Creating two separate arrays.\n");
+ LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
+ " Creating two separate arrays.\n");
return vectorizeStoreChain(Chain.slice(0, TargetVF),
InstructionsProcessed) |
vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "LSV: Stores to vectorize:\n";
for (Instruction *I : Chain)
dbgs() << " " << *I << "\n";
@@ -1042,8 +1044,8 @@ bool Vectorizer::vectorizeLoadChain(
// vector factor, break it into two pieces.
unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
- DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
- " Creating two separate arrays.\n");
+ LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
+ " Creating two separate arrays.\n");
return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) |
vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
}
@@ -1066,7 +1068,7 @@ bool Vectorizer::vectorizeLoadChain(
Alignment = NewAlign;
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "LSV: Loads to vectorize:\n";
for (Instruction *I : Chain)
I->dump();
@@ -1149,7 +1151,7 @@ bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
SzInBytes * 8, AddressSpace,
Alignment, &Fast);
- DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
- << " and fast? " << Fast << "\n";);
+ LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
+ << " and fast? " << Fast << "\n";);
return !Allows || !Fast;
}
diff --git a/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index d1fd2eb68a8b..697bc1b448d7 100644
--- a/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -98,26 +98,26 @@ LoopVectorizeHints::LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
// consider the loop to have been already vectorized because there's
// nothing more that we can do.
IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
- DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
- << "LV: Interleaving disabled by the pass manager\n");
+ LLVM_DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
+ << "LV: Interleaving disabled by the pass manager\n");
}
bool LoopVectorizeHints::allowVectorization(Function *F, Loop *L,
bool AlwaysVectorize) const {
if (getForce() == LoopVectorizeHints::FK_Disabled) {
- DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
emitRemarkWithHints();
return false;
}
if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
- DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
emitRemarkWithHints();
return false;
}
if (getIsVectorized() == 1) {
- DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
// FIXME: Add interleave.disable metadata. This will allow
// vectorize.disable to be used without disabling the pass and errors
// to differentiate between disabled vectorization and a width of 1.
@@ -223,7 +223,7 @@ void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) {
if (H->validate(Val))
H->Value = Val;
else
- DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
+ LLVM_DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
break;
}
}
@@ -309,7 +309,7 @@ bool LoopVectorizationRequirements::doesNotMeet(
<< "loop not vectorized: cannot prove it is safe to reorder "
"memory operations";
});
- DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
+ LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
Failed = true;
}
@@ -350,7 +350,7 @@ static bool isUniformLoop(Loop *Lp, Loop *OuterLp) {
// 1.
PHINode *IV = Lp->getCanonicalInductionVariable();
if (!IV) {
- DEBUG(dbgs() << "LV: Canonical IV not found.\n");
+ LLVM_DEBUG(dbgs() << "LV: Canonical IV not found.\n");
return false;
}
@@ -358,14 +358,15 @@ static bool isUniformLoop(Loop *Lp, Loop *OuterLp) {
BasicBlock *Latch = Lp->getLoopLatch();
auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
if (!LatchBr || LatchBr->isUnconditional()) {
- DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n");
+ LLVM_DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n");
return false;
}
// 3.
auto *LatchCmp = dyn_cast<CmpInst>(LatchBr->getCondition());
if (!LatchCmp) {
- DEBUG(dbgs() << "LV: Loop latch condition is not a compare instruction.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Loop latch condition is not a compare instruction.\n");
return false;
}
@@ -374,7 +375,7 @@ static bool isUniformLoop(Loop *Lp, Loop *OuterLp) {
Value *IVUpdate = IV->getIncomingValueForBlock(Latch);
if (!(CondOp0 == IVUpdate && OuterLp->isLoopInvariant(CondOp1)) &&
!(CondOp1 == IVUpdate && OuterLp->isLoopInvariant(CondOp0))) {
- DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n");
+ LLVM_DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n");
return false;
}
@@ -441,7 +442,7 @@ static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
Instruction *UI = cast<Instruction>(U);
// This user may be a reduction exit value.
if (!TheLoop->contains(UI)) {
- DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
return true;
}
}
@@ -474,7 +475,7 @@ bool LoopVectorizationLegality::canVectorizeOuterLoop() {
// not supported yet.
auto *Br = dyn_cast<BranchInst>(BB->getTerminator());
if (!Br) {
- DEBUG(dbgs() << "LV: Unsupported basic block terminator.\n");
+ LLVM_DEBUG(dbgs() << "LV: Unsupported basic block terminator.\n");
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
<< "loop control flow is not understood by vectorizer");
if (DoExtraAnalysis)
@@ -490,7 +491,7 @@ bool LoopVectorizationLegality::canVectorizeOuterLoop() {
!TheLoop->isLoopInvariant(Br->getCondition()) &&
!LI->isLoopHeader(Br->getSuccessor(0)) &&
!LI->isLoopHeader(Br->getSuccessor(1))) {
- DEBUG(dbgs() << "LV: Unsupported conditional branch.\n");
+ LLVM_DEBUG(dbgs() << "LV: Unsupported conditional branch.\n");
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
<< "loop control flow is not understood by vectorizer");
if (DoExtraAnalysis)
@@ -504,8 +505,9 @@ bool LoopVectorizationLegality::canVectorizeOuterLoop() {
// simple outer loops scenarios with uniform nested loops.
if (!isUniformLoopNest(TheLoop /*loop nest*/,
TheLoop /*context outer loop*/)) {
- DEBUG(dbgs()
- << "LV: Not vectorizing: Outer loop contains divergent loops.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "LV: Not vectorizing: Outer loop contains divergent loops.\n");
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
<< "loop control flow is not understood by vectorizer");
if (DoExtraAnalysis)
@@ -565,7 +567,7 @@ void LoopVectorizationLegality::addInductionPhi(
AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
}
- DEBUG(dbgs() << "LV: Found an induction variable.\n");
+ LLVM_DEBUG(dbgs() << "LV: Found an induction variable.\n");
}
bool LoopVectorizationLegality::canVectorizeInstrs() {
@@ -587,7 +589,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
!PhiTy->isPointerTy()) {
ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
<< "loop control flow is not understood by vectorizer");
- DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n");
+      LLVM_DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
return false;
}
@@ -609,7 +611,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (Phi->getNumIncomingValues() != 2) {
ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
<< "control flow not understood by vectorizer");
- DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
+ LLVM_DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
return false;
}
@@ -647,7 +649,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
<< "value that could not be identified as "
"reduction is used outside the loop");
- DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
return false;
} // end of PHI handling
@@ -662,7 +664,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
<< "call instruction cannot be vectorized");
- DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
return false;
}
@@ -674,7 +677,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
<< "intrinsic instruction cannot be vectorized");
- DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
+ LLVM_DEBUG(dbgs()
+ << "LV: Found unvectorizable intrinsic " << *CI << "\n");
return false;
}
}
@@ -686,7 +690,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
isa<ExtractElementInst>(I)) {
ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
<< "instruction return type cannot be vectorized");
- DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
+ LLVM_DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
return false;
}
@@ -706,7 +710,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// semantics.
} else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
!I.isFast()) {
- DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
+ LLVM_DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
Hints->setPotentiallyUnsafe();
}
@@ -721,7 +725,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
}
if (!PrimaryInduction) {
- DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
+ LLVM_DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
if (Inductions.empty()) {
ORE->emit(createMissedAnalysis("NoInductionVariable")
<< "loop induction variable could not be identified");
@@ -753,7 +757,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
if (LAI->hasStoreToLoopInvariantAddress()) {
ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
<< "write to a loop invariant address could not be vectorized");
- DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
+ LLVM_DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
return false;
}
@@ -903,7 +907,7 @@ bool LoopVectorizationLegality::canVectorizeLoopCFG(Loop *Lp,
// We must have a loop in canonical form. Loops with indirectbr in them cannot
// be canonicalized.
if (!Lp->getLoopPreheader()) {
- DEBUG(dbgs() << "LV: Loop doesn't have a legal pre-header.\n");
+ LLVM_DEBUG(dbgs() << "LV: Loop doesn't have a legal pre-header.\n");
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
<< "loop control flow is not understood by vectorizer");
if (DoExtraAnalysis)
@@ -989,8 +993,8 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
}
// We need to have a loop header.
- DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
- << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
+ << '\n');
// Specific checks for outer loops. We skip the remaining legal checks at this
// point because they don't support outer loops.
@@ -998,13 +1002,13 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
assert(UseVPlanNativePath && "VPlan-native path is not enabled.");
if (!canVectorizeOuterLoop()) {
- DEBUG(dbgs() << "LV: Not vectorizing: Unsupported outer loop.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Unsupported outer loop.\n");
// TODO: Implement DoExtraAnalysis when subsequent legal checks support
// outer loops.
return false;
}
- DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n");
+ LLVM_DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n");
return Result;
}
@@ -1012,7 +1016,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
// Check if we can if-convert non-single-bb loops.
unsigned NumBlocks = TheLoop->getNumBlocks();
if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
- DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
+ LLVM_DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
if (DoExtraAnalysis)
Result = false;
else
@@ -1021,7 +1025,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
// Check if we can vectorize the instructions and CFG in this loop.
if (!canVectorizeInstrs()) {
- DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
+ LLVM_DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
if (DoExtraAnalysis)
Result = false;
else
@@ -1030,18 +1034,18 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
// Go over each instruction and look at memory deps.
if (!canVectorizeMemory()) {
- DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
+ LLVM_DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
if (DoExtraAnalysis)
Result = false;
else
return false;
}
- DEBUG(dbgs() << "LV: We can vectorize this loop"
- << (LAI->getRuntimePointerChecking()->Need
- ? " (with a runtime bound check)"
- : "")
- << "!\n");
+ LLVM_DEBUG(dbgs() << "LV: We can vectorize this loop"
+ << (LAI->getRuntimePointerChecking()->Need
+ ? " (with a runtime bound check)"
+ : "")
+ << "!\n");
unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
@@ -1051,7 +1055,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
<< "Too many SCEV assumptions need to be made and checked "
<< "at runtime");
- DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
+ LLVM_DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
if (DoExtraAnalysis)
Result = false;
else
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index eefaf22d028d..a65dc09baa64 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1628,20 +1628,20 @@ static bool isExplicitVecOuterLoop(Loop *OuterLp,
Function *Fn = OuterLp->getHeader()->getParent();
if (!Hints.allowVectorization(Fn, OuterLp, false /*AlwaysVectorize*/)) {
- DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
+ LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
return false;
}
if (!Hints.getWidth()) {
- DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n");
emitMissedWarning(Fn, OuterLp, Hints, ORE);
return false;
}
if (Hints.getInterleave() > 1) {
// TODO: Interleave support is future work.
- DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
- "outer loops.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
+ "outer loops.\n");
emitMissedWarning(Fn, OuterLp, Hints, ORE);
return false;
}
@@ -4123,7 +4123,7 @@ void InnerLoopVectorizer::widenInstruction(Instruction &I) {
default:
// This instruction is not vectorized by simple widening.
- DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
+ LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
llvm_unreachable("Unhandled instruction!");
} // end of switch.
}
@@ -4235,7 +4235,7 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
}
for (auto *I : ScalarPtrs)
if (!PossibleNonScalarPtrs.count(I)) {
- DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
Worklist.insert(I);
}
@@ -4252,8 +4252,9 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
continue;
Worklist.insert(Ind);
Worklist.insert(IndUpdate);
- DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
- DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
+ << "\n");
}
// Insert the forced scalars.
@@ -4280,7 +4281,7 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
isScalarUse(J, Src));
})) {
Worklist.insert(Src);
- DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
}
}
@@ -4320,8 +4321,9 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
// The induction variable and its update instruction will remain scalar.
Worklist.insert(Ind);
Worklist.insert(IndUpdate);
- DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
- DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
+ << "\n");
}
Scalars[VF].insert(Worklist.begin(), Worklist.end());
@@ -4413,7 +4415,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
Worklist.insert(Cmp);
- DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
}
// Holds consecutive and consecutive-like pointers. Consecutive-like pointers
@@ -4474,7 +4476,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
// aren't also identified as possibly non-uniform.
for (auto *V : ConsecutiveLikePtrs)
if (!PossibleNonUniformPtrs.count(V)) {
- DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
Worklist.insert(V);
}
@@ -4497,7 +4499,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
isUniformDecision(J, VF));
})) {
Worklist.insert(OI);
- DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
}
}
}
@@ -4542,8 +4544,9 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
// The induction variable and its update instruction will remain uniform.
Worklist.insert(Ind);
Worklist.insert(IndUpdate);
- DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
- DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate
+ << "\n");
}
Uniforms[VF].insert(Worklist.begin(), Worklist.end());
@@ -4630,7 +4633,7 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving() {
- DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
+ LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
const ValueToValueMap &Strides = LAI->getSymbolicStrides();
// Holds all accesses with a constant stride.
@@ -4672,7 +4675,8 @@ void InterleavedAccessInfo::analyzeInterleaving() {
if (isStrided(DesB.Stride)) {
Group = getInterleaveGroup(B);
if (!Group) {
- DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
+ << '\n');
Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
}
if (B->mayWriteToMemory())
@@ -4775,8 +4779,9 @@ void InterleavedAccessInfo::analyzeInterleaving() {
// Try to insert A into B's group.
if (Group->insertMember(A, IndexA, DesA.Align)) {
- DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
- << " into the interleave group with" << *B << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
+ << " into the interleave group with" << *B
+ << '\n');
InterleaveGroupMap[A] = Group;
// Set the first load in program order as the insert position.
@@ -4789,8 +4794,9 @@ void InterleavedAccessInfo::analyzeInterleaving() {
// Remove interleaved store groups with gaps.
for (InterleaveGroup *Group : StoreGroups)
if (Group->getNumMembers() != Group->getFactor()) {
- DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
- "to gaps.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Invalidate candidate interleaved store group due "
+ "to gaps.\n");
releaseGroup(Group);
}
// Remove interleaved groups with gaps (currently only loads) whose memory
@@ -4822,8 +4828,9 @@ void InterleavedAccessInfo::analyzeInterleaving() {
Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
/*ShouldCheckWrap=*/true)) {
- DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
- "first group member potentially pointer-wrapping.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Invalidate candidate interleaved group due to "
+ "first group member potentially pointer-wrapping.\n");
releaseGroup(Group);
continue;
}
@@ -4832,8 +4839,9 @@ void InterleavedAccessInfo::analyzeInterleaving() {
Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
/*ShouldCheckWrap=*/true)) {
- DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
- "last group member potentially pointer-wrapping.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Invalidate candidate interleaved group due to "
+ "last group member potentially pointer-wrapping.\n");
releaseGroup(Group);
}
} else {
@@ -4843,12 +4851,14 @@ void InterleavedAccessInfo::analyzeInterleaving() {
// to look for a member at index factor - 1, since every group must have
// a member at index zero.
if (Group->isReverse()) {
- DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
- "a reverse access with gaps.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Invalidate candidate interleaved group due to "
+ "a reverse access with gaps.\n");
releaseGroup(Group);
continue;
}
- DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
RequiresScalarEpilogue = true;
}
}
@@ -4858,7 +4868,8 @@ Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
// TODO: It may by useful to do since it's still likely to be dynamically
// uniform if the target can skip.
- DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target");
+ LLVM_DEBUG(
+ dbgs() << "LV: Not inserting runtime ptr check for divergent target");
ORE->emit(
createMissedAnalysis("CantVersionLoopWithDivergentTarget")
@@ -4876,20 +4887,22 @@ Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
<< "runtime pointer checks needed. Enable vectorization of this "
"loop with '#pragma clang loop vectorize(enable)' when "
"compiling with -Os/-Oz");
- DEBUG(dbgs()
- << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
return None;
}
// If we optimize the program for size, avoid creating the tail loop.
- DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
// If we don't know the precise trip count, don't try to vectorize.
if (TC < 2) {
ORE->emit(
createMissedAnalysis("UnknownLoopCountComplexCFG")
<< "unable to calculate the loop count due to complex control flow");
- DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
return None;
}
@@ -4907,7 +4920,8 @@ Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
"same time. Enable vectorization of this loop "
"with '#pragma clang loop vectorize(enable)' "
"when compiling with -Os/-Oz");
- DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
return None;
}
@@ -4932,23 +4946,23 @@ LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
unsigned MaxVectorSize = WidestRegister / WidestType;
- DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
- << WidestType << " bits.\n");
- DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister
- << " bits.\n");
+ LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
+ << " / " << WidestType << " bits.\n");
+ LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
+ << WidestRegister << " bits.\n");
assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
" into one vector!");
if (MaxVectorSize == 0) {
- DEBUG(dbgs() << "LV: The target has no vector registers.\n");
+ LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
MaxVectorSize = 1;
return MaxVectorSize;
} else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
isPowerOf2_32(ConstTripCount)) {
// We need to clamp the VF to be the ConstTripCount. There is no point in
// choosing a higher viable VF as done in the loop below.
- DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
- << ConstTripCount << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
+ << ConstTripCount << "\n");
MaxVectorSize = ConstTripCount;
return MaxVectorSize;
}
@@ -4977,8 +4991,8 @@ LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
}
if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
if (MaxVF < MinVF) {
- DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
- << ") with target's minimum: " << MinVF << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
+ << ") with target's minimum: " << MinVF << '\n');
MaxVF = MinVF;
}
}
@@ -4991,7 +5005,7 @@ LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
float Cost = expectedCost(1).first;
const float ScalarCost = Cost;
unsigned Width = 1;
- DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
+ LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
// Ignore scalar width, because the user explicitly wants vectorization.
@@ -5006,10 +5020,10 @@ LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
// the vector elements.
VectorizationCostTy C = expectedCost(i);
float VectorCost = C.first / (float)i;
- DEBUG(dbgs() << "LV: Vector loop of width " << i
- << " costs: " << (int)VectorCost << ".\n");
+ LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
+ << " costs: " << (int)VectorCost << ".\n");
if (!C.second && !ForceVectorization) {
- DEBUG(
+ LLVM_DEBUG(
dbgs() << "LV: Not considering vector loop of width " << i
<< " because it will not generate any vector instructions.\n");
continue;
@@ -5023,15 +5037,16 @@ LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
if (!EnableCondStoresVectorization && NumPredStores) {
ORE->emit(createMissedAnalysis("ConditionalStore")
<< "store that is conditionally executed prevents vectorization");
- DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: No vectorization. There are conditional stores.\n");
Width = 1;
Cost = ScalarCost;
}
- DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
- << "LV: Vectorization seems to be not beneficial, "
- << "but was forced by a user.\n");
- DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
+ LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
+ << "LV: Vectorization seems to be not beneficial, "
+ << "but was forced by a user.\n");
+ LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
return Factor;
}
@@ -5123,8 +5138,8 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
return 1;
unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
- DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
- << " registers\n");
+ LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
+ << " registers\n");
if (VF == 1) {
if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
@@ -5182,7 +5197,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
// Interleave if we vectorized this loop and there is a reduction that could
// benefit from interleaving.
if (VF > 1 && !Legal->getReductionVars()->empty()) {
- DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
+ LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
return IC;
}
@@ -5193,7 +5208,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
// We want to interleave small loops in order to reduce the loop overhead and
// potentially expose ILP opportunities.
- DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
// We assume that the cost overhead is 1 and we use the cost model
// to estimate the cost of the loop and interleave until the cost of the
@@ -5221,11 +5236,12 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
if (EnableLoadStoreRuntimeInterleave &&
std::max(StoresIC, LoadsIC) > SmallIC) {
- DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Interleaving to saturate store or load ports.\n");
return std::max(StoresIC, LoadsIC);
}
- DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
+ LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
return SmallIC;
}
@@ -5233,11 +5249,11 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
// this point) that could benefit from interleaving.
bool HasReductions = !Legal->getReductionVars()->empty();
if (TTI.enableAggressiveInterleaving(HasReductions)) {
- DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
+ LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
return IC;
}
- DEBUG(dbgs() << "LV: Not Interleaving.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
return 1;
}
@@ -5327,7 +5343,7 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
SmallVector<RegisterUsage, 8> RUs(VFs.size());
SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
- DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
+ LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
// A lambda that gets the register usage for the given type and VF.
auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
@@ -5372,8 +5388,8 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
}
- DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
- << OpenIntervals.size() << '\n');
+ LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
+ << OpenIntervals.size() << '\n');
// Add the current instruction to the list of open intervals.
OpenIntervals.insert(I);
@@ -5388,9 +5404,10 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
Invariant += GetRegUsage(Inst->getType(), VFs[i]);
}
- DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
- DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
- DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
+ LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
+ LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
+ LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant
+ << '\n');
RU.LoopInvariantRegs = Invariant;
RU.MaxLocalUsers = MaxUsages[i];
@@ -5587,8 +5604,9 @@ LoopVectorizationCostModel::expectedCost(unsigned VF) {
BlockCost.first += C.first;
BlockCost.second |= C.second;
- DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
- << VF << " For instruction: " << I << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
+ << " for VF " << VF << " For instruction: " << I
+ << '\n');
}
// If we are vectorizing a predicated block, it will have been
@@ -6247,14 +6265,15 @@ LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize,
assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
assert(UserVF && "Expected UserVF for outer loop vectorization.");
assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
- DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
+ LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
buildVPlans(UserVF, UserVF);
return {UserVF, 0};
}
- DEBUG(dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
- "VPlan-native path.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
+ "VPlan-native path.\n");
return NoVectorization;
}
@@ -6268,13 +6287,13 @@ LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
return NoVectorization;
if (UserVF) {
- DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
+ LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
// Collect the instructions (and their associated costs) that will be more
// profitable to scalarize.
CM.selectUserVectorizationFactor(UserVF);
buildVPlans(UserVF, UserVF);
- DEBUG(printPlans(dbgs()));
+ LLVM_DEBUG(printPlans(dbgs()));
return {UserVF, 0};
}
@@ -6292,7 +6311,7 @@ LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
}
buildVPlans(1, MaxVF);
- DEBUG(printPlans(dbgs()));
+ LLVM_DEBUG(printPlans(dbgs()));
if (MaxVF == 1)
return NoVectorization;
@@ -6301,7 +6320,8 @@ LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
}
void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
- DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n');
+ LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
+ << '\n');
BestVF = VF;
BestUF = UF;
@@ -6777,11 +6797,11 @@ VPBasicBlock *LoopVectorizationPlanner::handleReplication(
// Finalize the recipe for Instr, first if it is not predicated.
if (!IsPredicated) {
- DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
VPBB->appendRecipe(Recipe);
return VPBB;
}
- DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
+ LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
assert(VPBB->getSuccessors().empty() &&
"VPBB has successors when handling predicated replication.");
// Record predicated instructions for above packing optimizations.
@@ -6906,8 +6926,9 @@ LoopVectorizationPlanner::buildVPlan(VFRange &Range,
// should follow.
auto SAIt = SinkAfter.find(Instr);
if (SAIt != SinkAfter.end()) {
- DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second
- << " to vectorize a 1st order recurrence.\n");
+ LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after"
+ << *SAIt->second
+ << " to vectorize a 1st order recurrence.\n");
SinkAfterInverse[SAIt->second] = Instr;
continue;
}
@@ -7208,21 +7229,22 @@ bool LoopVectorizePass::processLoop(Loop *L) {
const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */
- DEBUG(dbgs() << "\nLV: Checking a loop in \""
- << L->getHeader()->getParent()->getName() << "\" from "
- << DebugLocStr << "\n");
+ LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
+ << L->getHeader()->getParent()->getName() << "\" from "
+ << DebugLocStr << "\n");
LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
- DEBUG(dbgs() << "LV: Loop hints:"
- << " force="
- << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
- ? "disabled"
- : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
- ? "enabled"
- : "?"))
- << " width=" << Hints.getWidth()
- << " unroll=" << Hints.getInterleave() << "\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Loop hints:"
+ << " force="
+ << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
+ ? "disabled"
+ : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
+ ? "enabled"
+ : "?"))
+ << " width=" << Hints.getWidth()
+ << " unroll=" << Hints.getInterleave() << "\n");
// Function containing loop
Function *F = L->getHeader()->getParent();
@@ -7236,7 +7258,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
// benefit from vectorization, respectively.
if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
- DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
+ LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
return false;
}
@@ -7247,7 +7269,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE,
&Requirements, &Hints, DB, AC);
if (!LVL.canVectorize(EnableVPlanNativePath)) {
- DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
emitMissedWarning(F, L, Hints, ORE);
return false;
}
@@ -7297,13 +7319,13 @@ bool LoopVectorizePass::processLoop(Loop *L) {
}
if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
- DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
- << "This loop is worth vectorizing only if no scalar "
- << "iteration overheads are incurred.");
+ LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
+ << "This loop is worth vectorizing only if no scalar "
+ << "iteration overheads are incurred.");
if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
- DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
+ LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
else {
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
// Loops with a very small trip count are considered for vectorization
// under OptForSize, thereby making sure the cost of their loop body is
// dominant, free of runtime guards and scalar iteration overheads.
@@ -7316,8 +7338,8 @@ bool LoopVectorizePass::processLoop(Loop *L) {
// an integer loop and the vector instructions selected are purely integer
// vector instructions?
if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
- DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
- "attribute is used.\n");
+ LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
+ "attribute is used.\n");
ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
"NoImplicitFloat", L)
<< "loop not vectorized due to NoImplicitFloat attribute");
@@ -7331,7 +7353,8 @@ bool LoopVectorizePass::processLoop(Loop *L) {
// additional fp-math flags can help.
if (Hints.isPotentiallyUnsafe() &&
TTI->isFPVectorizationPotentiallyUnsafe()) {
- DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
+ LLVM_DEBUG(
+ dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
ORE->emit(
createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
<< "loop not vectorized due to unsafe FP support.");
@@ -7375,14 +7398,14 @@ bool LoopVectorizePass::processLoop(Loop *L) {
std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
bool VectorizeLoop = true, InterleaveLoop = true;
if (Requirements.doesNotMeet(F, L, Hints)) {
- DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
- "requirements.\n");
+ LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
+ "requirements.\n");
emitMissedWarning(F, L, Hints, ORE);
return false;
}
if (VF.Width == 1) {
- DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
+ LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
VecDiagMsg = std::make_pair(
"VectorizationNotBeneficial",
"the cost-model indicates that vectorization is not beneficial");
@@ -7391,7 +7414,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
if (IC == 1 && UserIC <= 1) {
// Tell the user interleaving is not beneficial.
- DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
+ LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
IntDiagMsg = std::make_pair(
"InterleavingNotBeneficial",
"the cost-model indicates that interleaving is not beneficial");
@@ -7403,8 +7426,8 @@ bool LoopVectorizePass::processLoop(Loop *L) {
}
} else if (IC > 1 && UserIC == 1) {
// Tell the user interleaving is beneficial, but it explicitly disabled.
- DEBUG(dbgs()
- << "LV: Interleaving is beneficial but is explicitly disabled.");
+ LLVM_DEBUG(
+ dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
IntDiagMsg = std::make_pair(
"InterleavingBeneficialButDisabled",
"the cost-model indicates that interleaving is beneficial "
@@ -7431,24 +7454,24 @@ bool LoopVectorizePass::processLoop(Loop *L) {
});
return false;
} else if (!VectorizeLoop && InterleaveLoop) {
- DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
ORE->emit([&]() {
return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
L->getStartLoc(), L->getHeader())
<< VecDiagMsg.second;
});
} else if (VectorizeLoop && !InterleaveLoop) {
- DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
- << DebugLocStr << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
+ << ") in " << DebugLocStr << '\n');
ORE->emit([&]() {
return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
L->getStartLoc(), L->getHeader())
<< IntDiagMsg.second;
});
} else if (VectorizeLoop && InterleaveLoop) {
- DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
- << DebugLocStr << '\n');
- DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
+ << ") in " << DebugLocStr << '\n');
+ LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
}
LVP.setBestPlan(VF.Width, IC);
@@ -7495,7 +7518,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
// Mark the loop as already vectorized to avoid vectorizing again.
Hints.setAlreadyVectorized();
- DEBUG(verifyFunction(*L->getHeader()->getParent()));
+ LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
return true;
}
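For readers following the mechanics of this rename: every site in the hunks above and below goes through the same macro from include/llvm/Support/Debug.h. The sketch that follows is an illustrative, self-contained example of the pattern and is not part of this diff; the debug type "my-pass" and both helper functions are hypothetical. In a build with NDEBUG defined the macro's argument is compiled away entirely; in an assert-enabled build it only runs when the tool is invoked with -debug or -debug-only=<DEBUG_TYPE>.

#define DEBUG_TYPE "my-pass" // hypothetical debug type; must be defined before LLVM_DEBUG is used

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Single-statement form, the shape used at most sites in this diff.
static void reportWidth(unsigned Width) {
  LLVM_DEBUG(dbgs() << "my-pass: selected width " << Width << "\n");
}

// Block form, useful when several statements share one guard
// (compare the LLVM_DEBUG({ ... }) hunks in SLPVectorizer.cpp below).
static void reportOrder(const SmallVectorImpl<unsigned> &Order) {
  LLVM_DEBUG({
    dbgs() << "my-pass: order";
    for (unsigned Idx : Order)
      dbgs() << ' ' << Idx;
    dbgs() << '\n';
  });
}

With an assert-enabled build, the output from such statements is requested per pass at the command line, for example: opt -debug-only=my-pass ... (again assuming the hypothetical debug type above).
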
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 639a05256244..2f9fcc7ec1a8 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1059,7 +1059,7 @@ private:
template <typename ReadyListType>
void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
SD->IsScheduled = true;
- DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
ScheduleData *BundleMember = SD;
while (BundleMember) {
@@ -1082,8 +1082,8 @@ private:
assert(!DepBundle->IsScheduled &&
"already scheduled bundle gets ready");
ReadyList.insert(DepBundle);
- DEBUG(dbgs()
- << "SLP: gets ready (def): " << *DepBundle << "\n");
+ LLVM_DEBUG(dbgs()
+ << "SLP: gets ready (def): " << *DepBundle << "\n");
}
});
}
@@ -1096,8 +1096,8 @@ private:
assert(!DepBundle->IsScheduled &&
"already scheduled bundle gets ready");
ReadyList.insert(DepBundle);
- DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle
- << "\n");
+ LLVM_DEBUG(dbgs()
+ << "SLP: gets ready (mem): " << *DepBundle << "\n");
}
}
BundleMember = BundleMember->NextInBundle;
@@ -1122,7 +1122,8 @@ private:
doForAllOpcodes(I, [&](ScheduleData *SD) {
if (SD->isSchedulingEntity() && SD->isReady()) {
ReadyList.insert(SD);
- DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
+ LLVM_DEBUG(dbgs()
+ << "SLP: initially in ready list: " << *I << "\n");
}
});
}
@@ -1398,12 +1399,12 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
// Check if the scalar is externally used as an extra arg.
auto ExtI = ExternallyUsedValues.find(Scalar);
if (ExtI != ExternallyUsedValues.end()) {
- DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " <<
- Lane << " from " << *Scalar << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
+ << Lane << " from " << *Scalar << ".\n");
ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
}
for (User *U : Scalar->users()) {
- DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
Instruction *UserInst = dyn_cast<Instruction>(U);
if (!UserInst)
@@ -1417,8 +1418,8 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
// be used.
if (UseScalar != U ||
!InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
- DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
- << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
+ << ".\n");
assert(!UseEntry->NeedToGather && "Bad state");
continue;
}
@@ -1428,8 +1429,8 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
if (is_contained(UserIgnoreList, UserInst))
continue;
- DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
- Lane << " from " << *Scalar << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
+ << Lane << " from " << *Scalar << ".\n");
ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
}
}
@@ -1442,28 +1443,28 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
InstructionsState S = getSameOpcode(VL);
if (Depth == RecursionMaxDepth) {
- DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
// Don't handle vectors.
if (S.OpValue->getType()->isVectorTy()) {
- DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
if (SI->getValueOperand()->getType()->isVectorTy()) {
- DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
// If all of the operands are identical or constant we have a simple solution.
if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.Opcode) {
- DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1474,8 +1475,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// Don't vectorize ephemeral values.
for (unsigned i = 0, e = VL.size(); i != e; ++i) {
if (EphValues.count(VL[i])) {
- DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
- ") is ephemeral.\n");
+ LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
+ << ") is ephemeral.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1483,16 +1484,17 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// Check if this is a duplicate of another entry.
if (TreeEntry *E = getTreeEntry(S.OpValue)) {
- DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
if (!E->isSame(VL)) {
- DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
// Record the reuse of the tree node. FIXME, currently this is only used to
// properly draw the graph rather than for the actual vectorization.
E->UserTreeIndices.push_back(UserTreeIdx);
- DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
+ << ".\n");
return;
}
@@ -1502,8 +1504,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!I)
continue;
if (getTreeEntry(I)) {
- DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
- ") is already in tree.\n");
+ LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
+ << ") is already in tree.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1513,7 +1515,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// we need to gather the scalars.
for (unsigned i = 0, e = VL.size(); i != e; ++i) {
if (MustGather.count(VL[i])) {
- DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1527,7 +1529,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!DT->isReachableFromEntry(BB)) {
// Don't go into unreachable blocks. They may contain instructions with
// dependency cycles which confuse the final scheduling.
- DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
+ LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1545,9 +1547,9 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (UniqueValues.size() == VL.size()) {
ReuseShuffleIndicies.clear();
} else {
- DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
if (UniqueValues.size() <= 1 || !llvm::isPowerOf2_32(UniqueValues.size())) {
- DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
newTreeEntry(VL, false, UserTreeIdx);
return;
}
@@ -1561,14 +1563,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
BlockScheduling &BS = *BSRef.get();
if (!BS.tryScheduleBundle(VL, this, VL0)) {
- DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
+ LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
assert((!BS.getScheduleData(VL0) ||
!BS.getScheduleData(VL0)->isPartOfBundle()) &&
"tryScheduleBundle should cancelScheduling on failure");
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
return;
}
- DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
+ LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
unsigned ShuffleOrOp = S.IsAltShuffle ?
(unsigned) Instruction::ShuffleVector : S.Opcode;
@@ -1582,7 +1584,9 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
TerminatorInst *Term = dyn_cast<TerminatorInst>(
cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
if (Term) {
- DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
+ LLVM_DEBUG(
+ dbgs()
+ << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
return;
@@ -1590,7 +1594,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
ValueList Operands;
@@ -1608,14 +1612,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
OrdersType CurrentOrder;
bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
if (Reuse) {
- DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
++NumOpsWantToKeepOriginalOrder;
newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
ReuseShuffleIndicies);
return;
}
if (!CurrentOrder.empty()) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
"with order";
for (unsigned Idx : CurrentOrder)
@@ -1631,7 +1635,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
StoredCurrentOrderAndNum->getFirst());
return;
}
- DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
newTreeEntry(VL, /*Vectorized=*/false, UserTreeIdx, ReuseShuffleIndicies);
BS.cancelScheduling(VL, VL0);
return;
@@ -1649,7 +1653,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
DL->getTypeAllocSizeInBits(ScalarTy)) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
return;
}
@@ -1662,7 +1666,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!L->isSimple()) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
return;
}
*POIter = L->getPointerOperand();
@@ -1693,20 +1697,20 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
++NumOpsWantToKeepOriginalOrder;
newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a vector of loads.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
} else {
// Need to reorder.
auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
++I->getSecond();
newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
ReuseShuffleIndicies, I->getFirst());
- DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
}
return;
}
}
- DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
return;
@@ -1729,12 +1733,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (Ty != SrcTy || !isValidElementType(Ty)) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
+ LLVM_DEBUG(dbgs()
+ << "SLP: Gathering casts with different src types.\n");
return;
}
}
newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a vector of casts.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
@@ -1757,13 +1762,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
Cmp->getOperand(0)->getType() != ComparedTy) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
+ LLVM_DEBUG(dbgs()
+ << "SLP: Gathering cmp with different predicate.\n");
return;
}
}
newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a vector of compares.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
ValueList Operands;
@@ -1795,7 +1801,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
case Instruction::Or:
case Instruction::Xor:
newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
// Sort operands of the instructions so that each side is more likely to
// have the same opcode.
@@ -1821,7 +1827,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// We don't combine GEPs with complicated (nested) indexing.
for (unsigned j = 0; j < VL.size(); ++j) {
if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
- DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
+ LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
return;
@@ -1834,7 +1840,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
for (unsigned j = 0; j < VL.size(); ++j) {
Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
if (Ty0 != CurTy) {
- DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
+ LLVM_DEBUG(dbgs()
+ << "SLP: not-vectorizable GEP (different types).\n");
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
return;
@@ -1845,8 +1852,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
for (unsigned j = 0; j < VL.size(); ++j) {
auto Op = cast<Instruction>(VL[j])->getOperand(1);
if (!isa<ConstantInt>(Op)) {
- DEBUG(
- dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
+ LLVM_DEBUG(dbgs()
+ << "SLP: not-vectorizable GEP (non-constant indexes).\n");
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
return;
@@ -1854,7 +1861,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
for (unsigned i = 0, e = 2; i < e; ++i) {
ValueList Operands;
// Prepare the operand vector.
@@ -1871,12 +1878,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
return;
}
newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a vector of stores.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
ValueList Operands;
for (Value *j : VL)
@@ -1894,7 +1901,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!isTriviallyVectorizable(ID)) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
return;
}
Function *Int = CI->getCalledFunction();
@@ -1908,8 +1915,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
!CI->hasIdenticalOperandBundleSchema(*CI2)) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
- << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
+ << "\n");
return;
}
// ctlz,cttz and powi are special intrinsics whose second argument
@@ -1919,9 +1926,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (A1I != A1J) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
- << " argument "<< A1I<<"!=" << A1J
- << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
+ << " argument " << A1I << "!=" << A1J << "\n");
return;
}
}
@@ -1932,8 +1938,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
- << *VL[i] << '\n');
+ LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
+ << *CI << "!=" << *VL[i] << '\n');
return;
}
}
@@ -1956,11 +1962,11 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!S.IsAltShuffle) {
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
+ LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
return;
}
newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
+ LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
// Reorder operands if reordering would enable vectorization.
if (isa<BinaryOperator>(VL0)) {
@@ -1984,7 +1990,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
default:
BS.cancelScheduling(VL, VL0);
newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
- DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
return;
}
}
@@ -2411,9 +2417,9 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
VecTy->getNumElements());
- DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost
- << " (" << VecCallCost << "-" << ScalarCallCost << ")"
- << " for " << *CI << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
+ << " (" << VecCallCost << "-" << ScalarCallCost << ")"
+ << " for " << *CI << "\n");
return ReuseShuffleCost + VecCallCost - ScalarCallCost;
}
@@ -2465,8 +2471,8 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
}
bool BoUpSLP::isFullyVectorizableTinyTree() {
- DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
- VectorizableTree.size() << " is fully vectorizable .\n");
+ LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
+ << VectorizableTree.size() << " is fully vectorizable .\n");
// We only handle trees of heights 1 and 2.
if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
@@ -2536,7 +2542,7 @@ int BoUpSLP::getSpillCost() {
LiveValues.insert(cast<Instruction>(&*J));
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "SLP: #LV: " << LiveValues.size();
for (auto *X : LiveValues)
dbgs() << " " << X->getName();
@@ -2575,8 +2581,8 @@ int BoUpSLP::getSpillCost() {
int BoUpSLP::getTreeCost() {
int Cost = 0;
- DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
- VectorizableTree.size() << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
+ << VectorizableTree.size() << ".\n");
unsigned BundleWidth = VectorizableTree[0].Scalars.size();
@@ -2603,8 +2609,9 @@ int BoUpSLP::getTreeCost() {
continue;
int C = getEntryCost(&TE);
- DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
- << *TE.Scalars[0] << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
+ << " for bundle that starts with " << *TE.Scalars[0]
+ << ".\n");
Cost += C;
}
@@ -2649,7 +2656,7 @@ int BoUpSLP::getTreeCost() {
<< "SLP: Extract Cost = " << ExtractCost << ".\n"
<< "SLP: Total Cost = " << Cost << ".\n";
}
- DEBUG(dbgs() << Str);
+ LLVM_DEBUG(dbgs() << Str);
if (ViewSLPTree)
ViewGraph(this, "SLP" + F->getName(), false, Str);
@@ -3080,7 +3087,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
IRBuilder<>::InsertPointGuard Guard(Builder);
if (E->VectorizedValue) {
- DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
return E->VectorizedValue;
}
@@ -3240,7 +3247,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Value *InVec = vectorizeTree(INVL);
if (E->VectorizedValue) {
- DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
@@ -3268,7 +3275,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Value *R = vectorizeTree(RHSV);
if (E->VectorizedValue) {
- DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
@@ -3303,7 +3310,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Value *False = vectorizeTree(FalseVec);
if (E->VectorizedValue) {
- DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
@@ -3351,7 +3358,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Value *RHS = vectorizeTree(RHSVL);
if (E->VectorizedValue) {
- DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
@@ -3509,7 +3516,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
}
Value *OpVec = vectorizeTree(OpVL);
- DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
OpVecs.push_back(OpVec);
}
@@ -3547,7 +3554,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Value *RHS = vectorizeTree(RHSVL);
if (E->VectorizedValue) {
- DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
@@ -3627,7 +3634,8 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
VectorizableTree[0].VectorizedValue = Trunc;
}
- DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");
+ LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
+ << " values .\n");
// If necessary, sign-extend or zero-extend ScalarRoot to the larger type
// specified by ScalarType.
@@ -3713,7 +3721,7 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
User->replaceUsesOfWith(Scalar, Ex);
}
- DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
}
// For each vectorized value:
@@ -3734,7 +3742,7 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
if (!Ty->isVoidTy()) {
#ifndef NDEBUG
for (User *U : Scalar->users()) {
- DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
// It is legal to replace users in the ignorelist by undef.
assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
@@ -3744,7 +3752,7 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
Value *Undef = UndefValue::get(Ty);
Scalar->replaceAllUsesWith(Undef);
}
- DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
eraseInstruction(cast<Instruction>(Scalar));
}
}
@@ -3755,8 +3763,8 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
}
void BoUpSLP::optimizeGatherSequence() {
- DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
- << " gather sequences instructions.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
+ << " gather sequences instructions.\n");
// LICM InsertElementInst sequences.
for (Instruction *I : GatherSeq) {
if (!isa<InsertElementInst>(I) && !isa<ShuffleVectorInst>(I))
@@ -3849,7 +3857,7 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
ScheduleData *PrevInBundle = nullptr;
ScheduleData *Bundle = nullptr;
bool ReSchedule = false;
- DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n");
// Make sure that the scheduling region contains all
// instructions of the bundle.
@@ -3866,8 +3874,8 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
// A bundle member was scheduled as single instruction before and now
// needs to be scheduled as part of the bundle. We just get rid of the
// existing schedule.
- DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
- << " was already scheduled\n");
+ LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
+ << " was already scheduled\n");
ReSchedule = true;
}
assert(BundleMember->isSchedulingEntity() &&
@@ -3902,8 +3910,8 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
initialFillReadyList(ReadyInsts);
}
- DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
- << BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
+ << BB->getName() << "\n");
calculateDependencies(Bundle, true, SLP);
@@ -3933,7 +3941,7 @@ void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
return;
ScheduleData *Bundle = getScheduleData(OpValue);
- DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
assert(!Bundle->IsScheduled &&
"Can't cancel bundle which is already scheduled");
assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
@@ -3992,7 +4000,7 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
if (isOneOf(OpValue, I) != I)
CheckSheduleForI(I);
assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
- DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
return true;
}
// Search up and down at the same time, because we don't know if the new
@@ -4004,7 +4012,7 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
BasicBlock::iterator LowerEnd = BB->end();
while (true) {
if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
- DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
+ LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
return false;
}
@@ -4014,7 +4022,8 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
ScheduleStart = I;
if (isOneOf(OpValue, I) != I)
CheckSheduleForI(I);
- DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
+ << "\n");
return true;
}
UpIter++;
@@ -4027,7 +4036,8 @@ bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
if (isOneOf(OpValue, I) != I)
CheckSheduleForI(I);
assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
- DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I
+ << "\n");
return true;
}
DownIter++;
@@ -4091,7 +4101,8 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
assert(isInSchedulingRegion(BundleMember));
if (!BundleMember->hasValidDependencies()) {
- DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
+ << "\n");
BundleMember->Dependencies = 0;
BundleMember->resetUnscheduledDeps();
@@ -4192,7 +4203,8 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
}
if (InsertInReadyList && SD->isReady()) {
ReadyInsts.push_back(SD);
- DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
+ << "\n");
}
}
}
@@ -4215,7 +4227,7 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
if (!BS->ScheduleStart)
return;
- DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
BS->resetSchedule();
@@ -4648,7 +4660,7 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
if (F.hasFnAttribute(Attribute::NoImplicitFloat))
return false;
- DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
// Use the bottom up slp vectorizer to construct chains that start with
// store instructions.
@@ -4663,8 +4675,8 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
// Vectorize trees that end at stores.
if (!Stores.empty()) {
- DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
- << " underlying objects.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
+ << " underlying objects.\n");
Changed |= vectorizeStoreChains(R);
}
@@ -4675,16 +4687,16 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
// is primarily intended to catch gather-like idioms ending at
// non-consecutive loads.
if (!GEPs.empty()) {
- DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
- << " underlying objects.\n");
+ LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
+ << " underlying objects.\n");
Changed |= vectorizeGEPIndices(BB, R);
}
}
if (Changed) {
R.optimizeGatherSequence();
- DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
- DEBUG(verifyFunction(F));
+ LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
+ LLVM_DEBUG(verifyFunction(F));
}
return Changed;
}
@@ -4705,8 +4717,8 @@ static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
unsigned VecRegSize) {
const unsigned ChainLen = Chain.size();
- DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
- << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
+ << "\n");
const unsigned Sz = R.getVectorElementSize(Chain[0]);
const unsigned VF = VecRegSize / Sz;
@@ -4724,8 +4736,8 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
continue;
- DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
- << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
+ << "\n");
ArrayRef<Value *> Operands = Chain.slice(i, VF);
R.buildTree(Operands);
@@ -4736,9 +4748,10 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
int Cost = R.getTreeCost();
- DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF
+ << "\n");
if (Cost < -SLPCostThreshold) {
- DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
using namespace ore;
@@ -4883,8 +4896,8 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
if (VL.size() < 2)
return false;
- DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
- << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
+ << VL.size() << ".\n");
// Check that all of the parts are scalar instructions of the same type.
Instruction *I0 = dyn_cast<Instruction>(VL[0]);
@@ -4969,8 +4982,8 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
continue;
- DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
- << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
+ << "\n");
ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
R.buildTree(Ops);
@@ -4995,7 +5008,7 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
MinCost = std::min(MinCost, Cost);
if (Cost < -SLPCostThreshold) {
- DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
cast<Instruction>(Ops[0]))
<< "SLP vectorized with cost " << ore::NV("Cost", Cost)
@@ -5752,8 +5765,8 @@ public:
break;
}
- DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
- << ". (HorRdx)\n");
+ LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
+ << Cost << ". (HorRdx)\n");
V.getORE()->emit([&]() {
return OptimizationRemark(
SV_NAME, "VectorizedHorizontalReduction", cast<Instruction>(VL[0]))
@@ -5874,11 +5887,11 @@ private:
}
ScalarReduxCost *= (ReduxWidth - 1);
- DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
- << " for reduction that starts with " << *FirstReducedVal
- << " (It is a "
- << (IsPairwiseReduction ? "pairwise" : "splitting")
- << " reduction)\n");
+ LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
+ << " for reduction that starts with " << *FirstReducedVal
+ << " (It is a "
+ << (IsPairwiseReduction ? "pairwise" : "splitting")
+ << " reduction)\n");
return VecReduxCost - ScalarReduxCost;
}
@@ -6144,7 +6157,7 @@ bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
if (!findBuildAggregate(IVI, BuildVectorOpds))
return false;
- DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
+ LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
// Aggregate value is unlikely to be processed in vector register, we need to
// extract scalars into scalar registers, so NeedExtraction is set true.
return tryToVectorizeList(BuildVectorOpds, R);
@@ -6234,8 +6247,8 @@ bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
// Try to vectorize them.
unsigned NumElts = (SameTypeIt - IncIt);
- DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
- << ")\n");
+ LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
+ << NumElts << ")\n");
// The order in which the phi nodes appear in the program does not matter.
// So allow tryToVectorizeList to reorder them if it is beneficial. This
// is done when there are exactly two elements since tryToVectorizeList
@@ -6336,8 +6349,8 @@ bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
if (Entry.second.size() < 2)
continue;
- DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
- << Entry.second.size() << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
+ << Entry.second.size() << ".\n");
// We process the getelementptr list in chunks of 16 (like we do for
// stores) to minimize compile-time.
@@ -6419,8 +6432,8 @@ bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
if (it->second.size() < 2)
continue;
- DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
- << it->second.size() << ".\n");
+ LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
+ << it->second.size() << ".\n");
// Process the stores in chunks of 16.
// TODO: The limit of 16 inhibits greater vectorization factors.
diff --git a/lib/Transforms/Vectorize/VPlan.cpp b/lib/Transforms/Vectorize/VPlan.cpp
index 7146fcc098bb..50c71a323851 100644
--- a/lib/Transforms/Vectorize/VPlan.cpp
+++ b/lib/Transforms/Vectorize/VPlan.cpp
@@ -116,7 +116,7 @@ VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
BasicBlock *PrevBB = CFG.PrevBB;
BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
PrevBB->getParent(), CFG.LastBB);
- DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');
// Hook up the new basic block to its predecessors.
for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
@@ -125,7 +125,7 @@ VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];
assert(PredBB && "Predecessor basic-block not found building successor.");
auto *PredBBTerminator = PredBB->getTerminator();
- DEBUG(dbgs() << "LV: draw edge from" << PredBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: draw edge from" << PredBB->getName() << '\n');
if (isa<UnreachableInst>(PredBBTerminator)) {
assert(PredVPSuccessors.size() == 1 &&
"Predecessor ending w/o branch must have single successor.");
@@ -175,8 +175,8 @@ void VPBasicBlock::execute(VPTransformState *State) {
}
// 2. Fill the IR basic block with IR instructions.
- DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
- << " in BB:" << NewBB->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
+ << " in BB:" << NewBB->getName() << '\n');
State->CFG.VPBB2IRBB[this] = NewBB;
State->CFG.PrevVPBB = this;
@@ -184,7 +184,7 @@ void VPBasicBlock::execute(VPTransformState *State) {
for (VPRecipeBase &Recipe : Recipes)
Recipe.execute(*State);
- DEBUG(dbgs() << "LV: filled BB:" << *NewBB);
+ LLVM_DEBUG(dbgs() << "LV: filled BB:" << *NewBB);
}
void VPRegionBlock::execute(VPTransformState *State) {
@@ -193,7 +193,7 @@ void VPRegionBlock::execute(VPTransformState *State) {
if (!isReplicator()) {
// Visit the VPBlocks connected to "this", starting from it.
for (VPBlockBase *Block : RPOT) {
- DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
Block->execute(State);
}
return;
@@ -210,7 +210,7 @@ void VPRegionBlock::execute(VPTransformState *State) {
State->Instance->Lane = Lane;
// Visit the VPBlocks connected to \p this, starting from it.
for (VPBlockBase *Block : RPOT) {
- DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
+ LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
Block->execute(State);
}
}
diff --git a/tools/bugpoint/ExtractFunction.cpp b/tools/bugpoint/ExtractFunction.cpp
index 343ecd54b489..48f1575c25eb 100644
--- a/tools/bugpoint/ExtractFunction.cpp
+++ b/tools/bugpoint/ExtractFunction.cpp
@@ -324,9 +324,9 @@ llvm::SplitFunctionsOutOfModule(Module *M, const std::vector<Function *> &F,
std::set<Function *> TestFunctions;
for (unsigned i = 0, e = F.size(); i != e; ++i) {
Function *TNOF = cast<Function>(VMap[F[i]]);
- DEBUG(errs() << "Removing function ");
- DEBUG(TNOF->printAsOperand(errs(), false));
- DEBUG(errs() << "\n");
+ LLVM_DEBUG(errs() << "Removing function ");
+ LLVM_DEBUG(TNOF->printAsOperand(errs(), false));
+ LLVM_DEBUG(errs() << "\n");
TestFunctions.insert(cast<Function>(NewVMap[TNOF]));
DeleteFunctionBody(TNOF); // Function is now external in this module!
}
diff --git a/tools/bugpoint/OptimizerDriver.cpp b/tools/bugpoint/OptimizerDriver.cpp
index 34f447831d0f..bdb6c5f19417 100644
--- a/tools/bugpoint/OptimizerDriver.cpp
+++ b/tools/bugpoint/OptimizerDriver.cpp
@@ -226,10 +226,10 @@ bool BugDriver::runPasses(Module &Program,
Args.push_back(*ExtraArgs);
Args.push_back(nullptr);
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = Args.size() - 1; i != e; ++i) errs()
- << " " << Args[i];
- errs() << "\n";);
+ LLVM_DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = Args.size() - 1; i != e; ++i) errs()
+ << " " << Args[i];
+ errs() << "\n";);
Optional<StringRef> Redirects[3] = {None, None, None};
// Redirect stdout and stderr to nowhere if SilencePasses is given.
diff --git a/tools/bugpoint/ToolRunner.cpp b/tools/bugpoint/ToolRunner.cpp
index ea0128d0e820..2fb9f93ad7da 100644
--- a/tools/bugpoint/ToolRunner.cpp
+++ b/tools/bugpoint/ToolRunner.cpp
@@ -194,10 +194,10 @@ Expected<int> LLI::ExecuteProgram(const std::string &Bitcode,
outs() << "<lli>";
outs().flush();
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = LLIArgs.size() - 1; i != e; ++i) errs()
- << " " << LLIArgs[i];
- errs() << "\n";);
+ LLVM_DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = LLIArgs.size() - 1; i != e; ++i) errs()
+ << " " << LLIArgs[i];
+ errs() << "\n";);
return RunProgramWithTimeout(LLIPath, &LLIArgs[0], InputFile, OutputFile,
OutputFile, Timeout, MemoryLimit);
}
@@ -481,10 +481,10 @@ Expected<CC::FileType> LLC::OutputCode(const std::string &Bitcode,
outs() << (UseIntegratedAssembler ? "<llc-ia>" : "<llc>");
outs().flush();
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = LLCArgs.size() - 1; i != e; ++i) errs()
- << " " << LLCArgs[i];
- errs() << "\n";);
+ LLVM_DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = LLCArgs.size() - 1; i != e; ++i) errs()
+ << " " << LLCArgs[i];
+ errs() << "\n";);
if (RunProgramWithTimeout(LLCPath, &LLCArgs[0], "", "", "", Timeout,
MemoryLimit))
return ProcessFailure(LLCPath, &LLCArgs[0], Timeout, MemoryLimit);
@@ -601,11 +601,11 @@ Expected<int> JIT::ExecuteProgram(const std::string &Bitcode,
outs() << "<jit>";
outs().flush();
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = JITArgs.size() - 1; i != e; ++i) errs()
- << " " << JITArgs[i];
- errs() << "\n";);
- DEBUG(errs() << "\nSending output to " << OutputFile << "\n");
+ LLVM_DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = JITArgs.size() - 1; i != e; ++i) errs()
+ << " " << JITArgs[i];
+ errs() << "\n";);
+ LLVM_DEBUG(errs() << "\nSending output to " << OutputFile << "\n");
return RunProgramWithTimeout(LLIPath, &JITArgs[0], InputFile, OutputFile,
OutputFile, Timeout, MemoryLimit);
}
@@ -710,10 +710,10 @@ Expected<int> CC::ExecuteProgram(const std::string &ProgramFile,
outs() << "<CC>";
outs().flush();
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = CCArgs.size() - 1; i != e; ++i) errs()
- << " " << CCArgs[i];
- errs() << "\n";);
+ LLVM_DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = CCArgs.size() - 1; i != e; ++i) errs()
+ << " " << CCArgs[i];
+ errs() << "\n";);
if (RunProgramWithTimeout(CCPath, &CCArgs[0], "", "", ""))
return ProcessFailure(CCPath, &CCArgs[0]);
@@ -758,15 +758,16 @@ Expected<int> CC::ExecuteProgram(const std::string &ProgramFile,
// Now that we have a binary, run it!
outs() << "<program>";
outs().flush();
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = ProgramArgs.size() - 1; i != e; ++i) errs()
- << " " << ProgramArgs[i];
- errs() << "\n";);
+ LLVM_DEBUG(
+ errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = ProgramArgs.size() - 1; i != e; ++i) errs()
+ << " " << ProgramArgs[i];
+ errs() << "\n";);
FileRemover OutputBinaryRemover(OutputBinary.str(), !SaveTemps);
if (RemoteClientPath.empty()) {
- DEBUG(errs() << "<run locally>");
+ LLVM_DEBUG(errs() << "<run locally>");
std::string Error;
int ExitCode = RunProgramWithTimeout(OutputBinary.str(), &ProgramArgs[0],
InputFile, OutputFile, OutputFile,
@@ -855,10 +856,10 @@ Error CC::MakeSharedObject(const std::string &InputFile, FileType fileType,
outs() << "<CC>";
outs().flush();
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = CCArgs.size() - 1; i != e; ++i) errs()
- << " " << CCArgs[i];
- errs() << "\n";);
+ LLVM_DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = CCArgs.size() - 1; i != e; ++i) errs()
+ << " " << CCArgs[i];
+ errs() << "\n";);
if (RunProgramWithTimeout(CCPath, &CCArgs[0], "", "", ""))
return ProcessFailure(CCPath, &CCArgs[0]);
return Error::success();
diff --git a/tools/lli/lli.cpp b/tools/lli/lli.cpp
index c24c1de383b0..17957c97e2cf 100644
--- a/tools/lli/lli.cpp
+++ b/tools/lli/lli.cpp
@@ -637,8 +637,8 @@ int main(int argc, char **argv, char * const *envp) {
// FIXME: argv and envp handling.
JITTargetAddress Entry = EE->getFunctionAddress(EntryFn->getName().str());
EE->finalizeObject();
- DEBUG(dbgs() << "Executing '" << EntryFn->getName() << "' at 0x"
- << format("%llx", Entry) << "\n");
+ LLVM_DEBUG(dbgs() << "Executing '" << EntryFn->getName() << "' at 0x"
+ << format("%llx", Entry) << "\n");
Result = ExitOnErr(R->callIntVoid(Entry));
// Like static constructors, the remote target MCJIT support doesn't handle
diff --git a/tools/llvm-dwarfdump/Statistics.cpp b/tools/llvm-dwarfdump/Statistics.cpp
index 9a7454a52624..5a274e1935d5 100644
--- a/tools/llvm-dwarfdump/Statistics.cpp
+++ b/tools/llvm-dwarfdump/Statistics.cpp
@@ -165,11 +165,11 @@ static void collectStatsRecursive(DWARFDie Die, std::string Prefix,
/// \{
static void printDatum(raw_ostream &OS, const char *Key, StringRef Value) {
OS << ",\"" << Key << "\":\"" << Value << '"';
- DEBUG(llvm::dbgs() << Key << ": " << Value << '\n');
+ LLVM_DEBUG(llvm::dbgs() << Key << ": " << Value << '\n');
}
static void printDatum(raw_ostream &OS, const char *Key, uint64_t Value) {
OS << ",\"" << Key << "\":" << Value;
- DEBUG(llvm::dbgs() << Key << ": " << Value << '\n');
+ LLVM_DEBUG(llvm::dbgs() << Key << ": " << Value << '\n');
}
/// \}
@@ -206,8 +206,9 @@ bool collectStatsForObjectFile(ObjectFile &Obj, DWARFContext &DICtx,
VarWithLoc += Stats.TotalVarWithLoc + Constants;
VarTotal += TotalVars + Constants;
VarUnique += Stats.VarsInFunction.size();
- DEBUG(for (auto V : Stats.VarsInFunction)
- llvm::dbgs() << Entry.getKey() << ": " << V << "\n");
+ LLVM_DEBUG(for (auto V
+ : Stats.VarsInFunction) llvm::dbgs()
+ << Entry.getKey() << ": " << V << "\n");
NumFunctions += Stats.IsFunction;
NumInlinedFunctions += Stats.IsFunction * Stats.NumFnInlined;
}
@@ -215,8 +216,8 @@ bool collectStatsForObjectFile(ObjectFile &Obj, DWARFContext &DICtx,
// Print summary.
OS.SetBufferSize(1024);
OS << "{\"version\":\"" << Version << '"';
- DEBUG(llvm::dbgs() << "Variable location quality metrics\n";
- llvm::dbgs() << "---------------------------------\n");
+ LLVM_DEBUG(llvm::dbgs() << "Variable location quality metrics\n";
+ llvm::dbgs() << "---------------------------------\n");
printDatum(OS, "file", Filename.str());
printDatum(OS, "format", FormatName);
printDatum(OS, "source functions", NumFunctions);
@@ -228,7 +229,7 @@ bool collectStatsForObjectFile(ObjectFile &Obj, DWARFContext &DICtx,
GlobalStats.ScopeBytesFromFirstDefinition);
printDatum(OS, "scope bytes covered", GlobalStats.ScopeBytesCovered);
OS << "}\n";
- DEBUG(
+ LLVM_DEBUG(
llvm::dbgs() << "Total Availability: "
<< (int)std::round((VarWithLoc * 100.0) / VarTotal) << "%\n";
llvm::dbgs() << "PC Ranges covered: "
diff --git a/tools/llvm-mca/Backend.cpp b/tools/llvm-mca/Backend.cpp
index de345108faea..4e6ec482eefa 100644
--- a/tools/llvm-mca/Backend.cpp
+++ b/tools/llvm-mca/Backend.cpp
@@ -48,7 +48,7 @@ void Backend::runCycle(unsigned Cycle) {
}
void Backend::notifyCycleBegin(unsigned Cycle) {
- DEBUG(dbgs() << "[E] Cycle begin: " << Cycle << '\n');
+ LLVM_DEBUG(dbgs() << "[E] Cycle begin: " << Cycle << '\n');
for (HWEventListener *Listener : Listeners)
Listener->onCycleBegin();
@@ -67,8 +67,8 @@ void Backend::notifyStallEvent(const HWStallEvent &Event) {
}
void Backend::notifyResourceAvailable(const ResourceRef &RR) {
- DEBUG(dbgs() << "[E] Resource Available: [" << RR.first << '.' << RR.second
- << "]\n");
+ LLVM_DEBUG(dbgs() << "[E] Resource Available: [" << RR.first << '.'
+ << RR.second << "]\n");
for (HWEventListener *Listener : Listeners)
Listener->onResourceAvailable(RR);
}
@@ -84,7 +84,7 @@ void Backend::notifyReleasedBuffers(ArrayRef<unsigned> Buffers) {
}
void Backend::notifyCycleEnd(unsigned Cycle) {
- DEBUG(dbgs() << "[E] Cycle end: " << Cycle << "\n\n");
+ LLVM_DEBUG(dbgs() << "[E] Cycle end: " << Cycle << "\n\n");
for (HWEventListener *Listener : Listeners)
Listener->onCycleEnd();
}
diff --git a/tools/llvm-mca/Dispatch.cpp b/tools/llvm-mca/Dispatch.cpp
index b077f0af21db..2c0227d33a99 100644
--- a/tools/llvm-mca/Dispatch.cpp
+++ b/tools/llvm-mca/Dispatch.cpp
@@ -176,7 +176,7 @@ void RegisterFile::collectWrites(SmallVectorImpl<WriteState *> &Writes,
assert(RegID && RegID < RegisterMappings.size());
WriteState *WS = RegisterMappings[RegID].first;
if (WS) {
- DEBUG(dbgs() << "Found a dependent use of RegID=" << RegID << '\n');
+ LLVM_DEBUG(dbgs() << "Found a dependent use of RegID=" << RegID << '\n');
Writes.push_back(WS);
}
@@ -184,8 +184,8 @@ void RegisterFile::collectWrites(SmallVectorImpl<WriteState *> &Writes,
for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
WS = RegisterMappings[*I].first;
if (WS && std::find(Writes.begin(), Writes.end(), WS) == Writes.end()) {
- DEBUG(dbgs() << "Found a dependent use of subReg " << *I << " (part of "
- << RegID << ")\n");
+ LLVM_DEBUG(dbgs() << "Found a dependent use of subReg " << *I
+ << " (part of " << RegID << ")\n");
Writes.push_back(WS);
}
}
@@ -254,12 +254,12 @@ void RegisterFile::dump() const {
void DispatchUnit::notifyInstructionDispatched(const InstRef &IR,
ArrayRef<unsigned> UsedRegs) {
- DEBUG(dbgs() << "[E] Instruction Dispatched: " << IR << '\n');
+ LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: " << IR << '\n');
Owner->notifyInstructionEvent(HWInstructionDispatchedEvent(IR, UsedRegs));
}
void DispatchUnit::notifyInstructionRetired(const InstRef &IR) {
- DEBUG(dbgs() << "[E] Instruction Retired: " << IR << '\n');
+ LLVM_DEBUG(dbgs() << "[E] Instruction Retired: " << IR << '\n');
SmallVector<unsigned, 4> FreedRegs(RAT->getNumRegisterFiles());
for (const std::unique_ptr<WriteState> &WS : IR.getInstruction()->getDefs())
RAT->invalidateRegisterMapping(*WS.get(), FreedRegs);
@@ -302,7 +302,8 @@ void DispatchUnit::updateRAWDependencies(ReadState &RS,
collectWrites(DependentWrites, RS.getRegisterID());
RS.setDependentWrites(DependentWrites.size());
- DEBUG(dbgs() << "Found " << DependentWrites.size() << " dependent writes\n");
+ LLVM_DEBUG(dbgs() << "Found " << DependentWrites.size()
+ << " dependent writes\n");
// We know that this read depends on all the writes in DependentWrites.
// For each write, check if we have ReadAdvance information, and use it
// to figure out in how many cycles this read becomes available.
diff --git a/tools/llvm-mca/InstrBuilder.cpp b/tools/llvm-mca/InstrBuilder.cpp
index 7b4ad38d8659..bca3e4bc0d73 100644
--- a/tools/llvm-mca/InstrBuilder.cpp
+++ b/tools/llvm-mca/InstrBuilder.cpp
@@ -113,7 +113,7 @@ static void initializeUsedResources(InstrDesc &ID,
}
}
- DEBUG({
+ LLVM_DEBUG({
for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
dbgs() << "\t\tMask=" << R.first << ", cy=" << R.second.size() << '\n';
for (const uint64_t R : ID.Buffers)
@@ -259,7 +259,7 @@ static void populateWrites(InstrDesc &ID, const MCInst &MCI,
}
Write.FullyUpdatesSuperRegs = FullyUpdatesSuperRegisters;
Write.IsOptionalDef = false;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", Latency=" << Write.Latency
<< ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
});
@@ -290,10 +290,10 @@ static void populateWrites(InstrDesc &ID, const MCInst &MCI,
Write.IsOptionalDef = false;
assert(Write.RegisterID != 0 && "Expected a valid phys register!");
- DEBUG(dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", PhysReg="
- << Write.RegisterID << ", Latency=" << Write.Latency
- << ", WriteResourceID=" << Write.SClassOrWriteResourceID
- << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOpIdx=" << Write.OpIndex << ", PhysReg="
+ << Write.RegisterID << ", Latency=" << Write.Latency
+ << ", WriteResourceID=" << Write.SClassOrWriteResourceID
+ << '\n');
}
if (MCDesc.hasOptionalDef()) {
@@ -352,7 +352,7 @@ static void populateReads(InstrDesc &ID, const MCInst &MCI,
Read.UseIndex = CurrentUse;
Read.HasReadAdvanceEntries = HasReadAdvanceEntries;
Read.SchedClassID = SchedClassID;
- DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex);
+ LLVM_DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex);
}
for (unsigned CurrentUse = 0; CurrentUse < NumImplicitUses; ++CurrentUse) {
@@ -362,8 +362,8 @@ static void populateReads(InstrDesc &ID, const MCInst &MCI,
Read.RegisterID = MCDesc.getImplicitUses()[CurrentUse];
Read.HasReadAdvanceEntries = HasReadAdvanceEntries;
Read.SchedClassID = SchedClassID;
- DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex
- << ", RegisterID=" << Read.RegisterID << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tOpIdx=" << Read.OpIndex
+ << ", RegisterID=" << Read.RegisterID << '\n');
}
}
@@ -413,8 +413,8 @@ const InstrDesc &InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
populateWrites(*ID, MCI, MCDesc, SCDesc, STI);
populateReads(*ID, MCI, MCDesc, SCDesc, STI);
- DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
- DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
// Now add the new descriptor.
Descriptors[Opcode] = std::move(ID);
diff --git a/tools/llvm-mca/LSUnit.cpp b/tools/llvm-mca/LSUnit.cpp
index 1b6d8485cfec..dfd3e53fb49a 100644
--- a/tools/llvm-mca/LSUnit.cpp
+++ b/tools/llvm-mca/LSUnit.cpp
@@ -36,8 +36,8 @@ void LSUnit::assignLQSlot(unsigned Index) {
assert(!isLQFull());
assert(LoadQueue.count(Index) == 0);
- DEBUG(dbgs() << "[LSUnit] - AssignLQSlot <Idx=" << Index
- << ",slot=" << LoadQueue.size() << ">\n");
+ LLVM_DEBUG(dbgs() << "[LSUnit] - AssignLQSlot <Idx=" << Index
+ << ",slot=" << LoadQueue.size() << ">\n");
LoadQueue.insert(Index);
}
@@ -45,8 +45,8 @@ void LSUnit::assignSQSlot(unsigned Index) {
assert(!isSQFull());
assert(StoreQueue.count(Index) == 0);
- DEBUG(dbgs() << "[LSUnit] - AssignSQSlot <Idx=" << Index
- << ",slot=" << StoreQueue.size() << ">\n");
+ LLVM_DEBUG(dbgs() << "[LSUnit] - AssignSQSlot <Idx=" << Index
+ << ",slot=" << StoreQueue.size() << ">\n");
StoreQueue.insert(Index);
}
@@ -123,15 +123,15 @@ void LSUnit::onInstructionExecuted(const InstRef &IR) {
const unsigned Index = IR.getSourceIndex();
std::set<unsigned>::iterator it = LoadQueue.find(Index);
if (it != LoadQueue.end()) {
- DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
- << " has been removed from the load queue.\n");
+ LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
+ << " has been removed from the load queue.\n");
LoadQueue.erase(it);
}
it = StoreQueue.find(Index);
if (it != StoreQueue.end()) {
- DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
- << " has been removed from the store queue.\n");
+ LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
+ << " has been removed from the store queue.\n");
StoreQueue.erase(it);
}
diff --git a/tools/llvm-mca/Scheduler.cpp b/tools/llvm-mca/Scheduler.cpp
index 5df677d741b3..4abdc97bb0e0 100644
--- a/tools/llvm-mca/Scheduler.cpp
+++ b/tools/llvm-mca/Scheduler.cpp
@@ -245,7 +245,8 @@ void Scheduler::scheduleInstruction(InstRef &IR) {
// If necessary, reserve queue entries in the load-store unit (LSU).
bool Reserved = LSU->reserve(IR);
if (!IR.getInstruction()->isReady() || (Reserved && !LSU->isReady(IR))) {
- DEBUG(dbgs() << "[SCHEDULER] Adding " << Idx << " to the Wait Queue\n");
+ LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << Idx
+ << " to the Wait Queue\n");
WaitQueue[Idx] = IR.getInstruction();
return;
}
@@ -266,12 +267,14 @@ void Scheduler::scheduleInstruction(InstRef &IR) {
// resources (i.e. BufferSize=1) is consumed.
if (!IsZeroLatency && !Resources->mustIssueImmediately(Desc)) {
- DEBUG(dbgs() << "[SCHEDULER] Adding " << IR << " to the Ready Queue\n");
+ LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding " << IR
+ << " to the Ready Queue\n");
ReadyQueue[IR.getSourceIndex()] = IR.getInstruction();
return;
}
- DEBUG(dbgs() << "[SCHEDULER] Instruction " << IR << " issued immediately\n");
+ LLVM_DEBUG(dbgs() << "[SCHEDULER] Instruction " << IR
+ << " issued immediately\n");
// Release buffered resources and issue MCIS to the underlying pipelines.
issueInstruction(IR);
}
@@ -441,8 +444,8 @@ void Scheduler::updateIssuedQueue(SmallVectorImpl<InstRef> &Executed) {
++I;
IssuedQueue.erase(ToRemove);
} else {
- DEBUG(dbgs() << "[SCHEDULER]: Instruction " << Entry.first
- << " is still executing.\n");
+ LLVM_DEBUG(dbgs() << "[SCHEDULER]: Instruction " << Entry.first
+ << " is still executing.\n");
++I;
}
}
@@ -450,7 +453,7 @@ void Scheduler::updateIssuedQueue(SmallVectorImpl<InstRef> &Executed) {
void Scheduler::notifyInstructionIssued(
const InstRef &IR, ArrayRef<std::pair<ResourceRef, double>> Used) {
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "[E] Instruction Issued: " << IR << '\n';
for (const std::pair<ResourceRef, unsigned> &Resource : Used) {
dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
@@ -463,14 +466,14 @@ void Scheduler::notifyInstructionIssued(
void Scheduler::notifyInstructionExecuted(const InstRef &IR) {
LSU->onInstructionExecuted(IR);
- DEBUG(dbgs() << "[E] Instruction Executed: " << IR << '\n');
+ LLVM_DEBUG(dbgs() << "[E] Instruction Executed: " << IR << '\n');
Owner->notifyInstructionEvent(
HWInstructionEvent(HWInstructionEvent::Executed, IR));
DU->onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
}
void Scheduler::notifyInstructionReady(const InstRef &IR) {
- DEBUG(dbgs() << "[E] Instruction Ready: " << IR << '\n');
+ LLVM_DEBUG(dbgs() << "[E] Instruction Ready: " << IR << '\n');
Owner->notifyInstructionEvent(
HWInstructionEvent(HWInstructionEvent::Ready, IR));
}
diff --git a/tools/verify-uselistorder/verify-uselistorder.cpp b/tools/verify-uselistorder/verify-uselistorder.cpp
index d0d6f01551f0..99c7007e4e99 100644
--- a/tools/verify-uselistorder/verify-uselistorder.cpp
+++ b/tools/verify-uselistorder/verify-uselistorder.cpp
@@ -107,7 +107,7 @@ struct ValueMapping {
bool TempFile::init(const std::string &Ext) {
SmallVector<char, 64> Vector;
- DEBUG(dbgs() << " - create-temp-file\n");
+ LLVM_DEBUG(dbgs() << " - create-temp-file\n");
if (auto EC = sys::fs::createTemporaryFile("uselistorder", Ext, Vector)) {
errs() << "verify-uselistorder: error: " << EC.message() << "\n";
return true;
@@ -122,7 +122,7 @@ bool TempFile::init(const std::string &Ext) {
}
bool TempFile::writeBitcode(const Module &M) const {
- DEBUG(dbgs() << " - write bitcode\n");
+ LLVM_DEBUG(dbgs() << " - write bitcode\n");
std::error_code EC;
raw_fd_ostream OS(Filename, EC, sys::fs::F_None);
if (EC) {
@@ -135,7 +135,7 @@ bool TempFile::writeBitcode(const Module &M) const {
}
bool TempFile::writeAssembly(const Module &M) const {
- DEBUG(dbgs() << " - write assembly\n");
+ LLVM_DEBUG(dbgs() << " - write assembly\n");
std::error_code EC;
raw_fd_ostream OS(Filename, EC, sys::fs::F_Text);
if (EC) {
@@ -148,7 +148,7 @@ bool TempFile::writeAssembly(const Module &M) const {
}
std::unique_ptr<Module> TempFile::readBitcode(LLVMContext &Context) const {
- DEBUG(dbgs() << " - read bitcode\n");
+ LLVM_DEBUG(dbgs() << " - read bitcode\n");
ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOr =
MemoryBuffer::getFile(Filename);
if (!BufferOr) {
@@ -169,7 +169,7 @@ std::unique_ptr<Module> TempFile::readBitcode(LLVMContext &Context) const {
}
std::unique_ptr<Module> TempFile::readAssembly(LLVMContext &Context) const {
- DEBUG(dbgs() << " - read assembly\n");
+ LLVM_DEBUG(dbgs() << " - read assembly\n");
SMDiagnostic Err;
std::unique_ptr<Module> M = parseAssemblyFile(Filename, Err, Context);
if (!M.get())
@@ -288,9 +288,9 @@ static void debugSizeMismatch(const ValueMapping &L, const ValueMapping &R) {
#endif
static bool matches(const ValueMapping &LM, const ValueMapping &RM) {
- DEBUG(dbgs() << "compare value maps\n");
+ LLVM_DEBUG(dbgs() << "compare value maps\n");
if (LM.Values.size() != RM.Values.size()) {
- DEBUG(debugSizeMismatch(LM, RM));
+ LLVM_DEBUG(debugSizeMismatch(LM, RM));
return false;
}
@@ -317,22 +317,22 @@ static bool matches(const ValueMapping &LM, const ValueMapping &RM) {
while (LU != LE) {
if (RU == RE) {
- DEBUG(debugUserMismatch(LM, RM, I));
+ LLVM_DEBUG(debugUserMismatch(LM, RM, I));
return false;
}
if (LM.lookup(LU->getUser()) != RM.lookup(RU->getUser())) {
- DEBUG(debugUserMismatch(LM, RM, I));
+ LLVM_DEBUG(debugUserMismatch(LM, RM, I));
return false;
}
if (LU->getOperandNo() != RU->getOperandNo()) {
- DEBUG(debugUserMismatch(LM, RM, I));
+ LLVM_DEBUG(debugUserMismatch(LM, RM, I));
return false;
}
skipUnmappedUsers(++LU, LE, LM);
skipUnmappedUsers(++RU, RE, RM);
}
if (RU != RE) {
- DEBUG(debugUserMismatch(LM, RM, I));
+ LLVM_DEBUG(debugUserMismatch(LM, RM, I));
return false;
}
}
@@ -397,7 +397,7 @@ static void shuffleValueUseLists(Value *V, std::minstd_rand0 &Gen,
// Generate random numbers between 10 and 99, which will line up nicely in
// debug output. We're not worried about collisons here.
- DEBUG(dbgs() << "V = "; V->dump());
+ LLVM_DEBUG(dbgs() << "V = "; V->dump());
std::uniform_int_distribution<short> Dist(10, 99);
SmallDenseMap<const Use *, short, 16> Order;
auto compareUses =
@@ -406,16 +406,16 @@ static void shuffleValueUseLists(Value *V, std::minstd_rand0 &Gen,
for (const Use &U : V->uses()) {
auto I = Dist(Gen);
Order[&U] = I;
- DEBUG(dbgs() << " - order: " << I << ", op = " << U.getOperandNo()
- << ", U = ";
- U.getUser()->dump());
+ LLVM_DEBUG(dbgs() << " - order: " << I << ", op = " << U.getOperandNo()
+ << ", U = ";
+ U.getUser()->dump());
}
} while (std::is_sorted(V->use_begin(), V->use_end(), compareUses));
- DEBUG(dbgs() << " => shuffle\n");
+ LLVM_DEBUG(dbgs() << " => shuffle\n");
V->sortUseList(compareUses);
- DEBUG({
+ LLVM_DEBUG({
for (const Use &U : V->uses()) {
dbgs() << " - order: " << Order.lookup(&U)
<< ", op = " << U.getOperandNo() << ", U = ";
@@ -437,7 +437,7 @@ static void reverseValueUseLists(Value *V, DenseSet<Value *> &Seen) {
// Nothing to shuffle for 0 or 1 users.
return;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "V = ";
V->dump();
for (const Use &U : V->uses()) {
@@ -449,7 +449,7 @@ static void reverseValueUseLists(Value *V, DenseSet<Value *> &Seen) {
V->reverseUseList();
- DEBUG({
+ LLVM_DEBUG({
for (const Use &U : V->uses()) {
dbgs() << " - order: op = " << U.getOperandNo() << ", U = ";
U.getUser()->dump();
@@ -515,13 +515,13 @@ static void shuffleUseLists(Module &M, unsigned SeedOffset) {
std::minstd_rand0 Gen(std::minstd_rand0::default_seed + SeedOffset);
DenseSet<Value *> Seen;
changeUseLists(M, [&](Value *V) { shuffleValueUseLists(V, Gen, Seen); });
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
}
static void reverseUseLists(Module &M) {
DenseSet<Value *> Seen;
changeUseLists(M, [&](Value *V) { reverseValueUseLists(V, Seen); });
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
}
int main(int argc, char **argv) {
diff --git a/unittests/IR/CFGBuilder.cpp b/unittests/IR/CFGBuilder.cpp
index 50494ab5c7ca..0f9fb8b18cc1 100644
--- a/unittests/IR/CFGBuilder.cpp
+++ b/unittests/IR/CFGBuilder.cpp
@@ -36,9 +36,9 @@ CFGBuilder::CFGBuilder(Function *F, const std::vector<Arc> &InitialArcs,
}
static void ConnectBlocks(BasicBlock *From, BasicBlock *To) {
- DEBUG(dbgs() << "Creating BB arc " << From->getName() << " -> "
- << To->getName() << "\n";
- dbgs().flush());
+ LLVM_DEBUG(dbgs() << "Creating BB arc " << From->getName() << " -> "
+ << To->getName() << "\n";
+ dbgs().flush());
auto *IntTy = IntegerType::get(From->getContext(), 32);
if (isa<UnreachableInst>(From->getTerminator()))
@@ -57,9 +57,9 @@ static void ConnectBlocks(BasicBlock *From, BasicBlock *To) {
}
static void DisconnectBlocks(BasicBlock *From, BasicBlock *To) {
- DEBUG(dbgs() << "Deleting BB arc " << From->getName() << " -> "
- << To->getName() << "\n";
- dbgs().flush());
+ LLVM_DEBUG(dbgs() << "Deleting BB arc " << From->getName() << " -> "
+ << To->getName() << "\n";
+ dbgs().flush());
SwitchInst *SI = cast<SwitchInst>(From->getTerminator());
if (SI->getNumCases() == 0) {
diff --git a/unittests/IR/DominatorTreeBatchUpdatesTest.cpp b/unittests/IR/DominatorTreeBatchUpdatesTest.cpp
index 3be087ea8d38..c75728b59ffe 100644
--- a/unittests/IR/DominatorTreeBatchUpdatesTest.cpp
+++ b/unittests/IR/DominatorTreeBatchUpdatesTest.cpp
@@ -62,9 +62,9 @@ TEST(DominatorTreeBatchUpdates, LegalizeDomUpdates) {
{Insert, B, D}, {Delete, C, D}, {Delete, A, B}};
SmallVector<DomUpdate, 4> Legalized;
DomSNCA::LegalizeUpdates(Updates, Legalized);
- DEBUG(dbgs() << "Legalized updates:\t");
- DEBUG(for (auto &U : Legalized) dbgs() << U << ", ");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Legalized updates:\t");
+ LLVM_DEBUG(for (auto &U : Legalized) dbgs() << U << ", ");
+ LLVM_DEBUG(dbgs() << "\n");
EXPECT_EQ(Legalized.size(), 3UL);
EXPECT_NE(llvm::find(Legalized, DomUpdate{Insert, B, C}), Legalized.end());
EXPECT_NE(llvm::find(Legalized, DomUpdate{Insert, B, D}), Legalized.end());
@@ -85,9 +85,9 @@ TEST(DominatorTreeBatchUpdates, LegalizePostDomUpdates) {
{Insert, B, D}, {Delete, C, D}, {Delete, A, B}};
SmallVector<DomUpdate, 4> Legalized;
PostDomSNCA::LegalizeUpdates(Updates, Legalized);
- DEBUG(dbgs() << "Legalized postdom updates:\t");
- DEBUG(for (auto &U : Legalized) dbgs() << U << ", ");
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "Legalized postdom updates:\t");
+ LLVM_DEBUG(for (auto &U : Legalized) dbgs() << U << ", ");
+ LLVM_DEBUG(dbgs() << "\n");
EXPECT_EQ(Legalized.size(), 3UL);
EXPECT_NE(llvm::find(Legalized, DomUpdate{Insert, C, B}), Legalized.end());
EXPECT_NE(llvm::find(Legalized, DomUpdate{Insert, D, B}), Legalized.end());
diff --git a/utils/TableGen/AsmMatcherEmitter.cpp b/utils/TableGen/AsmMatcherEmitter.cpp
index def14473d61d..9ff24100009f 100644
--- a/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/utils/TableGen/AsmMatcherEmitter.cpp
@@ -1091,7 +1091,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool IsAlias) const {
// Verify that any operand is only mentioned once.
// We reject aliases and ignore instructions for now.
if (!IsAlias && Tok[0] == '$' && !OperandNames.insert(Tok).second) {
- DEBUG({
+ LLVM_DEBUG({
errs() << "warning: '" << TheDef->getName() << "': "
<< "ignoring instruction with tied operand '"
<< Tok << "'\n";
@@ -1477,7 +1477,7 @@ void AsmMatcherInfo::buildInfo() {
SubtargetFeaturePairs.end());
#ifndef NDEBUG
for (const auto &Pair : SubtargetFeatures)
- DEBUG(Pair.second.dump());
+ LLVM_DEBUG(Pair.second.dump());
#endif // NDEBUG
assert(SubtargetFeatures.size() <= 64 && "Too many subtarget features!");
diff --git a/utils/TableGen/AsmWriterEmitter.cpp b/utils/TableGen/AsmWriterEmitter.cpp
index 723c0cd773f7..b897e9fdda34 100644
--- a/utils/TableGen/AsmWriterEmitter.cpp
+++ b/utils/TableGen/AsmWriterEmitter.cpp
@@ -351,8 +351,8 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
// If we don't have enough bits for this operand, don't include it.
if (NumBits > BitsLeft) {
- DEBUG(errs() << "Not enough bits to densely encode " << NumBits
- << " more bits\n");
+ LLVM_DEBUG(errs() << "Not enough bits to densely encode " << NumBits
+ << " more bits\n");
break;
}
diff --git a/utils/TableGen/CodeGenDAGPatterns.cpp b/utils/TableGen/CodeGenDAGPatterns.cpp
index be406aacfc93..875e6baff4a5 100644
--- a/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -3018,7 +3018,7 @@ void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) {
ThePat.resetError();
// If debugging, print out the pattern fragment result.
- DEBUG(ThePat.dump());
+ LLVM_DEBUG(ThePat.dump());
}
}
@@ -3633,7 +3633,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
const DAGInstruction &DI = parseInstructionPattern(CGI, LI, Instructions);
(void)DI;
- DEBUG(DI.getPattern()->dump());
+ LLVM_DEBUG(DI.getPattern()->dump());
}
// If we can, convert the instructions to be patterns that are matched!
@@ -4171,13 +4171,13 @@ static void FindDepVars(TreePatternNode *N, MultipleUseVarSet &DepVars) {
/// Dump the dependent variable set:
static void DumpDepVars(MultipleUseVarSet &DepVars) {
if (DepVars.empty()) {
- DEBUG(errs() << "<empty set>");
+ LLVM_DEBUG(errs() << "<empty set>");
} else {
- DEBUG(errs() << "[ ");
+ LLVM_DEBUG(errs() << "[ ");
for (const auto &DepVar : DepVars) {
- DEBUG(errs() << DepVar.getKey() << " ");
+ LLVM_DEBUG(errs() << DepVar.getKey() << " ");
}
- DEBUG(errs() << "]");
+ LLVM_DEBUG(errs() << "]");
}
}
#endif
@@ -4201,13 +4201,13 @@ static void CombineChildVariants(TreePatternNode *Orig,
bool NotDone;
do {
#ifndef NDEBUG
- DEBUG(if (!Idxs.empty()) {
- errs() << Orig->getOperator()->getName() << ": Idxs = [ ";
- for (unsigned Idx : Idxs) {
- errs() << Idx << " ";
- }
- errs() << "]\n";
- });
+ LLVM_DEBUG(if (!Idxs.empty()) {
+ errs() << Orig->getOperator()->getName() << ": Idxs = [ ";
+ for (unsigned Idx : Idxs) {
+ errs() << Idx << " ";
+ }
+ errs() << "]\n";
+ });
#endif
// Create the variant and add it to the output list.
std::vector<TreePatternNode*> NewChildren;
@@ -4410,7 +4410,7 @@ static void GenerateVariantsOf(TreePatternNode *N,
// GenerateVariants - Generate variants. For example, commutative patterns can
// match multiple ways. Add them to PatternsToMatch as well.
void CodeGenDAGPatterns::GenerateVariants() {
- DEBUG(errs() << "Generating instruction variants.\n");
+ LLVM_DEBUG(errs() << "Generating instruction variants.\n");
// Loop over all of the patterns we've collected, checking to see if we can
// generate variants of the instruction, through the exploitation of
@@ -4425,9 +4425,9 @@ void CodeGenDAGPatterns::GenerateVariants() {
MultipleUseVarSet DepVars;
std::vector<TreePatternNode*> Variants;
FindDepVars(PatternsToMatch[i].getSrcPattern(), DepVars);
- DEBUG(errs() << "Dependent/multiply used variables: ");
- DEBUG(DumpDepVars(DepVars));
- DEBUG(errs() << "\n");
+ LLVM_DEBUG(errs() << "Dependent/multiply used variables: ");
+ LLVM_DEBUG(DumpDepVars(DepVars));
+ LLVM_DEBUG(errs() << "\n");
GenerateVariantsOf(PatternsToMatch[i].getSrcPattern(), Variants, *this,
DepVars);
@@ -4435,16 +4435,14 @@ void CodeGenDAGPatterns::GenerateVariants() {
if (Variants.size() == 1) // No additional variants for this pattern.
continue;
- DEBUG(errs() << "FOUND VARIANTS OF: ";
- PatternsToMatch[i].getSrcPattern()->dump();
- errs() << "\n");
+ LLVM_DEBUG(errs() << "FOUND VARIANTS OF: ";
+ PatternsToMatch[i].getSrcPattern()->dump(); errs() << "\n");
for (unsigned v = 0, e = Variants.size(); v != e; ++v) {
TreePatternNode *Variant = Variants[v];
- DEBUG(errs() << " VAR#" << v << ": ";
- Variant->dump();
- errs() << "\n");
+ LLVM_DEBUG(errs() << " VAR#" << v << ": "; Variant->dump();
+ errs() << "\n");
// Scan to see if an instruction or explicit pattern already matches this.
bool AlreadyExists = false;
@@ -4456,7 +4454,7 @@ void CodeGenDAGPatterns::GenerateVariants() {
// Check to see if this variant already exists.
if (Variant->isIsomorphicTo(PatternsToMatch[p].getSrcPattern(),
DepVars)) {
- DEBUG(errs() << " *** ALREADY EXISTS, ignoring variant.\n");
+ LLVM_DEBUG(errs() << " *** ALREADY EXISTS, ignoring variant.\n");
AlreadyExists = true;
break;
}
@@ -4472,6 +4470,6 @@ void CodeGenDAGPatterns::GenerateVariants() {
PatternsToMatch[i].getAddedComplexity(), Record::getNewUID()));
}
- DEBUG(errs() << "\n");
+ LLVM_DEBUG(errs() << "\n");
}
}
diff --git a/utils/TableGen/CodeGenRegisters.cpp b/utils/TableGen/CodeGenRegisters.cpp
index f2643df73b37..b7833d017e18 100644
--- a/utils/TableGen/CodeGenRegisters.cpp
+++ b/utils/TableGen/CodeGenRegisters.cpp
@@ -1593,11 +1593,12 @@ static void computeUberWeights(std::vector<UberRegSet> &UberSets,
if (Weight > MaxWeight)
MaxWeight = Weight;
if (I->Weight != MaxWeight) {
- DEBUG(
- dbgs() << "UberSet " << I - UberSets.begin() << " Weight " << MaxWeight;
- for (auto &Unit : I->Regs)
- dbgs() << " " << Unit->getName();
- dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "UberSet " << I - UberSets.begin() << " Weight "
+ << MaxWeight;
+ for (auto &Unit
+ : I->Regs) dbgs()
+ << " " << Unit->getName();
+ dbgs() << "\n");
// Update the set weight.
I->Weight = MaxWeight;
}
@@ -1764,8 +1765,8 @@ void CodeGenRegBank::pruneUnitSets() {
&& (SubSet.Units.size() + 3 > SuperSet.Units.size())
&& UnitWeight == RegUnits[SuperSet.Units[0]].Weight
&& UnitWeight == RegUnits[SuperSet.Units.back()].Weight) {
- DEBUG(dbgs() << "UnitSet " << SubIdx << " subsumed by " << SuperIdx
- << "\n");
+ LLVM_DEBUG(dbgs() << "UnitSet " << SubIdx << " subsumed by " << SuperIdx
+ << "\n");
// We can pick any of the set names for the merged set. Go for the
// shortest one to avoid picking the name of one of the classes that are
// artificially created by tablegen. So "FPR128_lo" instead of
@@ -1818,29 +1819,26 @@ void CodeGenRegBank::computeRegUnitSets() {
RegUnitSets.pop_back();
}
- DEBUG(dbgs() << "\nBefore pruning:\n";
- for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
- USIdx < USEnd; ++USIdx) {
- dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
- << ":";
- for (auto &U : RegUnitSets[USIdx].Units)
- printRegUnitName(U);
- dbgs() << "\n";
- });
+ LLVM_DEBUG(dbgs() << "\nBefore pruning:\n"; for (unsigned USIdx = 0,
+ USEnd = RegUnitSets.size();
+ USIdx < USEnd; ++USIdx) {
+ dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";
+ for (auto &U : RegUnitSets[USIdx].Units)
+ printRegUnitName(U);
+ dbgs() << "\n";
+ });
// Iteratively prune unit sets.
pruneUnitSets();
- DEBUG(dbgs() << "\nBefore union:\n";
- for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
- USIdx < USEnd; ++USIdx) {
- dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
- << ":";
- for (auto &U : RegUnitSets[USIdx].Units)
- printRegUnitName(U);
- dbgs() << "\n";
- }
- dbgs() << "\nUnion sets:\n");
+ LLVM_DEBUG(dbgs() << "\nBefore union:\n"; for (unsigned USIdx = 0,
+ USEnd = RegUnitSets.size();
+ USIdx < USEnd; ++USIdx) {
+ dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";
+ for (auto &U : RegUnitSets[USIdx].Units)
+ printRegUnitName(U);
+ dbgs() << "\n";
+ } dbgs() << "\nUnion sets:\n");
// Iterate over all unit sets, including new ones added by this loop.
unsigned NumRegUnitSubSets = RegUnitSets.size();
@@ -1880,11 +1878,11 @@ void CodeGenRegBank::computeRegUnitSets() {
if (SetI != std::prev(RegUnitSets.end()))
RegUnitSets.pop_back();
else {
- DEBUG(dbgs() << "UnitSet " << RegUnitSets.size()-1
- << " " << RegUnitSets.back().Name << ":";
- for (auto &U : RegUnitSets.back().Units)
- printRegUnitName(U);
- dbgs() << "\n";);
+ LLVM_DEBUG(dbgs() << "UnitSet " << RegUnitSets.size() - 1 << " "
+ << RegUnitSets.back().Name << ":";
+ for (auto &U
+ : RegUnitSets.back().Units) printRegUnitName(U);
+ dbgs() << "\n";);
}
}
}
@@ -1892,15 +1890,14 @@ void CodeGenRegBank::computeRegUnitSets() {
// Iteratively prune unit sets after inferring supersets.
pruneUnitSets();
- DEBUG(dbgs() << "\n";
- for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
- USIdx < USEnd; ++USIdx) {
- dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name
- << ":";
- for (auto &U : RegUnitSets[USIdx].Units)
- printRegUnitName(U);
- dbgs() << "\n";
- });
+ LLVM_DEBUG(
+ dbgs() << "\n"; for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
+ USIdx < USEnd; ++USIdx) {
+ dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";
+ for (auto &U : RegUnitSets[USIdx].Units)
+ printRegUnitName(U);
+ dbgs() << "\n";
+ });
// For each register class, list the UnitSets that are supersets.
RegClassUnitSets.resize(RegClasses.size());
@@ -1918,20 +1915,20 @@ void CodeGenRegBank::computeRegUnitSets() {
if (RCRegUnits.empty())
continue;
- DEBUG(dbgs() << "RC " << RC.getName() << " Units: \n";
- for (auto U : RCRegUnits)
- printRegUnitName(U);
- dbgs() << "\n UnitSetIDs:");
+ LLVM_DEBUG(dbgs() << "RC " << RC.getName() << " Units: \n";
+ for (auto U
+ : RCRegUnits) printRegUnitName(U);
+ dbgs() << "\n UnitSetIDs:");
// Find all supersets.
for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
USIdx != USEnd; ++USIdx) {
if (isRegUnitSubSet(RCRegUnits, RegUnitSets[USIdx].Units)) {
- DEBUG(dbgs() << " " << USIdx);
+ LLVM_DEBUG(dbgs() << " " << USIdx);
RegClassUnitSets[RCIdx].push_back(USIdx);
}
}
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
assert(!RegClassUnitSets[RCIdx].empty() && "missing unit set for regclass");
}
diff --git a/utils/TableGen/CodeGenSchedule.cpp b/utils/TableGen/CodeGenSchedule.cpp
index 303aaaa7a20e..a520afb13827 100644
--- a/utils/TableGen/CodeGenSchedule.cpp
+++ b/utils/TableGen/CodeGenSchedule.cpp
@@ -208,7 +208,8 @@ CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
// Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
// ProcResourceDefs.
- DEBUG(dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
+ LLVM_DEBUG(
+ dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
collectProcResources();
// Collect optional processor description.
@@ -261,7 +262,7 @@ void CodeGenSchedModels::collectProcModels() {
ProcModelMap[NoModelDef] = 0;
// For each processor, find a unique machine model.
- DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
+ LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
for (Record *ProcRecord : ProcRecords)
addProcModel(ProcRecord);
}
@@ -285,7 +286,7 @@ void CodeGenSchedModels::addProcModel(Record *ProcDef) {
ProcModels.emplace_back(ProcModels.size(), Name,
ProcDef->getValueAsDef("SchedModel"), ModelKey);
}
- DEBUG(ProcModels.back().dump());
+ LLVM_DEBUG(ProcModels.back().dump());
}
// Recursively find all reachable SchedReadWrite records.
@@ -413,26 +414,26 @@ void CodeGenSchedModels::collectSchedRW() {
PrintFatalError(ADef->getLoc(), "Cannot Alias an Alias");
RW.Aliases.push_back(ADef);
}
- DEBUG(
- dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n";
- for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
- dbgs() << WIdx << ": ";
- SchedWrites[WIdx].dump();
- dbgs() << '\n';
- }
- for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) {
- dbgs() << RIdx << ": ";
- SchedReads[RIdx].dump();
- dbgs() << '\n';
- }
- RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
- for (Record *RWDef : RWDefs) {
- if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) {
- StringRef Name = RWDef->getName();
- if (Name != "NoWrite" && Name != "ReadDefault")
- dbgs() << "Unused SchedReadWrite " << Name << '\n';
- }
- });
+ LLVM_DEBUG(
+ dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n";
+ for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
+ dbgs() << WIdx << ": ";
+ SchedWrites[WIdx].dump();
+ dbgs() << '\n';
+ } for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd;
+ ++RIdx) {
+ dbgs() << RIdx << ": ";
+ SchedReads[RIdx].dump();
+ dbgs() << '\n';
+ } RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
+ for (Record *RWDef
+ : RWDefs) {
+ if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) {
+ StringRef Name = RWDef->getName();
+ if (Name != "NoWrite" && Name != "ReadDefault")
+ dbgs() << "Unused SchedReadWrite " << Name << '\n';
+ }
+ });
}
/// Compute a SchedWrite name from a sequence of writes.
@@ -612,25 +613,25 @@ void CodeGenSchedModels::collectSchedClasses() {
// Create classes for InstRW defs.
RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
llvm::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
- DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
+ LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
for (Record *RWDef : InstRWDefs)
createInstRWClass(RWDef);
NumInstrSchedClasses = SchedClasses.size();
bool EnableDump = false;
- DEBUG(EnableDump = true);
+ LLVM_DEBUG(EnableDump = true);
if (!EnableDump)
return;
- DEBUG(
+ LLVM_DEBUG(
dbgs()
<< "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n");
for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
StringRef InstName = Inst->TheDef->getName();
unsigned SCIdx = getSchedClassIdx(*Inst);
if (!SCIdx) {
- DEBUG({
+ LLVM_DEBUG({
if (!Inst->hasNoSchedulingInfo)
dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
});
@@ -649,7 +650,7 @@ void CodeGenSchedModels::collectSchedClasses() {
}
if (!SC.Writes.empty()) {
ProcIndices.push_back(0);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "SchedRW machine model for " << InstName;
for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE;
++WI)
@@ -664,12 +665,13 @@ void CodeGenSchedModels::collectSchedClasses() {
const CodeGenProcModel &ProcModel =
getProcModel(RWDef->getValueAsDef("SchedModel"));
ProcIndices.push_back(ProcModel.Index);
- DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for " << InstName);
+ LLVM_DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for "
+ << InstName);
IdxVec Writes;
IdxVec Reads;
findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
Writes, Reads);
- DEBUG({
+ LLVM_DEBUG({
for (unsigned WIdx : Writes)
dbgs() << " " << SchedWrites[WIdx].Name;
for (unsigned RIdx : Reads)
@@ -678,7 +680,7 @@ void CodeGenSchedModels::collectSchedClasses() {
});
}
// If ProcIndices contains zero, the class applies to all processors.
- DEBUG({
+ LLVM_DEBUG({
if (!std::count(ProcIndices.begin(), ProcIndices.end(), 0)) {
for (const CodeGenProcModel &PM : ProcModels) {
if (!std::count(ProcIndices.begin(), ProcIndices.end(), PM.Index))
@@ -815,9 +817,9 @@ void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
}
}
}
- DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
- << SchedClasses[OldSCIdx].Name << " on "
- << RWModelDef->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
+ << SchedClasses[OldSCIdx].Name << " on "
+ << RWModelDef->getName() << "\n");
SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
continue;
}
@@ -826,8 +828,9 @@ void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
unsigned SCIdx = SchedClasses.size();
SchedClasses.emplace_back(SCIdx, createSchedClassName(InstDefs), nullptr);
CodeGenSchedClass &SC = SchedClasses.back();
- DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
- << InstRWDef->getValueAsDef("SchedModel")->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
+ << InstRWDef->getValueAsDef("SchedModel")->getName()
+ << "\n");
// Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
@@ -867,7 +870,7 @@ bool CodeGenSchedModels::hasItineraries() const {
// Gather the processor itineraries.
void CodeGenSchedModels::collectProcItins() {
- DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
+ LLVM_DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
for (CodeGenProcModel &ProcModel : ProcModels) {
if (!ProcModel.hasItineraries())
continue;
@@ -893,19 +896,20 @@ void CodeGenSchedModels::collectProcItins() {
}
}
if (!FoundClass) {
- DEBUG(dbgs() << ProcModel.ItinsDef->getName()
- << " missing class for itinerary " << ItinDef->getName() << '\n');
+ LLVM_DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+ << " missing class for itinerary "
+ << ItinDef->getName() << '\n');
}
}
// Check for missing itinerary entries.
assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
- DEBUG(
- for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
- if (!ProcModel.ItinDefList[i])
- dbgs() << ProcModel.ItinsDef->getName()
- << " missing itinerary for class "
- << SchedClasses[i].Name << '\n';
- });
+ LLVM_DEBUG(
+ for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
+ if (!ProcModel.ItinDefList[i])
+ dbgs() << ProcModel.ItinsDef->getName()
+ << " missing itinerary for class " << SchedClasses[i].Name
+ << '\n';
+ });
}
}
@@ -938,8 +942,9 @@ void CodeGenSchedModels::collectProcUnsupportedFeatures() {
/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
- DEBUG(dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
- DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");
+ LLVM_DEBUG(
+ dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
+ LLVM_DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");
// Visit all existing classes and newly created classes.
for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
@@ -1401,7 +1406,8 @@ void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
ArrayRef<unsigned> OperReads,
unsigned FromClassIdx,
ArrayRef<unsigned> ProcIndices) {
- DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices); dbgs() << ") ");
+ LLVM_DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices);
+ dbgs() << ") ");
// Create a seed transition with an empty PredTerm and the expanded sequences
// of SchedWrites for the current SchedClass.
@@ -1416,18 +1422,18 @@ void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
LastTransitions[0].WriteSequences.emplace_back();
SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences.back();
Seq.append(WriteSeq.begin(), WriteSeq.end());
- DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+ LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
}
- DEBUG(dbgs() << " Reads: ");
+ LLVM_DEBUG(dbgs() << " Reads: ");
for (unsigned ReadIdx : OperReads) {
IdxVec ReadSeq;
expandRWSequence(ReadIdx, ReadSeq, /*IsRead=*/true);
LastTransitions[0].ReadSequences.emplace_back();
SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences.back();
Seq.append(ReadSeq.begin(), ReadSeq.end());
- DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+ LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
}
- DEBUG(dbgs() << '\n');
+ LLVM_DEBUG(dbgs() << '\n');
// Collect all PredTransitions for individual operands.
// Iterate until no variant writes remain.
@@ -1435,7 +1441,7 @@ void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
PredTransitions Transitions(*this);
for (const PredTransition &Trans : LastTransitions)
Transitions.substituteVariants(Trans);
- DEBUG(Transitions.dump());
+ LLVM_DEBUG(Transitions.dump());
LastTransitions.swap(Transitions.TransVec);
}
// If the first transition has no variants, nothing to do.
@@ -1613,30 +1619,29 @@ void CodeGenSchedModels::collectProcResources() {
LessRecord());
llvm::sort(PM.ProcResourceDefs.begin(), PM.ProcResourceDefs.end(),
LessRecord());
- DEBUG(
- PM.dump();
- dbgs() << "WriteResDefs: ";
- for (RecIter RI = PM.WriteResDefs.begin(),
- RE = PM.WriteResDefs.end(); RI != RE; ++RI) {
- if ((*RI)->isSubClassOf("WriteRes"))
- dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
- else
- dbgs() << (*RI)->getName() << " ";
- }
- dbgs() << "\nReadAdvanceDefs: ";
- for (RecIter RI = PM.ReadAdvanceDefs.begin(),
- RE = PM.ReadAdvanceDefs.end(); RI != RE; ++RI) {
- if ((*RI)->isSubClassOf("ReadAdvance"))
- dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
- else
- dbgs() << (*RI)->getName() << " ";
- }
- dbgs() << "\nProcResourceDefs: ";
- for (RecIter RI = PM.ProcResourceDefs.begin(),
- RE = PM.ProcResourceDefs.end(); RI != RE; ++RI) {
- dbgs() << (*RI)->getName() << " ";
- }
- dbgs() << '\n');
+ LLVM_DEBUG(
+ PM.dump();
+ dbgs() << "WriteResDefs: "; for (RecIter RI = PM.WriteResDefs.begin(),
+ RE = PM.WriteResDefs.end();
+ RI != RE; ++RI) {
+ if ((*RI)->isSubClassOf("WriteRes"))
+ dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
+ else
+ dbgs() << (*RI)->getName() << " ";
+ } dbgs() << "\nReadAdvanceDefs: ";
+ for (RecIter RI = PM.ReadAdvanceDefs.begin(),
+ RE = PM.ReadAdvanceDefs.end();
+ RI != RE; ++RI) {
+ if ((*RI)->isSubClassOf("ReadAdvance"))
+ dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
+ else
+ dbgs() << (*RI)->getName() << " ";
+ } dbgs()
+ << "\nProcResourceDefs: ";
+ for (RecIter RI = PM.ProcResourceDefs.begin(),
+ RE = PM.ProcResourceDefs.end();
+ RI != RE; ++RI) { dbgs() << (*RI)->getName() << " "; } dbgs()
+ << '\n');
verifyProcResourceGroups(PM);
}
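For readers following the rename above, a minimal usage sketch of the new macro, assuming a hypothetical DEBUG_TYPE of "my-emitter" (not part of this patch): LLVM_DEBUG comes from llvm/Support/Debug.h, needs DEBUG_TYPE defined beforehand, and its argument is only evaluated in asserts-enabled builds when -debug or -debug-only=<type> is passed, exactly as DEBUG() behaved.

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "my-emitter" // hypothetical debug type, for illustration only

static int computeWidth(int NumBits) {
  // Printed only with -debug or -debug-only=my-emitter in a +Asserts build.
  LLVM_DEBUG(llvm::dbgs() << "computeWidth(" << NumBits << ")\n");
  return (NumBits + 7) / 8;
}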
diff --git a/utils/TableGen/DAGISelEmitter.cpp b/utils/TableGen/DAGISelEmitter.cpp
index eadf5bc724b1..4a333356f686 100644
--- a/utils/TableGen/DAGISelEmitter.cpp
+++ b/utils/TableGen/DAGISelEmitter.cpp
@@ -137,13 +137,16 @@ void DAGISelEmitter::run(raw_ostream &OS) {
"// When neither of the GET_DAGISEL* macros is defined, the functions\n"
"// are emitted inline.\n\n";
- DEBUG(errs() << "\n\nALL PATTERNS TO MATCH:\n\n";
- for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
- E = CGP.ptm_end(); I != E; ++I) {
- errs() << "PATTERN: "; I->getSrcPattern()->dump();
- errs() << "\nRESULT: "; I->getDstPattern()->dump();
- errs() << "\n";
- });
+ LLVM_DEBUG(errs() << "\n\nALL PATTERNS TO MATCH:\n\n";
+ for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
+ E = CGP.ptm_end();
+ I != E; ++I) {
+ errs() << "PATTERN: ";
+ I->getSrcPattern()->dump();
+ errs() << "\nRESULT: ";
+ I->getDstPattern()->dump();
+ errs() << "\n";
+ });
// Add all the patterns to a temporary list so we can sort them.
std::vector<const PatternToMatch*> Patterns;
diff --git a/utils/TableGen/DAGISelMatcherOpt.cpp b/utils/TableGen/DAGISelMatcherOpt.cpp
index 0bb656826fbd..554c7438ce3d 100644
--- a/utils/TableGen/DAGISelMatcherOpt.cpp
+++ b/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -293,15 +293,12 @@ static void FactorNodes(std::unique_ptr<Matcher> &InputMatcherPtr) {
if (Scan != e &&
        // Don't print if it's obvious nothing extra could be merged anyway.
Scan+1 != e) {
- DEBUG(errs() << "Couldn't merge this:\n";
- Optn->print(errs(), 4);
- errs() << "into this:\n";
- OptionsToMatch[Scan]->print(errs(), 4);
- if (Scan+1 != e)
- OptionsToMatch[Scan+1]->printOne(errs());
- if (Scan+2 < e)
- OptionsToMatch[Scan+2]->printOne(errs());
- errs() << "\n");
+ LLVM_DEBUG(errs() << "Couldn't merge this:\n"; Optn->print(errs(), 4);
+ errs() << "into this:\n";
+ OptionsToMatch[Scan]->print(errs(), 4);
+ if (Scan + 1 != e) OptionsToMatch[Scan + 1]->printOne(errs());
+ if (Scan + 2 < e) OptionsToMatch[Scan + 2]->printOne(errs());
+ errs() << "\n");
}
// If we only found one option starting with this matcher, no factoring is
diff --git a/utils/TableGen/DFAPacketizerEmitter.cpp b/utils/TableGen/DFAPacketizerEmitter.cpp
index 1c1932a0144a..0db0f55f5ed6 100644
--- a/utils/TableGen/DFAPacketizerEmitter.cpp
+++ b/utils/TableGen/DFAPacketizerEmitter.cpp
@@ -278,30 +278,30 @@ public:
// dbgsInsnClass - When debugging, print instruction class stages.
//
void dbgsInsnClass(const std::vector<unsigned> &InsnClass) {
- DEBUG(dbgs() << "InsnClass: ");
+ LLVM_DEBUG(dbgs() << "InsnClass: ");
for (unsigned i = 0; i < InsnClass.size(); ++i) {
if (i > 0) {
- DEBUG(dbgs() << ", ");
+ LLVM_DEBUG(dbgs() << ", ");
}
- DEBUG(dbgs() << "0x" << Twine::utohexstr(InsnClass[i]));
+ LLVM_DEBUG(dbgs() << "0x" << Twine::utohexstr(InsnClass[i]));
}
DFAInput InsnInput = getDFAInsnInput(InsnClass);
- DEBUG(dbgs() << " (input: 0x" << Twine::utohexstr(InsnInput) << ")");
+ LLVM_DEBUG(dbgs() << " (input: 0x" << Twine::utohexstr(InsnInput) << ")");
}
//
// dbgsStateInfo - When debugging, print the set of state info.
//
void dbgsStateInfo(const std::set<unsigned> &stateInfo) {
- DEBUG(dbgs() << "StateInfo: ");
+ LLVM_DEBUG(dbgs() << "StateInfo: ");
unsigned i = 0;
for (std::set<unsigned>::iterator SI = stateInfo.begin();
SI != stateInfo.end(); ++SI, ++i) {
unsigned thisState = *SI;
if (i > 0) {
- DEBUG(dbgs() << ", ");
+ LLVM_DEBUG(dbgs() << ", ");
}
- DEBUG(dbgs() << "0x" << Twine::utohexstr(thisState));
+ LLVM_DEBUG(dbgs() << "0x" << Twine::utohexstr(thisState));
}
}
@@ -310,7 +310,7 @@ void dbgsStateInfo(const std::set<unsigned> &stateInfo) {
//
void dbgsIndent(unsigned indent) {
for (unsigned i = 0; i < indent; ++i) {
- DEBUG(dbgs() << " ");
+ LLVM_DEBUG(dbgs() << " ");
}
}
#endif // NDEBUG
@@ -361,7 +361,8 @@ void State::AddInsnClass(std::vector<unsigned> &InsnClass,
DenseSet<unsigned> VisitedResourceStates;
- DEBUG(dbgs() << " thisState: 0x" << Twine::utohexstr(thisState) << "\n");
+ LLVM_DEBUG(dbgs() << " thisState: 0x" << Twine::utohexstr(thisState)
+ << "\n");
AddInsnClassStages(InsnClass, ComboBitToBitsMap,
numstages - 1, numstages,
thisState, thisState,
@@ -378,7 +379,7 @@ void State::AddInsnClassStages(std::vector<unsigned> &InsnClass,
assert((chkstage < numstages) && "AddInsnClassStages: stage out of range");
unsigned thisStage = InsnClass[chkstage];
- DEBUG({
+ LLVM_DEBUG({
dbgsIndent((1 + numstages - chkstage) << 1);
dbgs() << "AddInsnClassStages " << chkstage << " (0x"
<< Twine::utohexstr(thisStage) << ") from ";
@@ -395,10 +396,10 @@ void State::AddInsnClassStages(std::vector<unsigned> &InsnClass,
if (resourceMask & thisStage) {
unsigned combo = ComboBitToBitsMap[resourceMask];
if (combo && ((~prevState & combo) != combo)) {
- DEBUG(dbgs() << "\tSkipped Add 0x" << Twine::utohexstr(prevState)
- << " - combo op 0x" << Twine::utohexstr(resourceMask)
- << " (0x" << Twine::utohexstr(combo)
- << ") cannot be scheduled\n");
+ LLVM_DEBUG(dbgs() << "\tSkipped Add 0x" << Twine::utohexstr(prevState)
+ << " - combo op 0x" << Twine::utohexstr(resourceMask)
+ << " (0x" << Twine::utohexstr(combo)
+ << ") cannot be scheduled\n");
continue;
}
//
@@ -406,7 +407,7 @@ void State::AddInsnClassStages(std::vector<unsigned> &InsnClass,
// resource state if that resource was used.
//
unsigned ResultingResourceState = prevState | resourceMask | combo;
- DEBUG({
+ LLVM_DEBUG({
dbgsIndent((2 + numstages - chkstage) << 1);
dbgs() << "0x" << Twine::utohexstr(prevState) << " | 0x"
<< Twine::utohexstr(resourceMask);
@@ -433,13 +434,15 @@ void State::AddInsnClassStages(std::vector<unsigned> &InsnClass,
if (VisitedResourceStates.count(ResultingResourceState) == 0) {
VisitedResourceStates.insert(ResultingResourceState);
PossibleStates.insert(ResultingResourceState);
- DEBUG(dbgs() << "\tResultingResourceState: 0x"
- << Twine::utohexstr(ResultingResourceState) << "\n");
+ LLVM_DEBUG(dbgs()
+ << "\tResultingResourceState: 0x"
+ << Twine::utohexstr(ResultingResourceState) << "\n");
} else {
- DEBUG(dbgs() << "\tSkipped Add - state already seen\n");
+ LLVM_DEBUG(dbgs() << "\tSkipped Add - state already seen\n");
}
} else {
- DEBUG(dbgs() << "\tSkipped Add - no final resources available\n");
+ LLVM_DEBUG(dbgs()
+ << "\tSkipped Add - no final resources available\n");
}
} else {
//
@@ -447,13 +450,13 @@ void State::AddInsnClassStages(std::vector<unsigned> &InsnClass,
// stage in InsnClass for available resources.
//
if (ResultingResourceState != prevState) {
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
AddInsnClassStages(InsnClass, ComboBitToBitsMap,
chkstage - 1, numstages,
ResultingResourceState, origState,
VisitedResourceStates, PossibleStates);
} else {
- DEBUG(dbgs() << "\tSkipped Add - no resources available\n");
+ LLVM_DEBUG(dbgs() << "\tSkipped Add - no resources available\n");
}
}
}
@@ -494,10 +497,11 @@ bool State::canMaybeAddInsnClass(std::vector<unsigned> &InsnClass,
// These cases are caught later in AddInsnClass.
unsigned combo = ComboBitToBitsMap[InsnClass[i]];
if (combo && ((~resources & combo) != combo)) {
- DEBUG(dbgs() << "\tSkipped canMaybeAdd 0x"
- << Twine::utohexstr(resources) << " - combo op 0x"
- << Twine::utohexstr(InsnClass[i]) << " (0x"
- << Twine::utohexstr(combo) << ") cannot be scheduled\n");
+ LLVM_DEBUG(dbgs() << "\tSkipped canMaybeAdd 0x"
+ << Twine::utohexstr(resources) << " - combo op 0x"
+ << Twine::utohexstr(InsnClass[i]) << " (0x"
+ << Twine::utohexstr(combo)
+ << ") cannot be scheduled\n");
available = false;
break;
}
@@ -537,9 +541,10 @@ void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName,
int maxResources, int numCombos, int maxStages) {
unsigned numStates = states.size();
- DEBUG(dbgs() << "-----------------------------------------------------------------------------\n");
- DEBUG(dbgs() << "writeTableAndAPI\n");
- DEBUG(dbgs() << "Total states: " << numStates << "\n");
+ LLVM_DEBUG(dbgs() << "-------------------------------------------------------"
+ "----------------------\n");
+ LLVM_DEBUG(dbgs() << "writeTableAndAPI\n");
+ LLVM_DEBUG(dbgs() << "Total states: " << numStates << "\n");
OS << "namespace llvm {\n";
@@ -647,9 +652,10 @@ int DFAPacketizerEmitter::collectAllFuncUnits(
std::map<std::string, unsigned> &FUNameToBitsMap,
int &maxFUs,
raw_ostream &OS) {
- DEBUG(dbgs() << "-----------------------------------------------------------------------------\n");
- DEBUG(dbgs() << "collectAllFuncUnits");
- DEBUG(dbgs() << " (" << ProcItinList.size() << " itineraries)\n");
+ LLVM_DEBUG(dbgs() << "-------------------------------------------------------"
+ "----------------------\n");
+ LLVM_DEBUG(dbgs() << "collectAllFuncUnits");
+ LLVM_DEBUG(dbgs() << " (" << ProcItinList.size() << " itineraries)\n");
int totalFUs = 0;
// Parse functional units for all the itineraries.
@@ -657,10 +663,8 @@ int DFAPacketizerEmitter::collectAllFuncUnits(
Record *Proc = ProcItinList[i];
std::vector<Record*> FUs = Proc->getValueAsListOfDefs("FU");
- DEBUG(dbgs() << " FU:" << i
- << " (" << FUs.size() << " FUs) "
- << Proc->getName());
-
+ LLVM_DEBUG(dbgs() << " FU:" << i << " (" << FUs.size() << " FUs) "
+ << Proc->getName());
// Convert macros to bits for each stage.
unsigned numFUs = FUs.size();
@@ -669,14 +673,14 @@ int DFAPacketizerEmitter::collectAllFuncUnits(
"Exceeded maximum number of representable resources");
unsigned FuncResources = (unsigned) (1U << j);
FUNameToBitsMap[FUs[j]->getName()] = FuncResources;
- DEBUG(dbgs() << " " << FUs[j]->getName() << ":0x"
- << Twine::utohexstr(FuncResources));
+ LLVM_DEBUG(dbgs() << " " << FUs[j]->getName() << ":0x"
+ << Twine::utohexstr(FuncResources));
}
if (((int) numFUs) > maxFUs) {
maxFUs = numFUs;
}
totalFUs += numFUs;
- DEBUG(dbgs() << "\n");
+ LLVM_DEBUG(dbgs() << "\n");
}
return totalFUs;
}
@@ -690,18 +694,18 @@ int DFAPacketizerEmitter::collectAllComboFuncs(
std::map<std::string, unsigned> &FUNameToBitsMap,
std::map<unsigned, unsigned> &ComboBitToBitsMap,
raw_ostream &OS) {
- DEBUG(dbgs() << "-----------------------------------------------------------------------------\n");
- DEBUG(dbgs() << "collectAllComboFuncs");
- DEBUG(dbgs() << " (" << ComboFuncList.size() << " sets)\n");
+ LLVM_DEBUG(dbgs() << "-------------------------------------------------------"
+ "----------------------\n");
+ LLVM_DEBUG(dbgs() << "collectAllComboFuncs");
+ LLVM_DEBUG(dbgs() << " (" << ComboFuncList.size() << " sets)\n");
int numCombos = 0;
for (unsigned i = 0, N = ComboFuncList.size(); i < N; ++i) {
Record *Func = ComboFuncList[i];
std::vector<Record*> FUs = Func->getValueAsListOfDefs("CFD");
- DEBUG(dbgs() << " CFD:" << i
- << " (" << FUs.size() << " combo FUs) "
- << Func->getName() << "\n");
+ LLVM_DEBUG(dbgs() << " CFD:" << i << " (" << FUs.size() << " combo FUs) "
+ << Func->getName() << "\n");
// Convert macros to bits for each stage.
for (unsigned j = 0, N = FUs.size(); j < N; ++j) {
@@ -714,20 +718,20 @@ int DFAPacketizerEmitter::collectAllComboFuncs(
const std::string &ComboFuncName = ComboFunc->getName();
unsigned ComboBit = FUNameToBitsMap[ComboFuncName];
unsigned ComboResources = ComboBit;
- DEBUG(dbgs() << " combo: " << ComboFuncName << ":0x"
- << Twine::utohexstr(ComboResources) << "\n");
+ LLVM_DEBUG(dbgs() << " combo: " << ComboFuncName << ":0x"
+ << Twine::utohexstr(ComboResources) << "\n");
for (unsigned k = 0, M = FuncList.size(); k < M; ++k) {
std::string FuncName = FuncList[k]->getName();
unsigned FuncResources = FUNameToBitsMap[FuncName];
- DEBUG(dbgs() << " " << FuncName << ":0x"
- << Twine::utohexstr(FuncResources) << "\n");
+ LLVM_DEBUG(dbgs() << " " << FuncName << ":0x"
+ << Twine::utohexstr(FuncResources) << "\n");
ComboResources |= FuncResources;
}
ComboBitToBitsMap[ComboBit] = ComboResources;
numCombos++;
- DEBUG(dbgs() << " => combo bits: " << ComboFuncName << ":0x"
- << Twine::utohexstr(ComboBit) << " = 0x"
- << Twine::utohexstr(ComboResources) << "\n");
+ LLVM_DEBUG(dbgs() << " => combo bits: " << ComboFuncName << ":0x"
+ << Twine::utohexstr(ComboBit) << " = 0x"
+ << Twine::utohexstr(ComboResources) << "\n");
}
}
return numCombos;
@@ -747,8 +751,8 @@ int DFAPacketizerEmitter::collectOneInsnClass(const std::string &ProcName,
// The number of stages.
unsigned NStages = StageList.size();
- DEBUG(dbgs() << " " << ItinData->getValueAsDef("TheClass")->getName()
- << "\n");
+ LLVM_DEBUG(dbgs() << " " << ItinData->getValueAsDef("TheClass")->getName()
+ << "\n");
std::vector<unsigned> UnitBits;
@@ -760,8 +764,8 @@ int DFAPacketizerEmitter::collectOneInsnClass(const std::string &ProcName,
const std::vector<Record*> &UnitList =
Stage->getValueAsListOfDefs("Units");
- DEBUG(dbgs() << " stage:" << i
- << " [" << UnitList.size() << " units]:");
+ LLVM_DEBUG(dbgs() << " stage:" << i << " [" << UnitList.size()
+ << " units]:");
unsigned dbglen = 26; // cursor after stage dbgs
// Compute the bitwise or of each unit used in this stage.
@@ -769,7 +773,7 @@ int DFAPacketizerEmitter::collectOneInsnClass(const std::string &ProcName,
for (unsigned j = 0, M = UnitList.size(); j < M; ++j) {
// Conduct bitwise or.
std::string UnitName = UnitList[j]->getName();
- DEBUG(dbgs() << " " << j << ":" << UnitName);
+ LLVM_DEBUG(dbgs() << " " << j << ":" << UnitName);
dbglen += 3 + UnitName.length();
assert(FUNameToBitsMap.count(UnitName));
UnitBitValue |= FUNameToBitsMap[UnitName];
@@ -780,15 +784,16 @@ int DFAPacketizerEmitter::collectOneInsnClass(const std::string &ProcName,
while (dbglen <= 64) { // line up bits dbgs
dbglen += 8;
- DEBUG(dbgs() << "\t");
+ LLVM_DEBUG(dbgs() << "\t");
}
- DEBUG(dbgs() << " (bits: 0x" << Twine::utohexstr(UnitBitValue) << ")\n");
+ LLVM_DEBUG(dbgs() << " (bits: 0x" << Twine::utohexstr(UnitBitValue)
+ << ")\n");
}
if (!UnitBits.empty())
allInsnClasses.push_back(UnitBits);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << " ";
dbgsInsnClass(UnitBits);
dbgs() << "\n";
@@ -811,10 +816,10 @@ int DFAPacketizerEmitter::collectAllInsnClasses(const std::string &ProcName,
unsigned M = ItinDataList.size();
int numInsnClasses = 0;
- DEBUG(dbgs() << "-----------------------------------------------------------------------------\n"
- << "collectAllInsnClasses "
- << ProcName
- << " (" << M << " classes)\n");
+ LLVM_DEBUG(dbgs() << "-------------------------------------------------------"
+ "----------------------\n"
+ << "collectAllInsnClasses " << ProcName << " (" << M
+ << " classes)\n");
// Collect stages for each instruction class for all itinerary data
for (unsigned j = 0; j < M; j++) {
@@ -914,7 +919,7 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
//
while (!WorkList.empty()) {
const State *current = WorkList.pop_back_val();
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "---------------------\n";
dbgs() << "Processing state: " << current->stateNum << " - ";
dbgsStateInfo(current->stateInfo);
@@ -922,7 +927,7 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
});
for (unsigned i = 0; i < allInsnClasses.size(); i++) {
std::vector<unsigned> InsnClass = allInsnClasses[i];
- DEBUG({
+ LLVM_DEBUG({
dbgs() << i << " ";
dbgsInsnClass(InsnClass);
dbgs() << "\n";
@@ -938,11 +943,11 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
const State *NewState = nullptr;
current->AddInsnClass(InsnClass, ComboBitToBitsMap, NewStateResources);
if (NewStateResources.empty()) {
- DEBUG(dbgs() << " Skipped - no new states generated\n");
+ LLVM_DEBUG(dbgs() << " Skipped - no new states generated\n");
continue;
}
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\t";
dbgsStateInfo(NewStateResources);
dbgs() << "\n";
@@ -954,7 +959,7 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
auto VI = Visited.find(NewStateResources);
if (VI != Visited.end()) {
NewState = VI->second;
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\tFound existing state: " << NewState->stateNum
<< " - ";
dbgsStateInfo(NewState->stateInfo);
@@ -965,7 +970,7 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) {
NewState->stateInfo = NewStateResources;
Visited[NewStateResources] = NewState;
WorkList.push_back(NewState);
- DEBUG({
+ LLVM_DEBUG({
dbgs() << "\tAccepted new state: " << NewState->stateNum << " - ";
dbgsStateInfo(NewState->stateInfo);
dbgs() << "\n";
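The diagnostic helpers touched above (dbgsInsnClass, dbgsStateInfo, dbgsIndent) sit inside an #ifndef NDEBUG region, and that still pairs cleanly with the renamed macro: LLVM_DEBUG expands to nothing in NDEBUG builds, so code referenced only from inside it is never required. A small self-contained sketch of the same pattern (names here are illustrative, not from this patch):

#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "dfa-sketch" // hypothetical debug type

#ifndef NDEBUG
// Compiled only in +Asserts builds, like the dbgs* helpers above.
static void dbgsHex(unsigned Value) {
  llvm::dbgs() << "0x" << llvm::Twine::utohexstr(Value);
}
#endif

static void visitState(unsigned State) {
  // Under NDEBUG this whole statement disappears, so dbgsHex need not exist.
  LLVM_DEBUG(dbgsHex(State); llvm::dbgs() << "\n");
}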
diff --git a/utils/TableGen/FixedLenDecoderEmitter.cpp b/utils/TableGen/FixedLenDecoderEmitter.cpp
index dc7e35deca12..fcecc764d44b 100644
--- a/utils/TableGen/FixedLenDecoderEmitter.cpp
+++ b/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -1859,9 +1859,9 @@ static bool populateInstruction(CodeGenTarget &Target,
CGI.Operands.getSubOperandNumber(OpIdx);
const std::string &Name = CGI.Operands[SO.first].Name;
- DEBUG(dbgs() << "Numbered operand mapping for " << Def.getName() << ": " <<
- Name << "(" << SO.first << ", " << SO.second << ") => " <<
- Vals[i].getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Numbered operand mapping for " << Def.getName()
+ << ": " << Name << "(" << SO.first << ", " << SO.second
+ << ") => " << Vals[i].getName() << "\n");
std::string Decoder;
Record *TypeRecord = CGI.Operands[SO.first].Rec;
@@ -2023,7 +2023,7 @@ static bool populateInstruction(CodeGenTarget &Target,
Operands[Opc] = InsnOperands;
#if 0
- DEBUG({
+ LLVM_DEBUG({
// Dumps the instruction encoding bits.
dumpBits(errs(), Bits);
@@ -2065,8 +2065,10 @@ static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
// decodeInstruction().
static void emitDecodeInstruction(formatted_raw_ostream &OS) {
OS << "template<typename InsnType>\n"
- << "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], MCInst &MI,\n"
- << " InsnType insn, uint64_t Address,\n"
+ << "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], "
+ "MCInst &MI,\n"
+ << " InsnType insn, uint64_t "
+ "Address,\n"
<< " const void *DisAsm,\n"
<< " const MCSubtargetInfo &STI) {\n"
<< " const FeatureBitset& Bits = STI.getFeatureBits();\n"
@@ -2085,7 +2087,8 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " unsigned Len = *++Ptr;\n"
<< " ++Ptr;\n"
<< " CurFieldValue = fieldFromInstruction(insn, Start, Len);\n"
- << " DEBUG(dbgs() << Loc << \": OPC_ExtractField(\" << Start << \", \"\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_ExtractField(\" << Start << "
+ "\", \"\n"
<< " << Len << \"): \" << CurFieldValue << \"\\n\");\n"
<< " break;\n"
<< " }\n"
@@ -2101,9 +2104,12 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " // Perform the filter operation.\n"
<< " if (Val != CurFieldValue)\n"
<< " Ptr += NumToSkip;\n"
- << " DEBUG(dbgs() << Loc << \": OPC_FilterValue(\" << Val << \", \" << NumToSkip\n"
- << " << \"): \" << ((Val != CurFieldValue) ? \"FAIL:\" : \"PASS:\")\n"
- << " << \" continuing at \" << (Ptr - DecodeTable) << \"\\n\");\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_FilterValue(\" << Val << "
+ "\", \" << NumToSkip\n"
+ << " << \"): \" << ((Val != CurFieldValue) ? \"FAIL:\" "
+ ": \"PASS:\")\n"
+ << " << \" continuing at \" << (Ptr - DecodeTable) << "
+ "\"\\n\");\n"
<< "\n"
<< " break;\n"
<< " }\n"
@@ -2121,11 +2127,15 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " // If the actual and expected values don't match, skip.\n"
<< " if (ExpectedValue != FieldValue)\n"
<< " Ptr += NumToSkip;\n"
- << " DEBUG(dbgs() << Loc << \": OPC_CheckField(\" << Start << \", \"\n"
- << " << Len << \", \" << ExpectedValue << \", \" << NumToSkip\n"
- << " << \"): FieldValue = \" << FieldValue << \", ExpectedValue = \"\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_CheckField(\" << Start << "
+ "\", \"\n"
+ << " << Len << \", \" << ExpectedValue << \", \" << "
+ "NumToSkip\n"
+ << " << \"): FieldValue = \" << FieldValue << \", "
+ "ExpectedValue = \"\n"
<< " << ExpectedValue << \": \"\n"
- << " << ((ExpectedValue == FieldValue) ? \"PASS\\n\" : \"FAIL\\n\"));\n"
+ << " << ((ExpectedValue == FieldValue) ? \"PASS\\n\" : "
+ "\"FAIL\\n\"));\n"
<< " break;\n"
<< " }\n"
<< " case MCD::OPC_CheckPredicate: {\n"
@@ -2141,7 +2151,8 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " if (!(Pred = checkDecoderPredicate(PIdx, Bits)))\n"
<< " Ptr += NumToSkip;\n"
<< " (void)Pred;\n"
- << " DEBUG(dbgs() << Loc << \": OPC_CheckPredicate(\" << PIdx << \"): \"\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_CheckPredicate(\" << PIdx "
+ "<< \"): \"\n"
<< " << (Pred ? \"PASS\\n\" : \"FAIL\\n\"));\n"
<< "\n"
<< " break;\n"
@@ -2157,12 +2168,14 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " MI.clear();\n"
<< " MI.setOpcode(Opc);\n"
<< " bool DecodeComplete;\n"
- << " S = decodeToMCInst(S, DecodeIdx, insn, MI, Address, DisAsm, DecodeComplete);\n"
+ << " S = decodeToMCInst(S, DecodeIdx, insn, MI, Address, DisAsm, "
+ "DecodeComplete);\n"
<< " assert(DecodeComplete);\n"
<< "\n"
- << " DEBUG(dbgs() << Loc << \": OPC_Decode: opcode \" << Opc\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_Decode: opcode \" << Opc\n"
<< " << \", using decoder \" << DecodeIdx << \": \"\n"
- << " << (S != MCDisassembler::Fail ? \"PASS\" : \"FAIL\") << \"\\n\");\n"
+ << " << (S != MCDisassembler::Fail ? \"PASS\" : "
+ "\"FAIL\") << \"\\n\");\n"
<< " return S;\n"
<< " }\n"
<< " case MCD::OPC_TryDecode: {\n"
@@ -2180,21 +2193,26 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " MCInst TmpMI;\n"
<< " TmpMI.setOpcode(Opc);\n"
<< " bool DecodeComplete;\n"
- << " S = decodeToMCInst(S, DecodeIdx, insn, TmpMI, Address, DisAsm, DecodeComplete);\n"
- << " DEBUG(dbgs() << Loc << \": OPC_TryDecode: opcode \" << Opc\n"
+ << " S = decodeToMCInst(S, DecodeIdx, insn, TmpMI, Address, DisAsm, "
+ "DecodeComplete);\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_TryDecode: opcode \" << "
+ "Opc\n"
<< " << \", using decoder \" << DecodeIdx << \": \");\n"
<< "\n"
<< " if (DecodeComplete) {\n"
<< " // Decoding complete.\n"
- << " DEBUG(dbgs() << (S != MCDisassembler::Fail ? \"PASS\" : \"FAIL\") << \"\\n\");\n"
+ << " LLVM_DEBUG(dbgs() << (S != MCDisassembler::Fail ? \"PASS\" : "
+ "\"FAIL\") << \"\\n\");\n"
<< " MI = TmpMI;\n"
<< " return S;\n"
<< " } else {\n"
<< " assert(S == MCDisassembler::Fail);\n"
<< " // If the decoding was incomplete, skip.\n"
<< " Ptr += NumToSkip;\n"
- << " DEBUG(dbgs() << \"FAIL: continuing at \" << (Ptr - DecodeTable) << \"\\n\");\n"
- << " // Reset decode status. This also drops a SoftFail status that could be\n"
+ << " LLVM_DEBUG(dbgs() << \"FAIL: continuing at \" << (Ptr - "
+ "DecodeTable) << \"\\n\");\n"
+ << " // Reset decode status. This also drops a SoftFail status "
+ "that could be\n"
<< " // set before the decode attempt.\n"
<< " S = MCDisassembler::Success;\n"
<< " }\n"
@@ -2210,16 +2228,18 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) {
<< " bool Fail = (insn & PositiveMask) || (~insn & NegativeMask);\n"
<< " if (Fail)\n"
<< " S = MCDisassembler::SoftFail;\n"
- << " DEBUG(dbgs() << Loc << \": OPC_SoftFail: \" << (Fail ? \"FAIL\\n\":\"PASS\\n\"));\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_SoftFail: \" << (Fail ? "
+ "\"FAIL\\n\":\"PASS\\n\"));\n"
<< " break;\n"
<< " }\n"
<< " case MCD::OPC_Fail: {\n"
- << " DEBUG(dbgs() << Loc << \": OPC_Fail\\n\");\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_Fail\\n\");\n"
<< " return MCDisassembler::Fail;\n"
<< " }\n"
<< " }\n"
<< " }\n"
- << " llvm_unreachable(\"bogosity detected in disassembler state machine!\");\n"
+ << " llvm_unreachable(\"bogosity detected in disassembler state "
+ "machine!\");\n"
<< "}\n\n";
}
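A note on the re-wrapped literals in emitDecodeInstruction above: splitting an emitted line across adjacent string literals is purely cosmetic, because the compiler concatenates adjacent literals, so the generated decoder source is identical to what one long literal would produce. A stand-alone illustration (raw_ostream only; not code from this patch):

#include "llvm/Support/raw_ostream.h"

void emitHeader(llvm::raw_ostream &OS) {
  // Both statements write exactly the same text: adjacent literals are
  // concatenated at translation time, before any output happens.
  OS << "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], "
        "MCInst &MI,\n";
  OS << "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], MCInst &MI,\n";
}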
diff --git a/utils/TableGen/GlobalISelEmitter.cpp b/utils/TableGen/GlobalISelEmitter.cpp
index 40c8fa94f44e..1237f8c4e167 100644
--- a/utils/TableGen/GlobalISelEmitter.cpp
+++ b/utils/TableGen/GlobalISelEmitter.cpp
@@ -906,7 +906,7 @@ public:
// We currently don't hoist the record of instruction properly.
// Therefore we can only work on the orig instruction (InsnVarID
// == 0).
- DEBUG(dbgs() << "Non-zero instr ID not supported yet\n");
+ LLVM_DEBUG(dbgs() << "Non-zero instr ID not supported yet\n");
return false;
}
return B.getKind() == getKind() && InsnVarID == B.InsnVarID &&
@@ -3806,7 +3806,7 @@ std::vector<Matcher *> GlobalISelEmitter::optimizeRules(
OptRules.push_back(CurrentGroup.get());
StorageGroupMatcher.emplace_back(std::move(CurrentGroup));
}
- DEBUG(dbgs() << "NbGroup: " << NbGroup << "\n");
+ LLVM_DEBUG(dbgs() << "NbGroup: " << NbGroup << "\n");
return OptRules;
}
diff --git a/utils/TableGen/PseudoLoweringEmitter.cpp b/utils/TableGen/PseudoLoweringEmitter.cpp
index 63bdd36235a0..a363015730f3 100644
--- a/utils/TableGen/PseudoLoweringEmitter.cpp
+++ b/utils/TableGen/PseudoLoweringEmitter.cpp
@@ -120,13 +120,13 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn,
}
void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
- DEBUG(dbgs() << "Pseudo definition: " << Rec->getName() << "\n");
+ LLVM_DEBUG(dbgs() << "Pseudo definition: " << Rec->getName() << "\n");
  // Validate that the result pattern has the correct number and types
// of arguments for the instruction it references.
DagInit *Dag = Rec->getValueAsDag("ResultInst");
assert(Dag && "Missing result instruction in pseudo expansion!");
- DEBUG(dbgs() << " Result: " << *Dag << "\n");
+ LLVM_DEBUG(dbgs() << " Result: " << *Dag << "\n");
DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
if (!OpDef)
@@ -170,7 +170,7 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
for (unsigned i = 0, e = SourceInsn.Operands.size(); i != e; ++i)
SourceOperands[SourceInsn.Operands[i].Name] = i;
- DEBUG(dbgs() << " Operand mapping:\n");
+ LLVM_DEBUG(dbgs() << " Operand mapping:\n");
for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i) {
// We've already handled constant values. Just map instruction operands
// here.
@@ -188,7 +188,8 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
OperandMap[Insn.Operands[i].MIOperandNo + I].Data.Operand =
SourceOp->getValue();
- DEBUG(dbgs() << " " << SourceOp->getValue() << " ==> " << i << "\n");
+ LLVM_DEBUG(dbgs() << " " << SourceOp->getValue() << " ==> " << i
+ << "\n");
}
Expansions.push_back(PseudoExpansion(SourceInsn, Insn, OperandMap));
diff --git a/utils/TableGen/RISCVCompressInstEmitter.cpp b/utils/TableGen/RISCVCompressInstEmitter.cpp
index c362390a56dd..e03663b40f8a 100644
--- a/utils/TableGen/RISCVCompressInstEmitter.cpp
+++ b/utils/TableGen/RISCVCompressInstEmitter.cpp
@@ -164,10 +164,11 @@ bool RISCVCompressInstEmitter::validateTypes(Record *DagOpType,
// Let further validation happen when compress()/uncompress() functions are
// invoked.
- DEBUG(dbgs() << (IsSourceInst ? "Input" : "Output") << " Dag Operand Type: '"
- << DagOpType->getName() << "' and "
- << "Instruction Operand Type: '" << InstOpType->getName()
- << "' can't be checked at pattern validation time!\n");
+ LLVM_DEBUG(dbgs() << (IsSourceInst ? "Input" : "Output")
+ << " Dag Operand Type: '" << DagOpType->getName()
+ << "' and "
+ << "Instruction Operand Type: '" << InstOpType->getName()
+ << "' can't be checked at pattern validation time!\n");
return true;
}
@@ -234,10 +235,11 @@ void RISCVCompressInstEmitter::addDagOperandMapping(
// No pattern validation check possible for values of fixed immediate.
OperandMap[i].Kind = OpData::Imm;
OperandMap[i].Data.Imm = II->getValue();
- DEBUG(dbgs() << " Found immediate '" << II->getValue() << "' at "
- << (IsSourceInst ? "input " : "output ")
- << "Dag. No validation time check possible for values of "
- "fixed immediate.\n");
+ LLVM_DEBUG(
+ dbgs() << " Found immediate '" << II->getValue() << "' at "
+ << (IsSourceInst ? "input " : "output ")
+ << "Dag. No validation time check possible for values of "
+ "fixed immediate.\n");
} else
llvm_unreachable("Unhandled CompressPat argument type!");
}
@@ -338,7 +340,7 @@ void RISCVCompressInstEmitter::createInstOperandMapping(
// TiedCount keeps track of the number of operands skipped in Inst
// operands list to get to the corresponding Dag operand.
unsigned TiedCount = 0;
- DEBUG(dbgs() << " Operand mapping:\n Source Dest\n");
+ LLVM_DEBUG(dbgs() << " Operand mapping:\n Source Dest\n");
for (unsigned i = 0, e = DestInst.Operands.size(); i != e; ++i) {
int TiedInstOpIdx = DestInst.Operands[i].getTiedRegister();
if (TiedInstOpIdx != -1) {
@@ -348,9 +350,10 @@ void RISCVCompressInstEmitter::createInstOperandMapping(
if (DestOperandMap[i].Kind == OpData::Operand)
// No need to fill the SourceOperandMap here since it was mapped to
// destination operand 'TiedInstOpIdx' in a previous iteration.
- DEBUG(dbgs() << " " << DestOperandMap[i].Data.Operand << " ====> "
- << i << " Dest operand tied with operand '"
- << TiedInstOpIdx << "'\n");
+ LLVM_DEBUG(dbgs() << " " << DestOperandMap[i].Data.Operand
+ << " ====> " << i
+ << " Dest operand tied with operand '"
+ << TiedInstOpIdx << "'\n");
continue;
}
// Skip fixed immediates and registers, they were handled in
@@ -372,7 +375,8 @@ void RISCVCompressInstEmitter::createInstOperandMapping(
"Incorrect operand mapping detected!\n");
DestOperandMap[i].Data.Operand = SourceOp->getValue();
SourceOperandMap[SourceOp->getValue()].Data.Operand = i;
- DEBUG(dbgs() << " " << SourceOp->getValue() << " ====> " << i << "\n");
+ LLVM_DEBUG(dbgs() << " " << SourceOp->getValue() << " ====> " << i
+ << "\n");
}
}
@@ -402,7 +406,7 @@ void RISCVCompressInstEmitter::evaluateCompressPat(Record *Rec) {
// Validate input Dag operands.
DagInit *SourceDag = Rec->getValueAsDag("Input");
assert(SourceDag && "Missing 'Input' in compress pattern!");
- DEBUG(dbgs() << "Input: " << *SourceDag << "\n");
+ LLVM_DEBUG(dbgs() << "Input: " << *SourceDag << "\n");
DefInit *OpDef = dyn_cast<DefInit>(SourceDag->getOperator());
if (!OpDef)
@@ -419,7 +423,7 @@ void RISCVCompressInstEmitter::evaluateCompressPat(Record *Rec) {
// Validate output Dag operands.
DagInit *DestDag = Rec->getValueAsDag("Output");
assert(DestDag && "Missing 'Output' in compress pattern!");
- DEBUG(dbgs() << "Output: " << *DestDag << "\n");
+ LLVM_DEBUG(dbgs() << "Output: " << *DestDag << "\n");
DefInit *DestOpDef = dyn_cast<DefInit>(DestDag->getOperator());
if (!DestOpDef)
diff --git a/utils/TableGen/RegisterBankEmitter.cpp b/utils/TableGen/RegisterBankEmitter.cpp
index 5c6471688044..879b4162d629 100644
--- a/utils/TableGen/RegisterBankEmitter.cpp
+++ b/utils/TableGen/RegisterBankEmitter.cpp
@@ -291,9 +291,11 @@ void RegisterBankEmitter::run(raw_ostream &OS) {
visitRegisterBankClasses(
RegisterClassHierarchy, RC, "explicit",
[&Bank](const CodeGenRegisterClass *RC, StringRef Kind) {
- DEBUG(dbgs() << "Added " << RC->getName() << "(" << Kind << ")\n");
+ LLVM_DEBUG(dbgs()
+ << "Added " << RC->getName() << "(" << Kind << ")\n");
Bank.addRegisterClass(RC);
- }, VisitedRCs);
+ },
+ VisitedRCs);
}
Banks.push_back(Bank);
diff --git a/utils/TableGen/SubtargetEmitter.cpp b/utils/TableGen/SubtargetEmitter.cpp
index 344d89e6f002..5fd145fe63b8 100644
--- a/utils/TableGen/SubtargetEmitter.cpp
+++ b/utils/TableGen/SubtargetEmitter.cpp
@@ -988,9 +988,9 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
return;
std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
- DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
+ LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
- DEBUG(SC.dump(&SchedModels));
+ LLVM_DEBUG(SC.dump(&SchedModels));
SCTab.resize(SCTab.size() + 1);
MCSchedClassDesc &SCDesc = SCTab.back();
@@ -1056,8 +1056,9 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
}
}
if (Writes.empty()) {
- DEBUG(dbgs() << ProcModel.ModelName
- << " does not have resources for class " << SC.Name << '\n');
+ LLVM_DEBUG(dbgs() << ProcModel.ModelName
+ << " does not have resources for class " << SC.Name
+ << '\n');
}
}
// Sum resources across all operand writes.
@@ -1567,8 +1568,8 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
<< "void llvm::";
OS << Target;
OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef FS) {\n"
- << " DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
- << " DEBUG(dbgs() << \"\\nCPU:\" << CPU << \"\\n\\n\");\n";
+ << " LLVM_DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
+ << " LLVM_DEBUG(dbgs() << \"\\nCPU:\" << CPU << \"\\n\\n\");\n";
if (Features.empty()) {
OS << "}\n";