diff options
author | Petar Jovanovic <petar.jovanovic@mips.com> | 2017-11-07 14:40:27 +0000 |
---|---|---|
committer | Petar Jovanovic <petar.jovanovic@mips.com> | 2017-11-07 14:40:27 +0000 |
commit | 8cec6c49168a1b9f49f1391c4b4750a7271ff354 (patch) | |
tree | 531432a9deace1e4aeaeca4486cf37ee617e476a | |
parent | d142ab12fc9f790f2cb08eaafab765c2ed69309f (diff) | |
download | llvm-8cec6c49168a1b9f49f1391c4b4750a7271ff354.tar.gz |
Reland "Correct dwarf unwind information in function epilogue for X86"
Reland r317100 with minor fix regarding ComputeCommonTailLength function in
BranchFolding.cpp. Skipping the top block of CFI instructions needs to be
executed at several more return points in ComputeCommonTailLength().
Original r317100 message:
"Correct dwarf unwind information in function epilogue for X86"
This patch aims to provide correct dwarf unwind information in function
epilogue for X86.
It consists of two parts. The first part inserts CFI instructions that set
appropriate cfa offset and cfa register in emitEpilogue() in
X86FrameLowering. This part is X86 specific.
The second part is platform independent and ensures that:
- CFI instructions do not affect code generation
- Unwind information remains correct when a function is modified by
different passes. This is done in a late pass by analyzing information
about cfa offset and cfa register in BBs and inserting additional CFI
directives where necessary.
Changed CFI instructions so that they:
- are duplicable
- are not counted as instructions when tail duplicating or tail merging
- can be compared as equal
Added CFIInstrInserter pass:
- analyzes each basic block to determine cfa offset and register valid at
its entry and exit
- verifies that outgoing cfa offset and register of predecessor blocks match
incoming values of their successors
- inserts additional CFI directives at basic block beginning to correct the
rule for calculating CFA
Having CFI instructions in function epilogue can cause incorrect CFA
calculation rule for some basic blocks. This can happen if, due to basic
block reordering, or the existence of multiple epilogue blocks, some of the
blocks have wrong cfa offset and register values set by the epilogue block
above them.
CFIInstrInserter is currently run only on X86, but can be used by any target
that implements support for adding CFI instructions in epilogue.
Patch by Violeta Vukobrat.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317579 91177308-0d34-0410-b5e6-96231b3b80d8
101 files changed, 1709 insertions, 36 deletions
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h index c106ff6cdfef..bf35b7d653b7 100644 --- a/include/llvm/CodeGen/Passes.h +++ b/include/llvm/CodeGen/Passes.h @@ -420,6 +420,9 @@ namespace llvm { // This pass expands memcmp() to load/stores. FunctionPass *createExpandMemCmpPass(); + /// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp + FunctionPass *createCFIInstrInserter(); + } // End llvm namespace #endif diff --git a/include/llvm/CodeGen/TargetFrameLowering.h b/include/llvm/CodeGen/TargetFrameLowering.h index 5cf4627f3c96..a94dbd7c5c00 100644 --- a/include/llvm/CodeGen/TargetFrameLowering.h +++ b/include/llvm/CodeGen/TargetFrameLowering.h @@ -341,6 +341,14 @@ public: return false; return true; } + + /// Return initial CFA offset value i.e. the one valid at the beginning of the + /// function (before any stack operations). + virtual int getInitialCFAOffset(const MachineFunction &MF) const; + + /// Return initial CFA register value i.e. the one valid at the beginning of + /// the function (before any stack operations). 
+ virtual unsigned getInitialCFARegister(const MachineFunction &MF) const; }; } // End llvm namespace diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index 9cdb49330ae1..7616534d8d5b 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -86,6 +86,7 @@ void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&); void initializeCFGPrinterLegacyPassPass(PassRegistry&); void initializeCFGSimplifyPassPass(PassRegistry&); void initializeCFGViewerLegacyPassPass(PassRegistry&); +void initializeCFIInstrInserterPass(PassRegistry&); void initializeCFLAndersAAWrapperPassPass(PassRegistry&); void initializeCFLSteensAAWrapperPassPass(PassRegistry&); void initializeCallGraphDOTPrinterPass(PassRegistry&); diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index 048bd1f2a0cc..927da1c72d46 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -902,7 +902,7 @@ def CFI_INSTRUCTION : Instruction { let InOperandList = (ins i32imm:$id); let AsmString = ""; let hasCtrlDep = 1; - let isNotDuplicable = 1; + let isNotDuplicable = 0; } def EH_LABEL : Instruction { let OutOperandList = (outs); diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index 40cb0c0cdf19..41b88a7ac573 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -296,6 +296,11 @@ static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) { return HashMachineInstr(*I); } +/// Whether MI should be counted as an instruction when calculating common tail. +static bool countsAsInstruction(const MachineInstr &MI) { + return !(MI.isDebugValue() || MI.isCFIInstruction()); +} + /// ComputeCommonTailLength - Given two machine basic blocks, compute the number /// of instructions they actually have in common together at their end. Return /// iterators for the first shared instruction in each block. 
@@ -310,26 +315,27 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1, while (I1 != MBB1->begin() && I2 != MBB2->begin()) { --I1; --I2; // Skip debugging pseudos; necessary to avoid changing the code. - while (I1->isDebugValue()) { + while (!countsAsInstruction(*I1)) { if (I1==MBB1->begin()) { - while (I2->isDebugValue()) { - if (I2==MBB2->begin()) + while (!countsAsInstruction(*I2)) { + if (I2==MBB2->begin()) { // I1==DBG at begin; I2==DBG at begin - return TailLen; + goto SkipTopCFIAndReturn; + } --I2; } ++I2; // I1==DBG at begin; I2==non-DBG, or first of DBGs not at begin - return TailLen; + goto SkipTopCFIAndReturn; } --I1; } // I1==first (untested) non-DBG preceding known match - while (I2->isDebugValue()) { + while (!countsAsInstruction(*I2)) { if (I2==MBB2->begin()) { ++I1; // I1==non-DBG, or first of DBGs not at begin; I2==DBG at begin - return TailLen; + goto SkipTopCFIAndReturn; } --I2; } @@ -368,6 +374,37 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1, } ++I1; } + +SkipTopCFIAndReturn: + // Ensure that I1 and I2 do not point to a CFI_INSTRUCTION. This can happen if + // I1 and I2 are non-identical when compared and then one or both of them ends + // up pointing to a CFI instruction after being incremented. For example: + /* + BB1: + ... + INSTRUCTION_A + ADD32ri8 <- last common instruction + ... + BB2: + ... + INSTRUCTION_B + CFI_INSTRUCTION + ADD32ri8 <- last common instruction + ... + */ + // When INSTRUCTION_A and INSTRUCTION_B are compared as not equal, after + // incrementing the iterators, I1 will point to ADD, however I2 will point to + // the CFI instruction. Later on, this leads to BB2 being 'hacked off' at the + // wrong place (in ReplaceTailWithBranchTo()) which results in losing this CFI + // instruction. 
+ while (I1 != MBB1->end() && I1->isCFIInstruction()) { + ++I1; + } + + while (I2 != MBB2->end() && I2->isCFIInstruction()) { + ++I2; + } + return TailLen; } @@ -454,7 +491,7 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) { unsigned Time = 0; for (; I != E; ++I) { - if (I->isDebugValue()) + if (!countsAsInstruction(*I)) continue; if (I->isCall()) Time += 10; @@ -814,12 +851,12 @@ mergeOperations(MachineBasicBlock::iterator MBBIStartPos, assert(MBBI != MBBIE && "Reached BB end within common tail length!"); (void)MBBIE; - if (MBBI->isDebugValue()) { + if (!countsAsInstruction(*MBBI)) { ++MBBI; continue; } - while ((MBBICommon != MBBIECommon) && MBBICommon->isDebugValue()) + while ((MBBICommon != MBBIECommon) && !countsAsInstruction(*MBBICommon)) ++MBBICommon; assert(MBBICommon != MBBIECommon && @@ -859,7 +896,7 @@ void BranchFolder::mergeCommonTails(unsigned commonTailIndex) { } for (auto &MI : *MBB) { - if (MI.isDebugValue()) + if (!countsAsInstruction(MI)) continue; DebugLoc DL = MI.getDebugLoc(); for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) { @@ -869,7 +906,7 @@ void BranchFolder::mergeCommonTails(unsigned commonTailIndex) { auto &Pos = NextCommonInsts[i]; assert(Pos != SameTails[i].getBlock()->end() && "Reached BB end within common tail"); - while (Pos->isDebugValue()) { + while (!countsAsInstruction(*Pos)) { ++Pos; assert(Pos != SameTails[i].getBlock()->end() && "Reached BB end within common tail"); diff --git a/lib/CodeGen/CFIInstrInserter.cpp b/lib/CodeGen/CFIInstrInserter.cpp new file mode 100644 index 000000000000..011259193fdb --- /dev/null +++ b/lib/CodeGen/CFIInstrInserter.cpp @@ -0,0 +1,319 @@ +//===------ CFIInstrInserter.cpp - Insert additional CFI instructions -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +/// \file This pass verifies incoming and outgoing CFA information of basic +/// blocks. CFA information is information about offset and register set by CFI +/// directives, valid at the start and end of a basic block. This pass checks +/// that outgoing information of predecessors matches incoming information of +/// their successors. Then it checks if blocks have correct CFA calculation rule +/// set and inserts additional CFI instruction at their beginnings if they +/// don't. CFI instructions are inserted if basic blocks have incorrect offset +/// or register set by previous blocks, as a result of a non-linear layout of +/// blocks in a function. +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/TargetFrameLowering.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetSubtargetInfo.h" +using namespace llvm; + +namespace { +class CFIInstrInserter : public MachineFunctionPass { + public: + static char ID; + + CFIInstrInserter() : MachineFunctionPass(ID) { + initializeCFIInstrInserterPass(*PassRegistry::getPassRegistry()); + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesAll(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + bool runOnMachineFunction(MachineFunction &MF) override { + + if (!MF.getMMI().hasDebugInfo() && + !MF.getFunction()->needsUnwindTableEntry()) + return false; + + MBBVector.resize(MF.getNumBlockIDs()); + calculateCFAInfo(MF); +#ifndef NDEBUG + unsigned ErrorNum = verify(MF); + if (ErrorNum) + report_fatal_error("Found " + Twine(ErrorNum) + + " in/out CFI information errors."); +#endif + bool insertedCFI = insertCFIInstrs(MF); + 
MBBVector.clear(); + return insertedCFI; + } + + private: + struct MBBCFAInfo { + MachineBasicBlock *MBB; + /// Value of cfa offset valid at basic block entry. + int IncomingCFAOffset = -1; + /// Value of cfa offset valid at basic block exit. + int OutgoingCFAOffset = -1; + /// Value of cfa register valid at basic block entry. + unsigned IncomingCFARegister = 0; + /// Value of cfa register valid at basic block exit. + unsigned OutgoingCFARegister = 0; + /// If in/out cfa offset and register values for this block have already + /// been set or not. + bool Processed = false; + }; + + /// Contains cfa offset and register values valid at entry and exit of basic + /// blocks. + SmallVector<struct MBBCFAInfo, 4> MBBVector; + + /// Calculate cfa offset and register values valid at entry and exit for all + /// basic blocks in a function. + void calculateCFAInfo(MachineFunction &MF); + /// Calculate cfa offset and register values valid at basic block exit by + /// checking the block for CFI instructions. Block's incoming CFA info remains + /// the same. + void calculateOutgoingCFAInfo(struct MBBCFAInfo &MBBInfo); + /// Update in/out cfa offset and register values for successors of the basic + /// block. + void updateSuccCFAInfo(struct MBBCFAInfo &MBBInfo); + + /// Check if incoming CFA information of a basic block matches outgoing CFA + /// information of the previous block. If it doesn't, insert CFI instruction + /// at the beginning of the block that corrects the CFA calculation rule for + /// that block. + bool insertCFIInstrs(MachineFunction &MF); + /// Return the cfa offset value that should be set at the beginning of a MBB + /// if needed. The negated value is needed when creating CFI instructions that + /// set absolute offset. 
+ int getCorrectCFAOffset(MachineBasicBlock *MBB) { + return -MBBVector[MBB->getNumber()].IncomingCFAOffset; + } + + void report(const char *msg, MachineBasicBlock &MBB); + /// Go through each MBB in a function and check that outgoing offset and + /// register of its predecessors match incoming offset and register of that + /// MBB, as well as that incoming offset and register of its successors match + /// outgoing offset and register of the MBB. + unsigned verify(MachineFunction &MF); +}; +} + +char CFIInstrInserter::ID = 0; +INITIALIZE_PASS(CFIInstrInserter, "cfi-instr-inserter", + "Check CFA info and insert CFI instructions if needed", false, + false) +FunctionPass *llvm::createCFIInstrInserter() { return new CFIInstrInserter(); } + +void CFIInstrInserter::calculateCFAInfo(MachineFunction &MF) { + // Initial CFA offset value i.e. the one valid at the beginning of the + // function. + int InitialOffset = + MF.getSubtarget().getFrameLowering()->getInitialCFAOffset(MF); + // Initial CFA register value i.e. the one valid at the beginning of the + // function. + unsigned InitialRegister = + MF.getSubtarget().getFrameLowering()->getInitialCFARegister(MF); + + // Initialize MBBMap. + for (MachineBasicBlock &MBB : MF) { + struct MBBCFAInfo MBBInfo; + MBBInfo.MBB = &MBB; + MBBInfo.IncomingCFAOffset = InitialOffset; + MBBInfo.OutgoingCFAOffset = InitialOffset; + MBBInfo.IncomingCFARegister = InitialRegister; + MBBInfo.OutgoingCFARegister = InitialRegister; + MBBVector[MBB.getNumber()] = MBBInfo; + } + + // Set in/out cfa info for all blocks in the function. This traversal is based + // on the assumption that the first block in the function is the entry block + // i.e. that it has initial cfa offset and register values as incoming CFA + // information. 
+ for (MachineBasicBlock &MBB : MF) { + if (MBBVector[MBB.getNumber()].Processed) continue; + calculateOutgoingCFAInfo(MBBVector[MBB.getNumber()]); + updateSuccCFAInfo(MBBVector[MBB.getNumber()]); + } +} + +void CFIInstrInserter::calculateOutgoingCFAInfo(struct MBBCFAInfo &MBBInfo) { + // Outgoing cfa offset set by the block. + int SetOffset = MBBInfo.IncomingCFAOffset; + // Outgoing cfa register set by the block. + unsigned SetRegister = MBBInfo.IncomingCFARegister; + const std::vector<MCCFIInstruction> &Instrs = + MBBInfo.MBB->getParent()->getFrameInstructions(); + + // Determine cfa offset and register set by the block. + for (MachineInstr &MI : + make_range(MBBInfo.MBB->instr_begin(), MBBInfo.MBB->instr_end())) { + if (MI.isCFIInstruction()) { + unsigned CFIIndex = MI.getOperand(0).getCFIIndex(); + const MCCFIInstruction &CFI = Instrs[CFIIndex]; + if (CFI.getOperation() == MCCFIInstruction::OpDefCfaRegister) { + SetRegister = CFI.getRegister(); + } else if (CFI.getOperation() == MCCFIInstruction::OpDefCfaOffset) { + SetOffset = CFI.getOffset(); + } else if (CFI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset) { + SetOffset += CFI.getOffset(); + } else if (CFI.getOperation() == MCCFIInstruction::OpDefCfa) { + SetRegister = CFI.getRegister(); + SetOffset = CFI.getOffset(); + } + } + } + + MBBInfo.Processed = true; + + // Update outgoing CFA info. 
+ MBBInfo.OutgoingCFAOffset = SetOffset; + MBBInfo.OutgoingCFARegister = SetRegister; +} + +void CFIInstrInserter::updateSuccCFAInfo(struct MBBCFAInfo &MBBInfo) { + + for (MachineBasicBlock *Succ : MBBInfo.MBB->successors()) { + struct MBBCFAInfo &SuccInfo = MBBVector[Succ->getNumber()]; + if (SuccInfo.Processed) continue; + SuccInfo.IncomingCFAOffset = MBBInfo.OutgoingCFAOffset; + SuccInfo.IncomingCFARegister = MBBInfo.OutgoingCFARegister; + calculateOutgoingCFAInfo(SuccInfo); + updateSuccCFAInfo(SuccInfo); + } +} + +bool CFIInstrInserter::insertCFIInstrs(MachineFunction &MF) { + + const struct MBBCFAInfo *PrevMBBInfo = &MBBVector[MF.front().getNumber()]; + const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); + bool InsertedCFIInstr = false; + + for (MachineBasicBlock &MBB : MF) { + // Skip the first MBB in a function + if (MBB.getNumber() == MF.front().getNumber()) continue; + + const struct MBBCFAInfo& MBBInfo = MBBVector[MBB.getNumber()]; + auto MBBI = MBBInfo.MBB->begin(); + DebugLoc DL = MBBInfo.MBB->findDebugLoc(MBBI); + + if (PrevMBBInfo->OutgoingCFAOffset != MBBInfo.IncomingCFAOffset) { + // If both outgoing offset and register of a previous block don't match + // incoming offset and register of this block, add a def_cfa instruction + // with the correct offset and register for this block. + if (PrevMBBInfo->OutgoingCFARegister != MBBInfo.IncomingCFARegister) { + unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa( + nullptr, MBBInfo.IncomingCFARegister, getCorrectCFAOffset(&MBB))); + BuildMI(*MBBInfo.MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + // If outgoing offset of a previous block doesn't match incoming offset + // of this block, add a def_cfa_offset instruction with the correct + // offset for this block. 
+ } else { + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::createDefCfaOffset( + nullptr, getCorrectCFAOffset(&MBB))); + BuildMI(*MBBInfo.MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } + InsertedCFIInstr = true; + // If outgoing register of a previous block doesn't match incoming + // register of this block, add a def_cfa_register instruction with the + // correct register for this block. + } else if (PrevMBBInfo->OutgoingCFARegister != MBBInfo.IncomingCFARegister) { + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::createDefCfaRegister( + nullptr, MBBInfo.IncomingCFARegister)); + BuildMI(*MBBInfo.MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + InsertedCFIInstr = true; + } + PrevMBBInfo = &MBBInfo; + } + return InsertedCFIInstr; +} + +void CFIInstrInserter::report(const char *msg, MachineBasicBlock &MBB) { + errs() << '\n'; + errs() << "*** " << msg << " ***\n" + << "- function: " << MBB.getParent()->getName() << "\n"; + errs() << "- basic block: BB#" << MBB.getNumber() << ' ' << MBB.getName() + << " (" << (const void *)&MBB << ')'; + errs() << '\n'; +} + +unsigned CFIInstrInserter::verify(MachineFunction &MF) { + unsigned ErrorNum = 0; + for (MachineBasicBlock &CurrMBB : MF) { + const struct MBBCFAInfo& CurrMBBInfo = MBBVector[CurrMBB.getNumber()]; + for (MachineBasicBlock *Pred : CurrMBB.predecessors()) { + const struct MBBCFAInfo& PredMBBInfo = MBBVector[Pred->getNumber()]; + // Check that outgoing offset values of predecessors match the incoming + // offset value of CurrMBB + if (PredMBBInfo.OutgoingCFAOffset != CurrMBBInfo.IncomingCFAOffset) { + report("The outgoing offset of a predecessor is inconsistent.", + CurrMBB); + errs() << "Predecessor BB#" << Pred->getNumber() + << " has outgoing offset (" << PredMBBInfo.OutgoingCFAOffset + << "), while BB#" << CurrMBB.getNumber() + << " has incoming offset (" << CurrMBBInfo.IncomingCFAOffset + << ").\n"; + ErrorNum++; + } + 
// Check that outgoing register values of predecessors match the incoming + // register value of CurrMBB + if (PredMBBInfo.OutgoingCFARegister != CurrMBBInfo.IncomingCFARegister) { + report("The outgoing register of a predecessor is inconsistent.", + CurrMBB); + errs() << "Predecessor BB#" << Pred->getNumber() + << " has outgoing register (" << PredMBBInfo.OutgoingCFARegister + << "), while BB#" << CurrMBB.getNumber() + << " has incoming register (" << CurrMBBInfo.IncomingCFARegister + << ").\n"; + ErrorNum++; + } + } + + for (MachineBasicBlock *Succ : CurrMBB.successors()) { + const struct MBBCFAInfo& SuccMBBInfo = MBBVector[Succ->getNumber()]; + // Check that incoming offset values of successors match the outgoing + // offset value of CurrMBB + if (SuccMBBInfo.IncomingCFAOffset != CurrMBBInfo.OutgoingCFAOffset) { + report("The incoming offset of a successor is inconsistent.", CurrMBB); + errs() << "Successor BB#" << Succ->getNumber() + << " has incoming offset (" << SuccMBBInfo.IncomingCFAOffset + << "), while BB#" << CurrMBB.getNumber() + << " has outgoing offset (" << CurrMBBInfo.OutgoingCFAOffset + << ").\n"; + ErrorNum++; + } + // Check that incoming register values of successors match the outgoing + // register value of CurrMBB + if (SuccMBBInfo.IncomingCFARegister != CurrMBBInfo.OutgoingCFARegister) { + report("The incoming register of a successor is inconsistent.", + CurrMBB); + errs() << "Successor BB#" << Succ->getNumber() + << " has incoming register (" << SuccMBBInfo.IncomingCFARegister + << "), while BB#" << CurrMBB.getNumber() + << " has outgoing register (" << CurrMBBInfo.OutgoingCFARegister + << ").\n"; + ErrorNum++; + } + } + } + return ErrorNum; +} diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt index df04cf85049f..1ae5aa80bf99 100644 --- a/lib/CodeGen/CMakeLists.txt +++ b/lib/CodeGen/CMakeLists.txt @@ -9,6 +9,7 @@ add_llvm_library(LLVMCodeGen BuiltinGCs.cpp CalcSpillWeights.cpp CallingConvLower.cpp + CFIInstrInserter.cpp 
CodeGen.cpp CodeGenPrepare.cpp CountingFunctionInserter.cpp diff --git a/lib/CodeGen/CodeGen.cpp b/lib/CodeGen/CodeGen.cpp index 2f119554a1e2..9d10d1b75f52 100644 --- a/lib/CodeGen/CodeGen.cpp +++ b/lib/CodeGen/CodeGen.cpp @@ -23,6 +23,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeAtomicExpandPass(Registry); initializeBranchFolderPassPass(Registry); initializeBranchRelaxationPass(Registry); + initializeCFIInstrInserterPass(Registry); initializeCodeGenPreparePass(Registry); initializeCountingFunctionInserterPass(Registry); initializeDeadMachineInstructionElimPass(Registry); diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp index bb2dda980e41..9137c836caa0 100644 --- a/lib/CodeGen/MachineInstr.cpp +++ b/lib/CodeGen/MachineInstr.cpp @@ -320,8 +320,45 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const { } case MachineOperand::MO_MCSymbol: return getMCSymbol() == Other.getMCSymbol(); - case MachineOperand::MO_CFIIndex: - return getCFIIndex() == Other.getCFIIndex(); + case MachineOperand::MO_CFIIndex: { + const MachineFunction *MF = getParent()->getParent()->getParent(); + const MachineFunction *OtherMF = + Other.getParent()->getParent()->getParent(); + MCCFIInstruction Inst = MF->getFrameInstructions()[getCFIIndex()]; + MCCFIInstruction OtherInst = + OtherMF->getFrameInstructions()[Other.getCFIIndex()]; + MCCFIInstruction::OpType op = Inst.getOperation(); + if (op != OtherInst.getOperation()) return false; + switch (op) { + case MCCFIInstruction::OpDefCfa: + case MCCFIInstruction::OpOffset: + case MCCFIInstruction::OpRelOffset: + if (Inst.getRegister() != OtherInst.getRegister()) return false; + if (Inst.getOffset() != OtherInst.getOffset()) return false; + break; + case MCCFIInstruction::OpRestore: + case MCCFIInstruction::OpUndefined: + case MCCFIInstruction::OpSameValue: + case MCCFIInstruction::OpDefCfaRegister: + if (Inst.getRegister() != OtherInst.getRegister()) return false; + break; + case 
MCCFIInstruction::OpRegister: + if (Inst.getRegister() != OtherInst.getRegister()) return false; + if (Inst.getRegister2() != OtherInst.getRegister2()) return false; + break; + case MCCFIInstruction::OpDefCfaOffset: + case MCCFIInstruction::OpAdjustCfaOffset: + case MCCFIInstruction::OpGnuArgsSize: + if (Inst.getOffset() != OtherInst.getOffset()) return false; + break; + case MCCFIInstruction::OpRememberState: + case MCCFIInstruction::OpRestoreState: + case MCCFIInstruction::OpEscape: + case MCCFIInstruction::OpWindowSave: + break; + } + return true; + } case MachineOperand::MO_Metadata: return getMetadata() == Other.getMetadata(); case MachineOperand::MO_IntrinsicID: @@ -370,8 +407,13 @@ hash_code llvm::hash_value(const MachineOperand &MO) { return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata()); case MachineOperand::MO_MCSymbol: return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol()); - case MachineOperand::MO_CFIIndex: - return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex()); + case MachineOperand::MO_CFIIndex: { + const MachineFunction *MF = MO.getParent()->getParent()->getParent(); + MCCFIInstruction Inst = MF->getFrameInstructions()[MO.getCFIIndex()]; + return hash_combine(MO.getType(), MO.getTargetFlags(), Inst.getOperation(), + Inst.getRegister(), Inst.getRegister2(), + Inst.getOffset()); + } case MachineOperand::MO_IntrinsicID: return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIntrinsicID()); case MachineOperand::MO_Predicate: diff --git a/lib/CodeGen/TailDuplicator.cpp b/lib/CodeGen/TailDuplicator.cpp index bd3a20f936d8..93b53c3a1cdf 100644 --- a/lib/CodeGen/TailDuplicator.cpp +++ b/lib/CodeGen/TailDuplicator.cpp @@ -603,8 +603,8 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple, if (PreRegAlloc && MI.isCall()) return false; - if (!MI.isPHI() && !MI.isDebugValue()) - InstrCount += 1; + if (!MI.isPHI() && !MI.isMetaInstruction()) + InstrCount += 1; if (InstrCount > MaxDuplicateCount) 
return false; diff --git a/lib/CodeGen/TargetFrameLoweringImpl.cpp b/lib/CodeGen/TargetFrameLoweringImpl.cpp index 64962a5b796a..4d2130f5ebec 100644 --- a/lib/CodeGen/TargetFrameLoweringImpl.cpp +++ b/lib/CodeGen/TargetFrameLoweringImpl.cpp @@ -104,3 +104,12 @@ unsigned TargetFrameLowering::getStackAlignmentSkew( return 0; } + +int TargetFrameLowering::getInitialCFAOffset(const MachineFunction &MF) const { + llvm_unreachable("getInitialCFAOffset() not implemented!"); +} + +unsigned TargetFrameLowering::getInitialCFARegister(const MachineFunction &MF) + const { + llvm_unreachable("getInitialCFARegister() not implemented!"); +}
\ No newline at end of file diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index c7af0ae43d2a..86e65b83ffa9 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -1562,6 +1562,11 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, bool HasFP = hasFP(MF); uint64_t NumBytes = 0; + bool NeedsDwarfCFI = + (!MF.getTarget().getTargetTriple().isOSDarwin() && + !MF.getTarget().getTargetTriple().isOSWindows()) && + (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry()); + if (IsFunclet) { assert(HasFP && "EH funclets without FP not yet implemented"); NumBytes = getWinEHFuncletFrameSize(MF); @@ -1584,6 +1589,13 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr) .setMIFlag(MachineInstr::FrameDestroy); + if (NeedsDwarfCFI) { + unsigned DwarfStackPtr = + TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true); + BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfa( + nullptr, DwarfStackPtr, -SlotSize)); + --MBBI; + } } MachineBasicBlock::iterator FirstCSPop = MBBI; @@ -1647,6 +1659,11 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, } else if (NumBytes) { // Adjust stack pointer back: ESP += numbytes. emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true); + if (!hasFP(MF) && NeedsDwarfCFI) { + // Define the current CFA rule to use the provided offset. + BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset( + nullptr, -CSSize - SlotSize)); + } --MBBI; } @@ -1659,6 +1676,23 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, if (NeedsWin64CFI && MF.hasWinCFI()) BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue)); + if (!hasFP(MF) && NeedsDwarfCFI) { + MBBI = FirstCSPop; + int64_t Offset = -CSSize - SlotSize; + // Mark callee-saved pop instruction. + // Define the current CFA rule to use the provided offset. 
+ while (MBBI != MBB.end()) { + MachineBasicBlock::iterator PI = MBBI; + unsigned Opc = PI->getOpcode(); + ++MBBI; + if (Opc == X86::POP32r || Opc == X86::POP64r) { + Offset += SlotSize; + BuildCFI(MBB, MBBI, DL, + MCCFIInstruction::createDefCfaOffset(nullptr, Offset)); + } + } + } + if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) { // Add the return addr area delta back since we are not tail calling. int Offset = -1 * X86FI->getTCReturnAddrDelta(); @@ -2840,6 +2874,15 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers( return MBBI; } +int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const { + return TRI->getSlotSize(); +} + +unsigned X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) + const { + return TRI->getDwarfRegNum(StackPtr, true); +} + namespace { // Struct used by orderFrameObjects to help sort the stack objects. struct X86FrameSortingObject { diff --git a/lib/Target/X86/X86FrameLowering.h b/lib/Target/X86/X86FrameLowering.h index 909319fc18fc..2bce79262d01 100644 --- a/lib/Target/X86/X86FrameLowering.h +++ b/lib/Target/X86/X86FrameLowering.h @@ -168,6 +168,10 @@ public: MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool RestoreSP = false) const; + int getInitialCFAOffset(const MachineFunction &MF) const override; + + unsigned getInitialCFARegister(const MachineFunction &MF) const override; + private: uint64_t calculateMaxStackAlign(const MachineFunction &MF) const; diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index 6e6c724eb0af..11fe84f162da 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -436,4 +436,11 @@ void X86PassConfig::addPreEmitPass() { addPass(createX86FixupLEAs()); addPass(createX86EvexToVexInsts()); } + + // Verify basic block incoming and outgoing cfa offset and register values and + // correct CFA calculation rule where needed by inserting appropriate CFI + // 
instructions. + const Triple &TT = TM->getTargetTriple(); + if (!TT.isOSDarwin() && !TT.isOSWindows()) + addPass(createCFIInstrInserter()); } diff --git a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll index 6814ed1d894e..109962c2859a 100644 --- a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll +++ b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll @@ -23,6 +23,7 @@ lpad: ; preds = %cont, %entry } ; CHECK: lpad +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: Ltmp declare i32 @__gxx_personality_v0(...) diff --git a/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/test/CodeGen/X86/2011-10-19-widen_vselect.ll index 416761ffef45..dd0591005036 100644 --- a/test/CodeGen/X86/2011-10-19-widen_vselect.ll +++ b/test/CodeGen/X86/2011-10-19-widen_vselect.ll @@ -88,6 +88,7 @@ define void @full_test() { ; X32-NEXT: movss %xmm4, {{[0-9]+}}(%esp) ; X32-NEXT: movss %xmm0, {{[0-9]+}}(%esp) ; X32-NEXT: addl $60, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: full_test: diff --git a/test/CodeGen/X86/GlobalISel/add-scalar.ll b/test/CodeGen/X86/GlobalISel/add-scalar.ll index 64a6313023be..9d28f441fb7e 100644 --- a/test/CodeGen/X86/GlobalISel/add-scalar.ll +++ b/test/CodeGen/X86/GlobalISel/add-scalar.ll @@ -20,6 +20,7 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) { ; X32-NEXT: addl 8(%ebp), %eax ; X32-NEXT: adcl 12(%ebp), %edx ; X32-NEXT: popl %ebp +; X32-NEXT: .cfi_def_cfa %esp, 4 ; X32-NEXT: retl %ret = add i64 %arg1, %arg2 ret i64 %ret diff --git a/test/CodeGen/X86/GlobalISel/brcond.ll b/test/CodeGen/X86/GlobalISel/brcond.ll index 917ee6f5bd8c..2467344776e2 100644 --- a/test/CodeGen/X86/GlobalISel/brcond.ll +++ b/test/CodeGen/X86/GlobalISel/brcond.ll @@ -36,6 +36,7 @@ define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) { ; X32-NEXT: movl %eax, (%esp) ; X32-NEXT: movl (%esp), %eax ; X32-NEXT: popl %ecx +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: %retval = alloca i32, align 4 diff --git 
a/test/CodeGen/X86/GlobalISel/callingconv.ll b/test/CodeGen/X86/GlobalISel/callingconv.ll index 4100a7217ac3..23987a3c365d 100644 --- a/test/CodeGen/X86/GlobalISel/callingconv.ll +++ b/test/CodeGen/X86/GlobalISel/callingconv.ll @@ -117,6 +117,7 @@ define <8 x i32> @test_v8i32_args(<8 x i32> %arg1, <8 x i32> %arg2) { ; X32-NEXT: movups 16(%esp), %xmm1 ; X32-NEXT: movaps %xmm2, %xmm0 ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_v8i32_args: @@ -135,6 +136,7 @@ define void @test_trivial_call() { ; X32-NEXT: .cfi_def_cfa_offset 16 ; X32-NEXT: calll trivial_callee ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_trivial_call: @@ -143,6 +145,7 @@ define void @test_trivial_call() { ; X64-NEXT: .cfi_def_cfa_offset 16 ; X64-NEXT: callq trivial_callee ; X64-NEXT: popq %rax +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void @trivial_callee() ret void @@ -160,6 +163,7 @@ define void @test_simple_arg_call(i32 %in0, i32 %in1) { ; X32-NEXT: movl %eax, 4(%esp) ; X32-NEXT: calll simple_arg_callee ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_simple_arg_call: @@ -171,6 +175,7 @@ define void @test_simple_arg_call(i32 %in0, i32 %in1) { ; X64-NEXT: movl %eax, %esi ; X64-NEXT: callq simple_arg_callee ; X64-NEXT: popq %rax +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void @simple_arg_callee(i32 %in1, i32 %in0) ret void @@ -193,6 +198,7 @@ define void @test_simple_arg8_call(i32 %in0) { ; X32-NEXT: movl %eax, 28(%esp) ; X32-NEXT: calll simple_arg8_callee ; X32-NEXT: addl $44, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_simple_arg8_call: @@ -208,6 +214,7 @@ define void @test_simple_arg8_call(i32 %in0) { ; X64-NEXT: movl %edi, %r9d ; X64-NEXT: callq simple_arg8_callee ; X64-NEXT: addq $24, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void @simple_arg8_callee(i32 
%in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0) ret void @@ -224,6 +231,7 @@ define i32 @test_simple_return_callee() { ; X32-NEXT: calll simple_return_callee ; X32-NEXT: addl %eax, %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_simple_return_callee: @@ -234,6 +242,7 @@ define i32 @test_simple_return_callee() { ; X64-NEXT: callq simple_return_callee ; X64-NEXT: addl %eax, %eax ; X64-NEXT: popq %rcx +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %call = call i32 @simple_return_callee(i32 5) %r = add i32 %call, %call @@ -254,6 +263,7 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) { ; X32-NEXT: paddd (%esp), %xmm0 # 16-byte Folded Reload ; X32-NEXT: paddd 16(%esp), %xmm1 # 16-byte Folded Reload ; X32-NEXT: addl $44, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_split_return_callee: @@ -268,6 +278,7 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) { ; X64-NEXT: paddd (%rsp), %xmm0 # 16-byte Folded Reload ; X64-NEXT: paddd 16(%rsp), %xmm1 # 16-byte Folded Reload ; X64-NEXT: addq $40, %rsp +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %call = call <8 x i32> @split_return_callee(<8 x i32> %arg2) %r = add <8 x i32> %arg1, %call @@ -281,6 +292,7 @@ define void @test_indirect_call(void()* %func) { ; X32-NEXT: .cfi_def_cfa_offset 16 ; X32-NEXT: calll *16(%esp) ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_indirect_call: @@ -289,6 +301,7 @@ define void @test_indirect_call(void()* %func) { ; X64-NEXT: .cfi_def_cfa_offset 16 ; X64-NEXT: callq *%rdi ; X64-NEXT: popq %rax +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void %func() ret void @@ -317,8 +330,11 @@ define void @test_abi_exts_call(i8* %addr) { ; X32-NEXT: movl %esi, (%esp) ; X32-NEXT: calll take_char ; X32-NEXT: addl $4, %esp +; X32-NEXT: .cfi_def_cfa_offset 12 ; X32-NEXT: 
popl %esi +; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_abi_exts_call: @@ -335,6 +351,7 @@ define void @test_abi_exts_call(i8* %addr) { ; X64-NEXT: movl %ebx, %edi ; X64-NEXT: callq take_char ; X64-NEXT: popq %rbx +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %val = load i8, i8* %addr call void @take_char(i8 %val) @@ -357,6 +374,7 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) { ; X32-NEXT: movl %ecx, 4(%esp) ; X32-NEXT: calll variadic_callee ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_variadic_call_1: @@ -368,6 +386,7 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) { ; X64-NEXT: movb $0, %al ; X64-NEXT: callq variadic_callee ; X64-NEXT: popq %rax +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %addr = load i8*, i8** %addr_ptr @@ -393,6 +412,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) { ; X32-NEXT: movl %ecx, 4(%eax) ; X32-NEXT: calll variadic_callee ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_variadic_call_2: @@ -405,6 +425,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) { ; X64-NEXT: movq %rcx, %xmm0 ; X64-NEXT: callq variadic_callee ; X64-NEXT: popq %rax +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %addr = load i8*, i8** %addr_ptr diff --git a/test/CodeGen/X86/GlobalISel/frameIndex.ll b/test/CodeGen/X86/GlobalISel/frameIndex.ll index 7b2a050f1534..f260d0d707f6 100644 --- a/test/CodeGen/X86/GlobalISel/frameIndex.ll +++ b/test/CodeGen/X86/GlobalISel/frameIndex.ll @@ -18,6 +18,7 @@ define i32* @allocai32() { ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movl %esp, %eax ; X32-NEXT: popl %ecx +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X32ABI-LABEL: allocai32: diff --git a/test/CodeGen/X86/O0-pipeline.ll b/test/CodeGen/X86/O0-pipeline.ll index 
1f7415ee2af6..8ecafad8022e 100644 --- a/test/CodeGen/X86/O0-pipeline.ll +++ b/test/CodeGen/X86/O0-pipeline.ll @@ -49,6 +49,7 @@ ; CHECK-NEXT: X86 pseudo instruction expansion pass ; CHECK-NEXT: Analyze Machine Code For Garbage Collection ; CHECK-NEXT: X86 vzeroupper inserter +; CHECK-NEXT: Check CFA info and insert CFI instructions if needed ; CHECK-NEXT: Contiguously Lay Out Funclets ; CHECK-NEXT: StackMap Liveness Analysis ; CHECK-NEXT: Live DEBUG_VALUE analysis diff --git a/test/CodeGen/X86/TruncAssertZext.ll b/test/CodeGen/X86/TruncAssertZext.ll index b9ae57ca0110..ed98fd51cc02 100644 --- a/test/CodeGen/X86/TruncAssertZext.ll +++ b/test/CodeGen/X86/TruncAssertZext.ll @@ -25,6 +25,7 @@ define i64 @main() { ; CHECK-NEXT: subq %rcx, %rax ; CHECK-NEXT: shrq $32, %rax ; CHECK-NEXT: popq %rcx +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %b = call i64 @foo() %or = and i64 %b, 18446744069414584575 ; this is 0xffffffff000000ff diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll index b75bd8cc3ee0..909e83986805 100644 --- a/test/CodeGen/X86/avx512-mask-op.ll +++ b/test/CodeGen/X86/avx512-mask-op.ll @@ -699,11 +699,13 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) { ; AVX512BW-NEXT: jg LBB17_1 ; AVX512BW-NEXT: ## BB#2: ; AVX512BW-NEXT: vpcmpltud %zmm2, %zmm1, %k0 -; AVX512BW-NEXT: jmp LBB17_3 +; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 +; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill> +; AVX512BW-NEXT: vzeroupper +; AVX512BW-NEXT: retq ; AVX512BW-NEXT: LBB17_1: -; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0 -; AVX512BW-NEXT: LBB17_3: -; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 +; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0 +; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 ; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill> ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq diff --git a/test/CodeGen/X86/avx512-regcall-Mask.ll b/test/CodeGen/X86/avx512-regcall-Mask.ll index bb541f46567f..fa6adec675f8 100644 --- 
a/test/CodeGen/X86/avx512-regcall-Mask.ll +++ b/test/CodeGen/X86/avx512-regcall-Mask.ll @@ -209,12 +209,18 @@ define i64 @caller_argv64i1() #0 { ; LINUXOSX64-NEXT: pushq %rax ; LINUXOSX64-NEXT: .cfi_adjust_cfa_offset 8 ; LINUXOSX64-NEXT: callq test_argv64i1 -; LINUXOSX64-NEXT: addq $24, %rsp +; LINUXOSX64-NEXT: addq $16, %rsp ; LINUXOSX64-NEXT: .cfi_adjust_cfa_offset -16 +; LINUXOSX64-NEXT: addq $8, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 40 ; LINUXOSX64-NEXT: popq %r12 +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 32 ; LINUXOSX64-NEXT: popq %r13 +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 24 ; LINUXOSX64-NEXT: popq %r14 +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %r15 +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i64 4294967298 to <64 x i1> @@ -287,6 +293,7 @@ define <64 x i1> @caller_retv64i1() #0 { ; LINUXOSX64-NEXT: kmovq %rax, %k0 ; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm0 ; LINUXOSX64-NEXT: popq %rax +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <64 x i1> @test_retv64i1() @@ -397,7 +404,9 @@ define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1> ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm15 # 16-byte Reload ; LINUXOSX64-NEXT: addq $128, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: vzeroupper ; LINUXOSX64-NEXT: retq entry: @@ -451,6 +460,7 @@ define i32 @caller_argv32i1() #0 { ; LINUXOSX64-NEXT: movl $1, %edx ; LINUXOSX64-NEXT: callq test_argv32i1 ; LINUXOSX64-NEXT: popq %rcx +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i32 1 to <32 x i1> @@ -513,6 +523,7 @@ define i32 @caller_retv32i1() #0 { ; LINUXOSX64-NEXT: callq test_retv32i1 ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: popq %rcx +; LINUXOSX64-NEXT: 
.cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <32 x i1> @test_retv32i1() @@ -626,7 +637,9 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm15 # 16-byte Reload ; LINUXOSX64-NEXT: addq $128, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %res = call i16 @test_argv16i1helper(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> %x2) ret i16 %res @@ -678,6 +691,7 @@ define i16 @caller_argv16i1() #0 { ; LINUXOSX64-NEXT: movl $1, %edx ; LINUXOSX64-NEXT: callq test_argv16i1 ; LINUXOSX64-NEXT: popq %rcx +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i16 1 to <16 x i1> @@ -746,6 +760,7 @@ define i16 @caller_retv16i1() #0 { ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> ; LINUXOSX64-NEXT: popq %rcx +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <16 x i1> @test_retv16i1() @@ -859,7 +874,9 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2) ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm15 # 16-byte Reload ; LINUXOSX64-NEXT: addq $128, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %res = call i8 @test_argv8i1helper(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2) ret i8 %res @@ -911,6 +928,7 @@ define i8 @caller_argv8i1() #0 { ; LINUXOSX64-NEXT: movl $1, %edx ; LINUXOSX64-NEXT: callq test_argv8i1 ; LINUXOSX64-NEXT: popq %rcx +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i8 1 to <8 x i1> @@ -984,9 +1002,11 @@ define <8 x i1> @caller_retv8i1() #0 { ; 
LINUXOSX64-NEXT: vpmovm2w %k0, %zmm0 ; LINUXOSX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill> ; LINUXOSX64-NEXT: popq %rax +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: vzeroupper ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <8 x i1> @test_retv8i1() ret <8 x i1> %call } + diff --git a/test/CodeGen/X86/avx512-regcall-NoMask.ll b/test/CodeGen/X86/avx512-regcall-NoMask.ll index 43a1871245ba..b4f1d2c776d9 100644 --- a/test/CodeGen/X86/avx512-regcall-NoMask.ll +++ b/test/CodeGen/X86/avx512-regcall-NoMask.ll @@ -63,6 +63,7 @@ define x86_regcallcc i1 @test_CallargReti1(i1 %a) { ; LINUXOSX64-NEXT: callq test_argReti1 ; LINUXOSX64-NEXT: incb %al ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i1 %a, 1 %c = call x86_regcallcc i1 @test_argReti1(i1 %b) @@ -130,6 +131,7 @@ define x86_regcallcc i8 @test_CallargReti8(i8 %a) { ; LINUXOSX64-NEXT: callq test_argReti8 ; LINUXOSX64-NEXT: incb %al ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i8 %a, 1 %c = call x86_regcallcc i8 @test_argReti8(i8 %b) @@ -200,6 +202,7 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) { ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i16 %a, 1 %c = call x86_regcallcc i16 @test_argReti16(i16 %b) @@ -261,6 +264,7 @@ define x86_regcallcc i32 @test_CallargReti32(i32 %a) { ; LINUXOSX64-NEXT: callq test_argReti32 ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i32 %a, 1 %c = call x86_regcallcc i32 @test_argReti32(i32 %b) @@ -327,6 +331,7 @@ define x86_regcallcc i64 @test_CallargReti64(i64 %a) { ; LINUXOSX64-NEXT: callq test_argReti64 ; LINUXOSX64-NEXT: incq %rax ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; 
LINUXOSX64-NEXT: retq %b = add i64 %a, 1 %c = call x86_regcallcc i64 @test_argReti64(i64 %b) @@ -406,7 +411,9 @@ define x86_regcallcc float @test_CallargRetFloat(float %a) { ; LINUXOSX64-NEXT: vaddss %xmm8, %xmm0, %xmm0 ; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload ; LINUXOSX64-NEXT: addq $16, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = fadd float 1.0, %a %c = call x86_regcallcc float @test_argRetFloat(float %b) @@ -486,7 +493,9 @@ define x86_regcallcc double @test_CallargRetDouble(double %a) { ; LINUXOSX64-NEXT: vaddsd %xmm8, %xmm0, %xmm0 ; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload ; LINUXOSX64-NEXT: addq $16, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = fadd double 1.0, %a %c = call x86_regcallcc double @test_argRetDouble(double %b) @@ -548,6 +557,7 @@ define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a) { ; LINUXOSX64-NEXT: callq test_argRetf80 ; LINUXOSX64-NEXT: fadd %st(0), %st(0) ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = fadd x86_fp80 %a, %a %c = call x86_regcallcc x86_fp80 @test_argRetf80(x86_fp80 %b) @@ -611,6 +621,7 @@ define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) { ; LINUXOSX64-NEXT: callq test_argRetPointer ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = ptrtoint [4 x i32]* %a to i32 %c = add i32 %b, 1 @@ -694,7 +705,9 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) { ; LINUXOSX64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1} ; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload ; LINUXOSX64-NEXT: addq $16, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; 
LINUXOSX64-NEXT: retq %b = call x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i32> %a, <4 x i32> %a) %c = select <4 x i1> undef , <4 x i32> %a, <4 x i32> %b @@ -768,7 +781,9 @@ define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) { ; LINUXOSX64-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload ; LINUXOSX64-NEXT: vmovdqa32 %ymm1, %ymm0 {%k1} ; LINUXOSX64-NEXT: addq $48, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = call x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i32> %a, <8 x i32> %a) %c = select <8 x i1> undef , <8 x i32> %a, <8 x i32> %b @@ -842,7 +857,9 @@ define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i32> %a) { ; LINUXOSX64-NEXT: vmovdqu64 (%rsp), %zmm1 # 64-byte Reload ; LINUXOSX64-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} ; LINUXOSX64-NEXT: addq $112, %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp +; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = call x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i32> %a, <16 x i32> %a) %c = select <16 x i1> undef , <16 x i32> %a, <16 x i32> %b diff --git a/test/CodeGen/X86/avx512-schedule.ll b/test/CodeGen/X86/avx512-schedule.ll index 8372fbdb9aba..abc8c1a7513e 100755 --- a/test/CodeGen/X86/avx512-schedule.ll +++ b/test/CodeGen/X86/avx512-schedule.ll @@ -8839,6 +8839,7 @@ define <16 x float> @broadcast_ss_spill(float %x) { ; GENERIC-NEXT: callq func_f32 ; GENERIC-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload ; GENERIC-NEXT: addq $24, %rsp # sched: [1:0.33] +; GENERIC-NEXT: .cfi_def_cfa_offset 8 ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; SKX-LABEL: broadcast_ss_spill: @@ -8852,6 +8853,7 @@ define <16 x float> @broadcast_ss_spill(float %x) { ; SKX-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload sched: [8:0.50] ; SKX-NEXT: # sched: [8:0.50] ; SKX-NEXT: addq $24, %rsp # sched: [1:0.25] +; SKX-NEXT: 
.cfi_def_cfa_offset 8 ; SKX-NEXT: retq # sched: [7:1.00] %a = fadd float %x, %x call void @func_f32(float %a) @@ -8872,6 +8874,7 @@ define <8 x double> @broadcast_sd_spill(double %x) { ; GENERIC-NEXT: callq func_f64 ; GENERIC-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload ; GENERIC-NEXT: addq $24, %rsp # sched: [1:0.33] +; GENERIC-NEXT: .cfi_def_cfa_offset 8 ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; SKX-LABEL: broadcast_sd_spill: @@ -8885,6 +8888,7 @@ define <8 x double> @broadcast_sd_spill(double %x) { ; SKX-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload sched: [8:0.50] ; SKX-NEXT: # sched: [8:0.50] ; SKX-NEXT: addq $24, %rsp # sched: [1:0.25] +; SKX-NEXT: .cfi_def_cfa_offset 8 ; SKX-NEXT: retq # sched: [7:1.00] %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll index 43cf9ee73582..51a7c685ed4a 100644 --- a/test/CodeGen/X86/avx512-select.ll +++ b/test/CodeGen/X86/avx512-select.ll @@ -115,6 +115,7 @@ define <16 x double> @select04(<16 x double> %a, <16 x double> %b) { ; X86-NEXT: vmovaps 8(%ebp), %zmm1 ; X86-NEXT: movl %ebp, %esp ; X86-NEXT: popl %ebp +; X86-NEXT: .cfi_def_cfa %esp, 4 ; X86-NEXT: retl ; ; X64-LABEL: select04: diff --git a/test/CodeGen/X86/avx512-vbroadcast.ll b/test/CodeGen/X86/avx512-vbroadcast.ll index 584968f1c6ef..9aacb23fbd5b 100644 --- a/test/CodeGen/X86/avx512-vbroadcast.ll +++ b/test/CodeGen/X86/avx512-vbroadcast.ll @@ -413,6 +413,7 @@ define <16 x float> @broadcast_ss_spill(float %x) { ; ALL-NEXT: callq func_f32 ; ALL-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload ; ALL-NEXT: addq $24, %rsp +; ALL-NEXT: .cfi_def_cfa_offset 8 ; ALL-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -432,6 +433,7 @@ define <8 x double> @broadcast_sd_spill(double %x) { ; ALL-NEXT: callq func_f64 ; ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload ; ALL-NEXT: addq $24, %rsp +; ALL-NEXT: .cfi_def_cfa_offset 8 ; 
ALL-NEXT: retq %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll index d1bf8fd5f3f7..7f170cd51bf9 100644 --- a/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll +++ b/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll @@ -717,6 +717,7 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext ; X32-NEXT: vpbroadcastb %eax, %zmm3 {%k1} ; X32-NEXT: vmovdqa64 %zmm3, %zmm0 ; X32-NEXT: popl %ebx +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm512_mask_set1_epi8: @@ -1444,6 +1445,7 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) { ; X32-NEXT: korq %k0, %k1, %k1 ; X32-NEXT: vpbroadcastb %eax, %zmm0 {%k1} {z} ; X32-NEXT: popl %ebx +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm512_maskz_set1_epi8: diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll index a5ef1809157b..87565ac129b9 100644 --- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll +++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll @@ -355,6 +355,7 @@ define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1) @@ -380,6 +381,7 @@ define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask) @@ -445,6 +447,7 @@ define i64 
@test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1) @@ -470,6 +473,7 @@ define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask) @@ -1702,6 +1706,7 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $60, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1) @@ -2503,8 +2508,11 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) { ; AVX512F-32-NEXT: addl %esi, %eax ; AVX512F-32-NEXT: adcl %ecx, %edx ; AVX512F-32-NEXT: addl $60, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: popl %esi +; AVX512F-32-NEXT: .cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: popl %ebx +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask) @@ -2586,6 +2594,7 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $60, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call 
i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1) @@ -3387,8 +3396,11 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m ; AVX512F-32-NEXT: addl %esi, %eax ; AVX512F-32-NEXT: adcl %ecx, %edx ; AVX512F-32-NEXT: addl $60, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: popl %esi +; AVX512F-32-NEXT: .cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: popl %ebx +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask) diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll index e23deebd15b8..c2620642e5ce 100644 --- a/test/CodeGen/X86/avx512bw-intrinsics.ll +++ b/test/CodeGen/X86/avx512bw-intrinsics.ll @@ -1499,6 +1499,7 @@ define i64@test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.kunpck.dq(i64 %x0, i64 %x1) ret i64 %res @@ -1522,6 +1523,7 @@ define i64@test_int_x86_avx512_cvtb2mask_512(<64 x i8> %x0) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.cvtb2mask.512(<64 x i8> %x0) ret i64 %res @@ -1712,6 +1714,7 @@ define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $20, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) %res1 = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, 
i64-1) @@ -1776,6 +1779,7 @@ define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 % ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $20, %esp +; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) %res1 = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64-1) diff --git a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll index f5578d6cc885..3f4a696af0cb 100644 --- a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll +++ b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll @@ -233,6 +233,7 @@ define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastd %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastd_epi32: @@ -265,6 +266,7 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastd_epi32: @@ -369,6 +371,7 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastq_epi64: @@ -398,6 +401,7 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastq_epi64: @@ -441,6 +445,7 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 
%a1, <2 x i ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm1, %ymm0 {%k1} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_broadcastq_epi64: @@ -470,6 +475,7 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_broadcastq_epi64: @@ -513,6 +519,7 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastsd_pd: @@ -542,6 +549,7 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastsd_pd: @@ -585,6 +593,7 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2 ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_broadcastsd_pd: @@ -614,6 +623,7 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_broadcastsd_pd: @@ -657,6 +667,7 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastss %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastss_ps: @@ -686,6 
+697,7 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastss_ps: @@ -781,6 +793,7 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_movddup_pd: @@ -810,6 +823,7 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_movddup_pd: @@ -853,6 +867,7 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = ymm1[0,0,2,2] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_movddup_pd: @@ -882,6 +897,7 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_movddup_pd: @@ -925,6 +941,7 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = xmm1[1,1,3,3] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_movehdup_ps: @@ -954,6 +971,7 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] ; X32-NEXT: popl %eax +; X32-NEXT: 
.cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_movehdup_ps: @@ -1049,6 +1067,7 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = xmm1[0,0,2,2] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_moveldup_ps: @@ -1078,6 +1097,7 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_moveldup_ps: @@ -1173,6 +1193,7 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64 ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_permutex_epi64: @@ -1202,6 +1223,7 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_permutex_epi64: @@ -1245,6 +1267,7 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_permutex_pd: @@ -1274,6 +1297,7 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_permutex_pd: @@ -1317,6 +1341,7 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 
%a1, <2 x doub ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} = xmm1[1],xmm2[1] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_shuffle_pd: @@ -1346,6 +1371,7 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shuffle_pd: @@ -1389,6 +1415,7 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} = ymm1[1],ymm2[1],ymm1[2],ymm2[2] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_shuffle_pd: @@ -1418,6 +1445,7 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[2],ymm1[2] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_shuffle_pd: @@ -1461,6 +1489,7 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} = xmm1[0,1],xmm2[0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_shuffle_ps: @@ -1490,6 +1519,7 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1],xmm1[0,0] ; X32-NEXT: popl %eax +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shuffle_ps: diff --git a/test/CodeGen/X86/avx512vl-vbroadcast.ll b/test/CodeGen/X86/avx512vl-vbroadcast.ll index 9fc957297e24..1098e7bffe0c 100644 --- a/test/CodeGen/X86/avx512vl-vbroadcast.ll +++ 
b/test/CodeGen/X86/avx512vl-vbroadcast.ll @@ -12,6 +12,7 @@ define <8 x float> @_256_broadcast_ss_spill(float %x) { ; CHECK-NEXT: callq func_f32 ; CHECK-NEXT: vbroadcastss (%rsp), %ymm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -30,6 +31,7 @@ define <4 x float> @_128_broadcast_ss_spill(float %x) { ; CHECK-NEXT: callq func_f32 ; CHECK-NEXT: vbroadcastss (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -49,6 +51,7 @@ define <4 x double> @_256_broadcast_sd_spill(double %x) { ; CHECK-NEXT: callq func_f64 ; CHECK-NEXT: vbroadcastsd (%rsp), %ymm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll index 26a7d83a027a..bccf953fb0be 100644 --- a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll +++ b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll @@ -109,6 +109,7 @@ define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -227,6 +228,7 @@ define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -348,6 +350,7 @@ define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -470,6 
+473,7 @@ define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -597,6 +601,7 @@ define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -720,6 +725,7 @@ define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -846,6 +852,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -973,6 +980,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1024,6 +1032,7 @@ define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1071,6 +1080,7 @@ define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1129,6 +1139,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; 
NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1188,6 +1199,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1384,6 +1396,7 @@ define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1457,6 +1470,7 @@ define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1533,6 +1547,7 @@ define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1610,6 +1625,7 @@ define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1692,6 +1708,7 @@ define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1770,6 +1787,7 @@ define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: 
vzeroupper ; NoVLX-NEXT: retq entry: @@ -1851,6 +1869,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1933,6 +1952,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2056,6 +2076,7 @@ define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2175,6 +2196,7 @@ define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2297,6 +2319,7 @@ define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2420,6 +2443,7 @@ define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2548,6 +2572,7 @@ define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2672,6 +2697,7 @@ define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; 
NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2799,6 +2825,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2927,6 +2954,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3280,6 +3308,7 @@ define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3544,6 +3573,7 @@ define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3904,6 +3934,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4180,6 +4211,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5043,6 +5075,7 @@ define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; 
NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5084,6 +5117,7 @@ define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5145,6 +5179,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5208,6 +5243,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5255,6 +5291,7 @@ define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5318,6 +5355,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5371,6 +5409,7 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5418,6 +5457,7 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5485,6 +5525,7 @@ 
define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5554,6 +5595,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5607,6 +5649,7 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5676,6 +5719,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5949,6 +5993,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6022,6 +6067,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6098,6 +6144,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6175,6 +6222,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 
; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6252,6 +6300,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6329,6 +6378,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6412,6 +6462,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6490,6 +6541,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6571,6 +6623,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6653,6 +6706,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6735,6 +6789,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq 
%rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6817,6 +6872,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6938,6 +6994,7 @@ define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7054,6 +7111,7 @@ define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7173,6 +7231,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7293,6 +7352,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7413,6 +7473,7 @@ define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7533,6 +7594,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7659,6 +7721,7 @@ define zeroext i64 
@test_vpcmpeqd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7780,6 +7843,7 @@ define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7904,6 +7968,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -8029,6 +8094,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -8154,6 +8220,7 @@ define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -8279,6 +8346,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9123,6 +9191,7 @@ define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9164,6 +9233,7 @@ define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; 
NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9217,6 +9287,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9272,6 +9343,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9319,6 +9391,7 @@ define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9374,6 +9447,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9427,6 +9501,7 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9474,6 +9549,7 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9533,6 +9609,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq 
entry: @@ -9594,6 +9671,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9647,6 +9725,7 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9708,6 +9787,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10599,6 +10679,7 @@ define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10642,6 +10723,7 @@ define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10705,6 +10787,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10770,6 +10853,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10819,6 +10903,7 @@ define zeroext i32 
@test_vpcmpeqq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10884,6 +10969,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10939,6 +11025,7 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10988,6 +11075,7 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11057,6 +11145,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11128,6 +11217,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11183,6 +11273,7 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11254,6 +11345,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; 
NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11501,6 +11593,7 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11572,6 +11665,7 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11646,6 +11740,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11721,6 +11816,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11796,6 +11892,7 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11871,6 +11968,7 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11952,6 +12050,7 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp 
; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12028,6 +12127,7 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12107,6 +12207,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12187,6 +12288,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12267,6 +12369,7 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12347,6 +12450,7 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12470,6 +12574,7 @@ define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12588,6 +12693,7 @@ define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; 
NoVLX-NEXT: retq entry: @@ -12709,6 +12815,7 @@ define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12831,6 +12938,7 @@ define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12958,6 +13066,7 @@ define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13081,6 +13190,7 @@ define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13207,6 +13317,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13334,6 +13445,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13385,6 +13497,7 @@ define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13432,6 +13545,7 @@ define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: 
orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13490,6 +13604,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13549,6 +13664,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13745,6 +13861,7 @@ define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13818,6 +13935,7 @@ define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13894,6 +14012,7 @@ define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13971,6 +14090,7 @@ define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14053,6 +14173,7 @@ define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq 
%rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14131,6 +14252,7 @@ define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14212,6 +14334,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14294,6 +14417,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14417,6 +14541,7 @@ define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14536,6 +14661,7 @@ define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14658,6 +14784,7 @@ define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14781,6 +14908,7 @@ define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14909,6 +15037,7 @@ 
define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15033,6 +15162,7 @@ define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15160,6 +15290,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15288,6 +15419,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15641,6 +15773,7 @@ define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15905,6 +16038,7 @@ define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16265,6 +16399,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16541,6 +16676,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; 
NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17404,6 +17540,7 @@ define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17445,6 +17582,7 @@ define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17506,6 +17644,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17569,6 +17708,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17616,6 +17756,7 @@ define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17679,6 +17820,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17732,6 +17874,7 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; 
NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17779,6 +17922,7 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17846,6 +17990,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17915,6 +18060,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17968,6 +18114,7 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18037,6 +18184,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18310,6 +18458,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18383,6 +18532,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; 
NoVLX-NEXT: retq entry: @@ -18459,6 +18609,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18536,6 +18687,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18613,6 +18765,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18690,6 +18843,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18773,6 +18927,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18851,6 +19006,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18932,6 +19088,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19014,6 +19171,7 @@ define zeroext 
i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19096,6 +19254,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19178,6 +19337,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19299,6 +19459,7 @@ define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19415,6 +19576,7 @@ define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19534,6 +19696,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19654,6 +19817,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19774,6 +19938,7 @@ define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; 
NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19894,6 +20059,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20020,6 +20186,7 @@ define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20141,6 +20308,7 @@ define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20265,6 +20433,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20390,6 +20559,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20515,6 +20685,7 @@ define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20640,6 +20811,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21484,6 +21656,7 @@ define zeroext i32 
@test_vpcmpsgtq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21525,6 +21698,7 @@ define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21578,6 +21752,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21633,6 +21808,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21680,6 +21856,7 @@ define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21735,6 +21912,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21788,6 +21966,7 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21835,6 +22014,7 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x 
i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21894,6 +22074,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21955,6 +22136,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22008,6 +22190,7 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22069,6 +22252,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22960,6 +23144,7 @@ define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23003,6 +23188,7 @@ define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23066,6 +23252,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; 
NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23131,6 +23318,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23180,6 +23368,7 @@ define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23245,6 +23434,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23300,6 +23490,7 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23349,6 +23540,7 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23418,6 +23610,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23489,6 +23682,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; 
NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23544,6 +23738,7 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23615,6 +23810,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23862,6 +24058,7 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23933,6 +24130,7 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24007,6 +24205,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24082,6 +24281,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24157,6 +24357,7 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24232,6 
+24433,7 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24313,6 +24515,7 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24389,6 +24592,7 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24468,6 +24672,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24548,6 +24753,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24628,6 +24834,7 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24708,6 +24915,7 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24833,6 +25041,7 @@ define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask(<2 x 
i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24954,6 +25163,7 @@ define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25077,6 +25287,7 @@ define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25202,6 +25413,7 @@ define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25331,6 +25543,7 @@ define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25457,6 +25670,7 @@ define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25585,6 +25799,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25715,6 +25930,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: 
vzeroupper ; NoVLX-NEXT: retq entry: @@ -25768,6 +25984,7 @@ define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25818,6 +26035,7 @@ define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25878,6 +26096,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25940,6 +26159,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26148,6 +26368,7 @@ define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26224,6 +26445,7 @@ define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26302,6 +26524,7 @@ define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26382,6 +26605,7 @@ define 
zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26466,6 +26690,7 @@ define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26547,6 +26772,7 @@ define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26630,6 +26856,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26715,6 +26942,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26840,6 +27068,7 @@ define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26962,6 +27191,7 @@ define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27086,6 +27316,7 @@ define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq 
%r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27212,6 +27443,7 @@ define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27342,6 +27574,7 @@ define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27469,6 +27702,7 @@ define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27598,6 +27832,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27729,6 +27964,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -28085,6 +28321,7 @@ define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -28354,6 +28591,7 @@ define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: 
retq entry: @@ -28717,6 +28955,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -28998,6 +29237,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -29879,6 +30119,7 @@ define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -29923,6 +30164,7 @@ define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -29984,6 +30226,7 @@ define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30048,6 +30291,7 @@ define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30097,6 +30341,7 @@ define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30160,6 +30405,7 @@ define zeroext i32 
@test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30215,6 +30461,7 @@ define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30265,6 +30512,7 @@ define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30332,6 +30580,7 @@ define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30402,6 +30651,7 @@ define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30457,6 +30707,7 @@ define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30526,6 +30777,7 @@ define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30799,6 +31051,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; 
NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30872,6 +31125,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30948,6 +31202,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31025,6 +31280,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31102,6 +31358,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31179,6 +31436,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31262,6 +31520,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31340,6 +31599,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp 
; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31421,6 +31681,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31503,6 +31764,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31585,6 +31847,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31667,6 +31930,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31788,6 +32052,7 @@ define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31904,6 +32169,7 @@ define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32023,6 +32289,7 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq 
entry: @@ -32143,6 +32410,7 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32263,6 +32531,7 @@ define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32383,6 +32652,7 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32509,6 +32779,7 @@ define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32630,6 +32901,7 @@ define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32754,6 +33026,7 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32879,6 +33152,7 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -33004,6 +33278,7 @@ define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: 
popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -33129,6 +33404,7 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -33999,6 +34275,7 @@ define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34043,6 +34320,7 @@ define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34096,6 +34374,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34152,6 +34431,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34201,6 +34481,7 @@ define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34256,6 +34537,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 
8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34311,6 +34593,7 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34361,6 +34644,7 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34420,6 +34704,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34482,6 +34767,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34537,6 +34823,7 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34598,6 +34885,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35519,6 +35807,7 @@ define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35565,6 
+35854,7 @@ define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35630,6 +35920,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35698,6 +35989,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35749,6 +36041,7 @@ define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35816,6 +36109,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35873,6 +36167,7 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35925,6 +36220,7 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35996,6 +36292,7 @@ define zeroext i64 
@test_masked_vpcmpsgeq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36070,6 +36367,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36127,6 +36425,7 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36200,6 +36499,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36447,6 +36747,7 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36518,6 +36819,7 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36592,6 +36894,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36667,6 +36970,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; 
NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36742,6 +37046,7 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36817,6 +37122,7 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36898,6 +37204,7 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36974,6 +37281,7 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37053,6 +37361,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37133,6 +37442,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37213,6 +37523,7 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; 
NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37293,6 +37604,7 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37419,6 +37731,7 @@ define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37540,6 +37853,7 @@ define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37664,6 +37978,7 @@ define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37789,6 +38104,7 @@ define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37919,6 +38235,7 @@ define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38045,6 +38362,7 @@ define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38174,6 +38492,7 @@ define 
zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38304,6 +38623,7 @@ define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38358,6 +38678,7 @@ define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38408,6 +38729,7 @@ define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38469,6 +38791,7 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38531,6 +38854,7 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38742,6 +39066,7 @@ define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38818,6 +39143,7 @@ define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl 
(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38897,6 +39223,7 @@ define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38977,6 +39304,7 @@ define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39062,6 +39390,7 @@ define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39143,6 +39472,7 @@ define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39227,6 +39557,7 @@ define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39312,6 +39643,7 @@ define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39438,6 +39770,7 @@ define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: 
.cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39560,6 +39893,7 @@ define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39685,6 +40019,7 @@ define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39811,6 +40146,7 @@ define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39942,6 +40278,7 @@ define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40069,6 +40406,7 @@ define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40199,6 +40537,7 @@ define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40330,6 +40669,7 @@ define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40688,6 +41028,7 @@ define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask(<8 x 
i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40957,6 +41298,7 @@ define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -41322,6 +41664,7 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -41603,6 +41946,7 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42505,6 +42849,7 @@ define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42549,6 +42894,7 @@ define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42613,6 +42959,7 @@ define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42679,6 +43026,7 @@ define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; 
NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42729,6 +43077,7 @@ define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42795,6 +43144,7 @@ define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42851,6 +43201,7 @@ define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42901,6 +43252,7 @@ define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42971,6 +43323,7 @@ define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43043,6 +43396,7 @@ define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43099,6 +43453,7 @@ define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: 
.cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43171,6 +43526,7 @@ define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43444,6 +43800,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43517,6 +43874,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43593,6 +43951,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43670,6 +44029,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43747,6 +44107,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43824,6 +44185,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; 
NoVLX-NEXT: retq entry: @@ -43907,6 +44269,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43985,6 +44348,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44066,6 +44430,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44148,6 +44513,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44230,6 +44596,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44312,6 +44679,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44433,6 +44801,7 @@ define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44549,6 +44918,7 @@ define zeroext i32 
@test_vpcmpultd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44668,6 +45038,7 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44788,6 +45159,7 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44908,6 +45280,7 @@ define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45028,6 +45401,7 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45154,6 +45528,7 @@ define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45275,6 +45650,7 @@ define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45399,6 +45775,7 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: 
.cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45524,6 +45901,7 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45649,6 +46027,7 @@ define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45774,6 +46153,7 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46675,6 +47055,7 @@ define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46719,6 +47100,7 @@ define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46775,6 +47157,7 @@ define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46833,6 +47216,7 @@ define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46883,6 +47267,7 @@ 
define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46941,6 +47326,7 @@ define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46997,6 +47383,7 @@ define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47047,6 +47434,7 @@ define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47109,6 +47497,7 @@ define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47173,6 +47562,7 @@ define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47229,6 +47619,7 @@ define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47293,6 +47684,7 @@ define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b(i8 
zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48223,6 +48615,7 @@ define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48269,6 +48662,7 @@ define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48335,6 +48729,7 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48403,6 +48798,7 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48455,6 +48851,7 @@ define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48523,6 +48920,7 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48581,6 +48979,7 @@ define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: 
movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48633,6 +49032,7 @@ define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48705,6 +49105,7 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48779,6 +49180,7 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48837,6 +49239,7 @@ define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48911,6 +49314,7 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49158,6 +49562,7 @@ define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49229,6 +49634,7 @@ define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa 
%rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49303,6 +49709,7 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49378,6 +49785,7 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49453,6 +49861,7 @@ define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49528,6 +49937,7 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49609,6 +50019,7 @@ define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49685,6 +50096,7 @@ define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49764,6 +50176,7 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ 
-49844,6 +50257,7 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49924,6 +50338,7 @@ define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50004,6 +50419,7 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50797,6 +51213,7 @@ define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50838,6 +51255,7 @@ define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50881,6 +51299,7 @@ define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, float* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50932,6 +51351,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask(i4 zeroext %__u, <2 x ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50983,6 +51403,7 @@ define zeroext i32 
@test_masked_vcmpoeqps_v4i1_v32i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51036,6 +51457,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b(i4 zeroext %__u, ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51089,6 +51511,7 @@ define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51136,6 +51559,7 @@ define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51185,6 +51609,7 @@ define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, float* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51242,6 +51667,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask(i4 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51299,6 +51725,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51358,6 +51785,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b(i4 
zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51631,6 +52059,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51704,6 +52133,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51778,6 +52208,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, float* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51855,6 +52286,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51932,6 +52364,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52010,6 +52443,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52094,6 +52528,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: 
movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52172,6 +52607,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52251,6 +52687,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, float* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52333,6 +52770,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52415,6 +52853,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52498,6 +52937,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52620,6 +53060,7 @@ define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52736,6 +53177,7 @@ define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: 
vzeroupper ; NoVLX-NEXT: retq entry: @@ -52853,6 +53295,7 @@ define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, float* ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52973,6 +53416,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53093,6 +53537,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53214,6 +53659,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53382,6 +53828,7 @@ define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53503,6 +53950,7 @@ define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53625,6 +54073,7 @@ define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, float* ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53750,6 +54199,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; 
NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53875,6 +54325,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54001,6 +54452,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54854,6 +55306,7 @@ define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54895,6 +55348,7 @@ define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54938,6 +55392,7 @@ define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, double* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54988,6 +55443,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask(i2 zeroext %__u, <2 x ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55038,6 +55494,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem(i2 zeroext %__u, < ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: 
popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55090,6 +55547,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b(i2 zeroext %__u, ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55143,6 +55601,7 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55190,6 +55649,7 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55239,6 +55699,7 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, double* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55295,6 +55756,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask(i2 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55351,6 +55813,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem(i2 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55409,6 +55872,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b(i2 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: 
vzeroupper ; NoVLX-NEXT: retq entry: @@ -56228,6 +56692,7 @@ define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56271,6 +56736,7 @@ define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56316,6 +56782,7 @@ define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, double* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56369,6 +56836,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask(i4 zeroext %__u, <4 x ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56422,6 +56890,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56477,6 +56946,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b(i4 zeroext %__u, ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56532,6 +57002,7 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: 
retq entry: @@ -56581,6 +57052,7 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56632,6 +57104,7 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, double* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56691,6 +57164,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask(i4 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56750,6 +57224,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56811,6 +57286,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b(i4 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57114,6 +57590,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57185,6 +57662,7 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57257,6 +57735,7 @@ define zeroext i32 
@test_vcmpoeqpd_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, double* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57332,6 +57811,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57407,6 +57887,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57483,6 +57964,7 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57615,6 +58097,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57691,6 +58174,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57768,6 +58252,7 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, double* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57848,6 +58333,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; 
NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57928,6 +58414,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -58009,6 +58496,7 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp +; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: diff --git a/test/CodeGen/X86/bitcast-and-setcc-256.ll b/test/CodeGen/X86/bitcast-and-setcc-256.ll index e197713c6793..c48222000c6b 100644 --- a/test/CodeGen/X86/bitcast-and-setcc-256.ll +++ b/test/CodeGen/X86/bitcast-and-setcc-256.ll @@ -439,6 +439,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/test/CodeGen/X86/bitcast-and-setcc-512.ll b/test/CodeGen/X86/bitcast-and-setcc-512.ll index f6cfbbb40440..f5fe395eaf3d 100644 --- a/test/CodeGen/X86/bitcast-and-setcc-512.ll +++ b/test/CodeGen/X86/bitcast-and-setcc-512.ll @@ -594,6 +594,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1239,6 +1240,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { ; AVX1-NEXT: orq %rcx, %rax ; AVX1-NEXT: movq %rbp, %rsp ; AVX1-NEXT: popq %rbp +; AVX1-NEXT: .cfi_def_cfa %rsp, 8 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq 
; @@ -1457,6 +1459,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { ; AVX2-NEXT: orq %rcx, %rax ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp +; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1499,6 +1502,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { ; AVX512F-NEXT: orq %rcx, %rax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll index 4ed55ac0919e..1959000b859f 100644 --- a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll +++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll @@ -321,11 +321,17 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) { ; AVX512-NEXT: vpinsrb $15, %r9d, %xmm0, %xmm0 ; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; AVX512-NEXT: popq %rbx +; AVX512-NEXT: .cfi_def_cfa_offset 48 ; AVX512-NEXT: popq %r12 +; AVX512-NEXT: .cfi_def_cfa_offset 40 ; AVX512-NEXT: popq %r13 +; AVX512-NEXT: .cfi_def_cfa_offset 32 ; AVX512-NEXT: popq %r14 +; AVX512-NEXT: .cfi_def_cfa_offset 24 ; AVX512-NEXT: popq %r15 +; AVX512-NEXT: .cfi_def_cfa_offset 16 ; AVX512-NEXT: popq %rbp +; AVX512-NEXT: .cfi_def_cfa_offset 8 ; AVX512-NEXT: retq %1 = bitcast i16 %a0 to <16 x i1> %2 = zext <16 x i1> %1 to <16 x i8> diff --git a/test/CodeGen/X86/bitcast-setcc-256.ll b/test/CodeGen/X86/bitcast-setcc-256.ll index ee2dac1d466e..76160517546c 100644 --- a/test/CodeGen/X86/bitcast-setcc-256.ll +++ b/test/CodeGen/X86/bitcast-setcc-256.ll @@ -204,6 +204,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/test/CodeGen/X86/bitcast-setcc-512.ll b/test/CodeGen/X86/bitcast-setcc-512.ll 
index 2b73c6e16bd0..ef981080bb35 100644 --- a/test/CodeGen/X86/bitcast-setcc-512.ll +++ b/test/CodeGen/X86/bitcast-setcc-512.ll @@ -203,6 +203,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -769,6 +770,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) { ; AVX1-NEXT: orq %rcx, %rax ; AVX1-NEXT: movq %rbp, %rsp ; AVX1-NEXT: popq %rbp +; AVX1-NEXT: .cfi_def_cfa %rsp, 8 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -983,6 +985,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) { ; AVX2-NEXT: orq %rcx, %rax ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp +; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1021,6 +1024,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-NEXT: orq %rcx, %rax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/test/CodeGen/X86/bool-vector.ll b/test/CodeGen/X86/bool-vector.ll index eb40744c54d1..692d992df76e 100644 --- a/test/CodeGen/X86/bool-vector.ll +++ b/test/CodeGen/X86/bool-vector.ll @@ -93,6 +93,7 @@ define i32 @PR15215_good(<4 x i32> %input) { ; X32-NEXT: leal (%eax,%edx,4), %eax ; X32-NEXT: leal (%eax,%esi,8), %eax ; X32-NEXT: popl %esi +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X32-SSE2-LABEL: PR15215_good: @@ -115,6 +116,7 @@ define i32 @PR15215_good(<4 x i32> %input) { ; X32-SSE2-NEXT: leal (%eax,%edx,4), %eax ; X32-SSE2-NEXT: leal (%eax,%esi,8), %eax ; X32-SSE2-NEXT: popl %esi +; X32-SSE2-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE2-NEXT: retl ; ; X32-AVX2-LABEL: PR15215_good: @@ -134,6 +136,7 @@ define i32 @PR15215_good(<4 x i32> %input) { ; X32-AVX2-NEXT: leal (%eax,%edx,4), %eax ; X32-AVX2-NEXT: leal (%eax,%esi,8), %eax ; X32-AVX2-NEXT: popl %esi +; X32-AVX2-NEXT: 
.cfi_def_cfa_offset 4 ; X32-AVX2-NEXT: retl ; ; X64-LABEL: PR15215_good: diff --git a/test/CodeGen/X86/cmp.ll b/test/CodeGen/X86/cmp.ll index 82e133d25767..6f9abae6a715 100644 --- a/test/CodeGen/X86/cmp.ll +++ b/test/CodeGen/X86/cmp.ll @@ -247,10 +247,13 @@ define i32 @test12() ssp uwtable { ; CHECK-NEXT: # BB#1: # %T ; CHECK-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00] ; CHECK-NEXT: popq %rcx # encoding: [0x59] +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq # encoding: [0xc3] ; CHECK-NEXT: .LBB12_2: # %F +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: movl $2, %eax # encoding: [0xb8,0x02,0x00,0x00,0x00] ; CHECK-NEXT: popq %rcx # encoding: [0x59] +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq # encoding: [0xc3] entry: %tmp1 = call zeroext i1 @test12b() diff --git a/test/CodeGen/X86/emutls-pie.ll b/test/CodeGen/X86/emutls-pie.ll index 3c312a926695..f4561fcbd35a 100644 --- a/test/CodeGen/X86/emutls-pie.ll +++ b/test/CodeGen/X86/emutls-pie.ll @@ -18,13 +18,16 @@ define i32 @my_get_xyz() { ; X32-NEXT: calll my_emutls_get_address@PLT ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $8, %esp +; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: my_get_xyz: ; X64: movq my_emutls_v_xyz@GOTPCREL(%rip), %rdi ; X64-NEXT: callq my_emutls_get_address@PLT ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -44,13 +47,16 @@ define i32 @f1() { ; X32-NEXT: calll __emutls_get_address@PLT ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $8, %esp +; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f1: ; X64: leaq __emutls_v.i(%rip), %rdi ; X64-NEXT: callq __emutls_get_address@PLT ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: diff --git a/test/CodeGen/X86/emutls.ll b/test/CodeGen/X86/emutls.ll 
index 8c0ba903659b..2321cd2fc284 100644 --- a/test/CodeGen/X86/emutls.ll +++ b/test/CodeGen/X86/emutls.ll @@ -16,12 +16,14 @@ define i32 @my_get_xyz() { ; X32-NEXT: calll my_emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: my_get_xyz: ; X64: movl $my_emutls_v_xyz, %edi ; X64-NEXT: callq my_emutls_get_address ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -45,12 +47,14 @@ define i32 @f1() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f1: ; X64: movl $__emutls_v.i1, %edi ; X64-NEXT: callq __emutls_get_address ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -63,11 +67,13 @@ define i32* @f2() { ; X32: movl $__emutls_v.i1, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f2: ; X64: movl $__emutls_v.i1, %edi ; X64-NEXT: callq __emutls_get_address ; X64-NEXT: popq %rcx +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -92,6 +98,7 @@ define i32* @f4() { ; X32: movl $__emutls_v.i2, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -116,6 +123,7 @@ define i32* @f6() { ; X32: movl $__emutls_v.i3, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -128,6 +136,7 @@ define i32 @f7() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -140,6 +149,7 @@ define i32* @f8() { ; X32: movl $__emutls_v.i4, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp +; 
X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -152,6 +162,7 @@ define i32 @f9() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -164,6 +175,7 @@ define i32* @f10() { ; X32: movl $__emutls_v.i5, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -176,6 +188,7 @@ define i16 @f11() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movzwl (%eax), %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -189,6 +202,7 @@ define i32 @f12() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movswl (%eax), %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -203,6 +217,7 @@ define i8 @f13() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movb (%eax), %al ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -216,6 +231,7 @@ define i32 @f14() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movsbl (%eax), %eax ; X32-NEXT: addl $12, %esp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: diff --git a/test/CodeGen/X86/epilogue-cfi-fp.ll b/test/CodeGen/X86/epilogue-cfi-fp.ll new file mode 100644 index 000000000000..c2fe1c7eaac3 --- /dev/null +++ b/test/CodeGen/X86/epilogue-cfi-fp.ll @@ -0,0 +1,43 @@ +; RUN: llc -O0 %s -o - | FileCheck %s + +; ModuleID = 'epilogue-cfi-fp.c' +source_filename = "epilogue-cfi-fp.c" +target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" +target triple = "i686-pc-linux" + +; Function Attrs: noinline nounwind +define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) #0 { + +; CHECK-LABEL: foo: +; CHECK: popl %ebp +; CHECK-NEXT: .cfi_def_cfa %esp, 4 +; CHECK-NEXT: retl + +entry: + %i.addr = alloca i32, align 4 + %j.addr = alloca i32, align 4 + %k.addr = alloca i32, align 4 + %l.addr = alloca i32, align 
4 + %m.addr = alloca i32, align 4 + store i32 %i, i32* %i.addr, align 4 + store i32 %j, i32* %j.addr, align 4 + store i32 %k, i32* %k.addr, align 4 + store i32 %l, i32* %l.addr, align 4 + store i32 %m, i32* %m.addr, align 4 + ret i32 0 +} + +attributes #0 = { "no-frame-pointer-elim"="true" } + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4, !5, !6, !7} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) +!1 = !DIFile(filename: "epilogue-cfi-fp.c", directory: "epilogue-dwarf/test") +!2 = !{} +!3 = !{i32 1, !"NumRegisterParameters", i32 0} +!4 = !{i32 2, !"Dwarf Version", i32 4} +!5 = !{i32 2, !"Debug Info Version", i32 3} +!6 = !{i32 1, !"wchar_size", i32 4} +!7 = !{i32 7, !"PIC Level", i32 2} + diff --git a/test/CodeGen/X86/epilogue-cfi-no-fp.ll b/test/CodeGen/X86/epilogue-cfi-no-fp.ll new file mode 100644 index 000000000000..79d6f478de8a --- /dev/null +++ b/test/CodeGen/X86/epilogue-cfi-no-fp.ll @@ -0,0 +1,46 @@ +; RUN: llc -O0 < %s | FileCheck %s + +; ModuleID = 'epilogue-cfi-no-fp.c' +source_filename = "epilogue-cfi-no-fp.c" +target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" +target triple = "i686-pc-linux" + +; Function Attrs: noinline nounwind +define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) { +; CHECK-LABEL: foo: +; CHECK: addl $20, %esp +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 12 +; CHECK-NEXT: popl %edi +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: popl %ebx +; CHECK-NEXT: .cfi_def_cfa_offset 4 +; CHECK-NEXT: retl +entry: + %i.addr = alloca i32, align 4 + %j.addr = alloca i32, align 4 + %k.addr = alloca i32, align 4 + %l.addr = alloca i32, align 4 + %m.addr = alloca i32, align 4 + store i32 %i, i32* 
%i.addr, align 4 + store i32 %j, i32* %j.addr, align 4 + store i32 %k, i32* %k.addr, align 4 + store i32 %l, i32* %l.addr, align 4 + store i32 %m, i32* %m.addr, align 4 + ret i32 0 +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4, !5, !6, !7} + +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) +!1 = !DIFile(filename: "epilogue-cfi-no-fp.c", directory: "epilogue-dwarf/test") +!2 = !{} +!3 = !{i32 1, !"NumRegisterParameters", i32 0} +!4 = !{i32 2, !"Dwarf Version", i32 4} +!5 = !{i32 2, !"Debug Info Version", i32 3} +!6 = !{i32 1, !"wchar_size", i32 4} +!7 = !{i32 7, !"PIC Level", i32 2} + + diff --git a/test/CodeGen/X86/fast-isel-int-float-conversion.ll b/test/CodeGen/X86/fast-isel-int-float-conversion.ll index 3e69710868b6..57b50abab535 100644 --- a/test/CodeGen/X86/fast-isel-int-float-conversion.ll +++ b/test/CodeGen/X86/fast-isel-int-float-conversion.ll @@ -31,6 +31,7 @@ define double @int_to_double_rr(i32 %a) { ; SSE2_X86-NEXT: fldl (%esp) ; SSE2_X86-NEXT: movl %ebp, %esp ; SSE2_X86-NEXT: popl %ebp +; SSE2_X86-NEXT: .cfi_def_cfa %esp, 4 ; SSE2_X86-NEXT: retl ; ; AVX_X86-LABEL: int_to_double_rr: @@ -47,6 +48,7 @@ define double @int_to_double_rr(i32 %a) { ; AVX_X86-NEXT: fldl (%esp) ; AVX_X86-NEXT: movl %ebp, %esp ; AVX_X86-NEXT: popl %ebp +; AVX_X86-NEXT: .cfi_def_cfa %esp, 4 ; AVX_X86-NEXT: retl entry: %0 = sitofp i32 %a to double @@ -80,6 +82,7 @@ define double @int_to_double_rm(i32* %a) { ; SSE2_X86-NEXT: fldl (%esp) ; SSE2_X86-NEXT: movl %ebp, %esp ; SSE2_X86-NEXT: popl %ebp +; SSE2_X86-NEXT: .cfi_def_cfa %esp, 4 ; SSE2_X86-NEXT: retl ; ; AVX_X86-LABEL: int_to_double_rm: @@ -97,6 +100,7 @@ define double @int_to_double_rm(i32* %a) { ; AVX_X86-NEXT: fldl (%esp) ; AVX_X86-NEXT: movl %ebp, 
%esp ; AVX_X86-NEXT: popl %ebp +; AVX_X86-NEXT: .cfi_def_cfa %esp, 4 ; AVX_X86-NEXT: retl entry: %0 = load i32, i32* %a @@ -130,6 +134,7 @@ define double @int_to_double_rm_optsize(i32* %a) optsize { ; SSE2_X86-NEXT: fldl (%esp) ; SSE2_X86-NEXT: movl %ebp, %esp ; SSE2_X86-NEXT: popl %ebp +; SSE2_X86-NEXT: .cfi_def_cfa %esp, 4 ; SSE2_X86-NEXT: retl ; ; AVX_X86-LABEL: int_to_double_rm_optsize: @@ -147,6 +152,7 @@ define double @int_to_double_rm_optsize(i32* %a) optsize { ; AVX_X86-NEXT: fldl (%esp) ; AVX_X86-NEXT: movl %ebp, %esp ; AVX_X86-NEXT: popl %ebp +; AVX_X86-NEXT: .cfi_def_cfa %esp, 4 ; AVX_X86-NEXT: retl entry: %0 = load i32, i32* %a @@ -174,6 +180,7 @@ define float @int_to_float_rr(i32 %a) { ; SSE2_X86-NEXT: movss %xmm0, (%esp) ; SSE2_X86-NEXT: flds (%esp) ; SSE2_X86-NEXT: popl %eax +; SSE2_X86-NEXT: .cfi_def_cfa_offset 4 ; SSE2_X86-NEXT: retl ; ; AVX_X86-LABEL: int_to_float_rr: @@ -184,6 +191,7 @@ define float @int_to_float_rr(i32 %a) { ; AVX_X86-NEXT: vmovss %xmm0, (%esp) ; AVX_X86-NEXT: flds (%esp) ; AVX_X86-NEXT: popl %eax +; AVX_X86-NEXT: .cfi_def_cfa_offset 4 ; AVX_X86-NEXT: retl entry: %0 = sitofp i32 %a to float @@ -211,6 +219,7 @@ define float @int_to_float_rm(i32* %a) { ; SSE2_X86-NEXT: movss %xmm0, (%esp) ; SSE2_X86-NEXT: flds (%esp) ; SSE2_X86-NEXT: popl %eax +; SSE2_X86-NEXT: .cfi_def_cfa_offset 4 ; SSE2_X86-NEXT: retl ; ; AVX_X86-LABEL: int_to_float_rm: @@ -222,6 +231,7 @@ define float @int_to_float_rm(i32* %a) { ; AVX_X86-NEXT: vmovss %xmm0, (%esp) ; AVX_X86-NEXT: flds (%esp) ; AVX_X86-NEXT: popl %eax +; AVX_X86-NEXT: .cfi_def_cfa_offset 4 ; AVX_X86-NEXT: retl entry: %0 = load i32, i32* %a @@ -249,6 +259,7 @@ define float @int_to_float_rm_optsize(i32* %a) optsize { ; SSE2_X86-NEXT: movss %xmm0, (%esp) ; SSE2_X86-NEXT: flds (%esp) ; SSE2_X86-NEXT: popl %eax +; SSE2_X86-NEXT: .cfi_def_cfa_offset 4 ; SSE2_X86-NEXT: retl ; ; AVX_X86-LABEL: int_to_float_rm_optsize: @@ -260,6 +271,7 @@ define float @int_to_float_rm_optsize(i32* %a) optsize { ; 
AVX_X86-NEXT: vmovss %xmm0, (%esp) ; AVX_X86-NEXT: flds (%esp) ; AVX_X86-NEXT: popl %eax +; AVX_X86-NEXT: .cfi_def_cfa_offset 4 ; AVX_X86-NEXT: retl entry: %0 = load i32, i32* %a diff --git a/test/CodeGen/X86/fast-isel-store.ll b/test/CodeGen/X86/fast-isel-store.ll index e359e6205636..e2412e9c5c04 100644 --- a/test/CodeGen/X86/fast-isel-store.ll +++ b/test/CodeGen/X86/fast-isel-store.ll @@ -375,6 +375,7 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double ; SSE64-NEXT: movupd %xmm0, (%eax) ; SSE64-NEXT: movupd %xmm1, 16(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVX32-LABEL: test_store_4xf64: @@ -413,6 +414,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4 ; SSE64-NEXT: movapd %xmm0, (%eax) ; SSE64-NEXT: movapd %xmm1, 16(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVX32-LABEL: test_store_4xf64_aligned: @@ -452,6 +454,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va ; SSE64-NEXT: movups %xmm2, 32(%eax) ; SSE64-NEXT: movups %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xi32: @@ -501,6 +504,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x ; SSE64-NEXT: movaps %xmm2, 32(%eax) ; SSE64-NEXT: movaps %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xi32_aligned: @@ -550,6 +554,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa ; SSE64-NEXT: movups %xmm2, 32(%eax) ; SSE64-NEXT: movups %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xf32: @@ -599,6 +604,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1 ; 
SSE64-NEXT: movaps %xmm2, 32(%eax) ; SSE64-NEXT: movaps %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xf32_aligned: @@ -656,6 +662,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double ; SSE64-NEXT: movupd %xmm2, 32(%eax) ; SSE64-NEXT: movupd %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_8xf64: @@ -682,6 +689,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double ; AVXONLY64-NEXT: vmovupd %ymm1, 32(%eax) ; AVXONLY64-NEXT: movl %ebp, %esp ; AVXONLY64-NEXT: popl %ebp +; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4 ; AVXONLY64-NEXT: retl ; ; AVX51232-LABEL: test_store_8xf64: @@ -729,6 +737,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; SSE64-NEXT: movapd %xmm2, 32(%eax) ; SSE64-NEXT: movapd %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp +; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_8xf64_aligned: @@ -755,6 +764,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; AVXONLY64-NEXT: vmovapd %ymm1, 32(%eax) ; AVXONLY64-NEXT: movl %ebp, %esp ; AVXONLY64-NEXT: popl %ebp +; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4 ; AVXONLY64-NEXT: retl ; ; AVX51232-LABEL: test_store_8xf64_aligned: diff --git a/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll b/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll index ba80c839fdda..ee64790d1d94 100644 --- a/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll +++ b/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll @@ -18,11 +18,15 @@ entry: } ; CHECK-LABEL: noDebug -; CHECK: addq $24, %rsp -; CHECK: popq %rbx -; CHECK-NEXT: popq %r14 -; CHECK-NEXT: retq - +; CHECK: addq $16, %rsp +; CHECK-NEXT: .cfi_adjust_cfa_offset -16 +; CHECK-NEXT: addq $8, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 24 +; 
CHECK-NEXT: popq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: popq %r14 +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: retq define void @withDebug() !dbg !18 { entry: @@ -42,9 +46,11 @@ entry: ; CHECK-LABEL: withDebug ; CHECK: callq printf ; CHECK: callq printf -; CHECK-NEXT: addq $24, %rsp +; CHECK-NEXT: addq $16, %rsp ; CHECK: popq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) diff --git a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll index f9ecf707810b..de9d6bf93d6c 100644 --- a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll +++ b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll @@ -9,6 +9,7 @@ define i64 @fn1NoDebug(i64 %a) { ; CHECK-LABEL: fn1NoDebug ; CHECK: popq %rcx +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: ret define i64 @fn1WithDebug(i64 %a) !dbg !4 { @@ -19,6 +20,7 @@ define i64 @fn1WithDebug(i64 %a) !dbg !4 { ; CHECK-LABEL: fn1WithDebug ; CHECK: popq %rcx +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: ret %struct.Buffer = type { i8, [63 x i8] } @@ -33,6 +35,7 @@ define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) { ; CHECK-NOT: sub ; CHECK: mov ; CHECK-NEXT: pop +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 { @@ -46,6 +49,7 @@ define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 { ; CHECK-NOT: sub ; CHECK: mov ; CHECK-NEXT: pop +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret declare i64 @fn(i64, i64) diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll index e32c7452b0c0..7126fb233e65 100644 --- a/test/CodeGen/X86/haddsub-2.ll +++ b/test/CodeGen/X86/haddsub-2.ll @@ -724,11 +724,17 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) { ; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = 
xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0] ; SSE3-NEXT: popq %rbx +; SSE3-NEXT: .cfi_def_cfa_offset 48 ; SSE3-NEXT: popq %r12 +; SSE3-NEXT: .cfi_def_cfa_offset 40 ; SSE3-NEXT: popq %r13 +; SSE3-NEXT: .cfi_def_cfa_offset 32 ; SSE3-NEXT: popq %r14 +; SSE3-NEXT: .cfi_def_cfa_offset 24 ; SSE3-NEXT: popq %r15 +; SSE3-NEXT: .cfi_def_cfa_offset 16 ; SSE3-NEXT: popq %rbp +; SSE3-NEXT: .cfi_def_cfa_offset 8 ; SSE3-NEXT: retq ; ; SSSE3-LABEL: avx2_vphadd_w_test: @@ -1351,11 +1357,17 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) { ; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0] ; SSE3-NEXT: popq %rbx +; SSE3-NEXT: .cfi_def_cfa_offset 48 ; SSE3-NEXT: popq %r12 +; SSE3-NEXT: .cfi_def_cfa_offset 40 ; SSE3-NEXT: popq %r13 +; SSE3-NEXT: .cfi_def_cfa_offset 32 ; SSE3-NEXT: popq %r14 +; SSE3-NEXT: .cfi_def_cfa_offset 24 ; SSE3-NEXT: popq %r15 +; SSE3-NEXT: .cfi_def_cfa_offset 16 ; SSE3-NEXT: popq %rbp +; SSE3-NEXT: .cfi_def_cfa_offset 8 ; SSE3-NEXT: retq ; ; SSSE3-LABEL: avx2_hadd_w: diff --git a/test/CodeGen/X86/hipe-cc64.ll b/test/CodeGen/X86/hipe-cc64.ll index efe07cf6301e..ce2d0e9c6717 100644 --- a/test/CodeGen/X86/hipe-cc64.ll +++ b/test/CodeGen/X86/hipe-cc64.ll @@ -87,6 +87,7 @@ define cc 11 { i64, i64, i64 } @tailcaller(i64 %hp, i64 %p) #0 { ; CHECK-NEXT: movl $47, %ecx ; CHECK-NEXT: movl $63, %r8d ; CHECK-NEXT: popq %rax + ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: jmp tailcallee %ret = tail call cc11 { i64, i64, i64 } @tailcallee(i64 %hp, i64 %p, i64 15, i64 31, i64 47, i64 63, i64 79) #1 diff --git a/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/test/CodeGen/X86/illegal-bitfield-loadstore.ll index fd503aa6c6ee..e3b25a539c1a 100644 --- a/test/CodeGen/X86/illegal-bitfield-loadstore.ll +++ b/test/CodeGen/X86/illegal-bitfield-loadstore.ll @@ -81,6 +81,7 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) { ; X86-NEXT: orl 
%edx, %eax ; X86-NEXT: movw %ax, (%ecx) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: i24_insert_bit: diff --git a/test/CodeGen/X86/imul.ll b/test/CodeGen/X86/imul.ll index e364b001f945..02782f721083 100644 --- a/test/CodeGen/X86/imul.ll +++ b/test/CodeGen/X86/imul.ll @@ -307,6 +307,7 @@ define i64 @test5(i64 %a) { ; X86-NEXT: subl %ecx, %edx ; X86-NEXT: subl %esi, %edx ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, -31 @@ -362,6 +363,7 @@ define i64 @test7(i64 %a) { ; X86-NEXT: subl %ecx, %edx ; X86-NEXT: subl %esi, %edx ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, -33 @@ -390,6 +392,7 @@ define i64 @testOverflow(i64 %a) { ; X86-NEXT: addl %esi, %edx ; X86-NEXT: subl {{[0-9]+}}(%esp), %edx ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, 9223372036854775807 diff --git a/test/CodeGen/X86/lea-opt-cse1.ll b/test/CodeGen/X86/lea-opt-cse1.ll index 05b47690e819..4c9ec3e0d7a3 100644 --- a/test/CodeGen/X86/lea-opt-cse1.ll +++ b/test/CodeGen/X86/lea-opt-cse1.ll @@ -30,6 +30,7 @@ define void @test_func(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr { ; X86-NEXT: leal 1(%edx,%ecx), %ecx ; X86-NEXT: movl %ecx, 16(%eax) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0 diff --git a/test/CodeGen/X86/lea-opt-cse2.ll b/test/CodeGen/X86/lea-opt-cse2.ll index 865dd49a6e1f..cee6f6792cb4 100644 --- a/test/CodeGen/X86/lea-opt-cse2.ll +++ b/test/CodeGen/X86/lea-opt-cse2.ll @@ -46,7 +46,9 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 { ; X86-NEXT: leal 1(%esi,%edx), %ecx ; X86-NEXT: movl %ecx, 16(%eax) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %edi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: br 
label %loop diff --git a/test/CodeGen/X86/lea-opt-cse3.ll b/test/CodeGen/X86/lea-opt-cse3.ll index 87949b40d487..ed3aff980362 100644 --- a/test/CodeGen/X86/lea-opt-cse3.ll +++ b/test/CodeGen/X86/lea-opt-cse3.ll @@ -91,6 +91,7 @@ define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 { ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB2_2: # %exit ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %mul = shl i32 %b, 2 @@ -143,6 +144,7 @@ define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_a ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB3_2: # %exit ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %mul = shl i32 %b, 1 diff --git a/test/CodeGen/X86/lea-opt-cse4.ll b/test/CodeGen/X86/lea-opt-cse4.ll index 31f31a73d44e..d068180c39cb 100644 --- a/test/CodeGen/X86/lea-opt-cse4.ll +++ b/test/CodeGen/X86/lea-opt-cse4.ll @@ -36,6 +36,7 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 { ; X86-NEXT: leal 1(%ecx,%edx), %ecx ; X86-NEXT: movl %ecx, 16(%eax) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0 @@ -110,7 +111,9 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 ; X86-NEXT: addl %ecx, %edx ; X86-NEXT: movl %edx, 16(%eax) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %edi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: br label %loop diff --git a/test/CodeGen/X86/legalize-shift-64.ll b/test/CodeGen/X86/legalize-shift-64.ll index ca4cfa5b8052..7dff2c20d5af 100644 --- a/test/CodeGen/X86/legalize-shift-64.ll +++ b/test/CodeGen/X86/legalize-shift-64.ll @@ -117,9 +117,13 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) { ; CHECK-NEXT: movl %esi, 4(%eax) ; CHECK-NEXT: movl %edi, (%eax) ; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popl %edi +; 
CHECK-NEXT: .cfi_def_cfa_offset 12 ; CHECK-NEXT: popl %ebx +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %ebp +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl $4 %shl = shl <2 x i64> %A, %B ret <2 x i64> %shl @@ -160,6 +164,7 @@ define i32 @test6() { ; CHECK-NEXT: .LBB5_4: # %if.then ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp +; CHECK-NEXT: .cfi_def_cfa %esp, 4 ; CHECK-NEXT: retl %x = alloca i32, align 4 %t = alloca i64, align 8 diff --git a/test/CodeGen/X86/live-out-reg-info.ll b/test/CodeGen/X86/live-out-reg-info.ll index b838065beea5..170f73593f60 100644 --- a/test/CodeGen/X86/live-out-reg-info.ll +++ b/test/CodeGen/X86/live-out-reg-info.ll @@ -18,6 +18,7 @@ define void @foo(i32 %a) { ; CHECK-NEXT: callq qux ; CHECK-NEXT: .LBB0_2: # %false ; CHECK-NEXT: popq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %t0 = lshr i32 %a, 23 br label %next diff --git a/test/CodeGen/X86/load-combine.ll b/test/CodeGen/X86/load-combine.ll index d1f5f41ac7bf..d46efc4b5eca 100644 --- a/test/CodeGen/X86/load-combine.ll +++ b/test/CodeGen/X86/load-combine.ll @@ -376,6 +376,7 @@ define i32 @load_i32_by_i8_bswap_uses(i32* %arg) { ; CHECK-NEXT: orl %ecx, %eax ; CHECK-NEXT: orl %edx, %eax ; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_bswap_uses: @@ -496,6 +497,7 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) { ; CHECK-NEXT: movzbl 3(%ecx), %eax ; CHECK-NEXT: orl %edx, %eax ; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_bswap_store_in_between: diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll index 94057f98703d..207175aae1a1 100644 --- a/test/CodeGen/X86/masked_gather_scatter.ll +++ b/test/CodeGen/X86/masked_gather_scatter.ll @@ -1690,6 +1690,7 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; 
KNL_32-NEXT: vmovdqa64 %zmm2, %zmm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp +; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_gather_16i64: @@ -1724,6 +1725,7 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; SKX_32-NEXT: vmovdqa64 %zmm2, %zmm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp +; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %res = call <16 x i64> @llvm.masked.gather.v16i64.v16p0i64(<16 x i64*> %ptrs, i32 4, <16 x i1> %mask, <16 x i64> %src0) ret <16 x i64> %res @@ -1807,6 +1809,7 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; KNL_32-NEXT: vmovapd %zmm2, %zmm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp +; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_gather_16f64: @@ -1841,6 +1844,7 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; SKX_32-NEXT: vmovapd %zmm2, %zmm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp +; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %res = call <16 x double> @llvm.masked.gather.v16f64.v16p0f64(<16 x double*> %ptrs, i32 4, <16 x i1> %mask, <16 x double> %src0) ret <16 x double> %res @@ -1922,6 +1926,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; KNL_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2} ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp +; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl ; @@ -1955,6 +1960,7 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; SKX_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2} ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp +; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: vzeroupper ; SKX_32-NEXT: retl call void @llvm.masked.scatter.v16i64.v16p0i64(<16 x i64> %src0, <16 x i64*> %ptrs, i32 4, <16 x i1> %mask) @@ -2038,6 +2044,7 @@ define void 
@test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; KNL_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2} ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp +; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl ; @@ -2071,6 +2078,7 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; SKX_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2} ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp +; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: vzeroupper ; SKX_32-NEXT: retl call void @llvm.masked.scatter.v16f64.v16p0f64(<16 x double> %src0, <16 x double*> %ptrs, i32 4, <16 x i1> %mask) @@ -2115,6 +2123,7 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; KNL_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp +; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_pr28312: @@ -2142,6 +2151,7 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; SKX_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp +; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %g1 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef) %g2 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef) diff --git a/test/CodeGen/X86/memset-nonzero.ll b/test/CodeGen/X86/memset-nonzero.ll index f0a957c9417c..98e09377ddb7 100644 --- a/test/CodeGen/X86/memset-nonzero.ll +++ b/test/CodeGen/X86/memset-nonzero.ll @@ -148,6 +148,7 @@ define void @memset_256_nonzero_bytes(i8* %x) { ; SSE-NEXT: movl $256, %edx # imm = 0x100 ; SSE-NEXT: callq memset ; SSE-NEXT: popq %rax +; SSE-NEXT: .cfi_def_cfa_offset 8 ; SSE-NEXT: retq ; ; SSE2FAST-LABEL: memset_256_nonzero_bytes: diff --git a/test/CodeGen/X86/merge-consecutive-loads-128.ll b/test/CodeGen/X86/merge-consecutive-loads-128.ll 
index e414f5554deb..b909b7c403bb 100644 --- a/test/CodeGen/X86/merge-consecutive-loads-128.ll +++ b/test/CodeGen/X86/merge-consecutive-loads-128.ll @@ -72,7 +72,9 @@ define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp { ; X32-SSE1-NEXT: movl %esi, 4(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_2i64_i64_12: @@ -384,6 +386,7 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp { ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: movl %ecx, 12(%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_23u5: @@ -435,7 +438,9 @@ define <4 x i32> @merge_4i32_i32_23u5_inc2(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: movl %ecx, 12(%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc2: @@ -490,7 +495,9 @@ define <4 x i32> @merge_4i32_i32_23u5_inc3(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: movl %ecx, 12(%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc3: @@ -649,7 +656,9 @@ define <4 x i32> @merge_4i32_i32_45zz_inc4(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl $0, 12(%eax) ; X32-SSE1-NEXT: movl $0, 8(%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc4: @@ -701,7 +710,9 @@ define <4 x i32> 
@merge_4i32_i32_45zz_inc5(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl $0, 12(%eax) ; X32-SSE1-NEXT: movl $0, 8(%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc5: @@ -751,7 +762,9 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s ; X32-SSE1-NEXT: movl %esi, 6(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_8i16_i16_23u567u9: @@ -897,9 +910,13 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin ; X32-SSE1-NEXT: movl %esi, 3(%eax) ; X32-SSE1-NEXT: movw %bp, (%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 16 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 12 ; X32-SSE1-NEXT: popl %ebx +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %ebp +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF: @@ -1129,7 +1146,9 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin ; X32-SSE1-NEXT: movl %esi, 4(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi +; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_2i64_i64_12_volatile: diff --git a/test/CodeGen/X86/movtopush.ll b/test/CodeGen/X86/movtopush.ll index 051c8a710c85..ddcc383b65e4 100644 --- a/test/CodeGen/X86/movtopush.ll +++ b/test/CodeGen/X86/movtopush.ll @@ -382,8 +382,10 @@ entry: ; LINUX: pushl $1 ; LINUX: .cfi_adjust_cfa_offset 4 ; LINUX: calll good -; LINUX: addl $28, %esp +; LINUX: addl $16, %esp ; LINUX: .cfi_adjust_cfa_offset -16 +; LINUX: 
addl $12, %esp +; LINUX: .cfi_def_cfa_offset 4 ; LINUX-NOT: add ; LINUX: retl define void @pr27140() optsize { diff --git a/test/CodeGen/X86/mul-constant-result.ll b/test/CodeGen/X86/mul-constant-result.ll index 011b63ce7269..f778397f889a 100644 --- a/test/CodeGen/X86/mul-constant-result.ll +++ b/test/CodeGen/X86/mul-constant-result.ll @@ -34,84 +34,116 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: .LBB0_6: ; X86-NEXT: addl %eax, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_39: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: .LBB0_40: ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_7: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_8: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $2, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_9: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_10: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_11: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (,%eax,8), %ecx ; X86-NEXT: jmp .LBB0_12 ; X86-NEXT: .LBB0_13: ; X86-NEXT: shll $3, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_14: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_15: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: 
retl ; X86-NEXT: .LBB0_16: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,2), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_17: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_18: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_19: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: jmp .LBB0_20 ; X86-NEXT: .LBB0_21: ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_22: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $4, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_23: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: shll $4, %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_24: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_25: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: shll $2, %ecx ; X86-NEXT: jmp .LBB0_12 @@ -119,20 +151,26 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_27: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax ; X86-NEXT: popl %esi +; X86-NEXT: 
.cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_28: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: .LBB0_20: ; X86-NEXT: leal (%eax,%ecx,4), %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_29: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: shll $3, %ecx ; X86-NEXT: jmp .LBB0_12 @@ -140,13 +178,17 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: shll $3, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_31: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_32: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: jmp .LBB0_12 @@ -154,21 +196,27 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_34: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_35: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: addl %eax, %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_36: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: shll $5, %ecx ; X86-NEXT: subl %eax, %ecx @@ -180,10 +228,13 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: subl %eax, %ecx ; X86-NEXT: movl %ecx, %eax ; 
X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_38: +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $5, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-HSW-LABEL: mult: @@ -857,8 +908,11 @@ define i32 @foo() local_unnamed_addr #0 { ; X86-NEXT: negl %ecx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %edi +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebx +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-HSW-LABEL: foo: @@ -1072,10 +1126,15 @@ define i32 @foo() local_unnamed_addr #0 { ; X64-HSW-NEXT: negl %ecx ; X64-HSW-NEXT: movl %ecx, %eax ; X64-HSW-NEXT: addq $8, %rsp +; X64-HSW-NEXT: .cfi_def_cfa_offset 40 ; X64-HSW-NEXT: popq %rbx +; X64-HSW-NEXT: .cfi_def_cfa_offset 32 ; X64-HSW-NEXT: popq %r14 +; X64-HSW-NEXT: .cfi_def_cfa_offset 24 ; X64-HSW-NEXT: popq %r15 +; X64-HSW-NEXT: .cfi_def_cfa_offset 16 ; X64-HSW-NEXT: popq %rbp +; X64-HSW-NEXT: .cfi_def_cfa_offset 8 ; X64-HSW-NEXT: retq %1 = tail call i32 @mult(i32 1, i32 0) %2 = icmp ne i32 %1, 1 diff --git a/test/CodeGen/X86/mul-i256.ll b/test/CodeGen/X86/mul-i256.ll index 0a48ae761ec6..1e05b95dda06 100644 --- a/test/CodeGen/X86/mul-i256.ll +++ b/test/CodeGen/X86/mul-i256.ll @@ -349,10 +349,15 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 { ; X32-NEXT: movl %eax, 24(%ecx) ; X32-NEXT: movl %edx, 28(%ecx) ; X32-NEXT: addl $88, %esp +; X32-NEXT: .cfi_def_cfa_offset 20 ; X32-NEXT: popl %esi +; X32-NEXT: .cfi_def_cfa_offset 16 ; X32-NEXT: popl %edi +; X32-NEXT: .cfi_def_cfa_offset 12 ; X32-NEXT: popl %ebx +; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebp +; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test: @@ -421,8 +426,11 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 { ; X64-NEXT: movq %rax, 16(%r9) ; X64-NEXT: movq %rdx, 24(%r9) ; X64-NEXT: popq %rbx +; X64-NEXT: .cfi_def_cfa_offset 24 ; X64-NEXT: popq %r14 +; 
X64-NEXT: .cfi_def_cfa_offset 16 ; X64-NEXT: popq %r15 +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: %av = load i256, i256* %a diff --git a/test/CodeGen/X86/mul128.ll b/test/CodeGen/X86/mul128.ll index 70a6173a19ff..0c11f17d8d1d 100644 --- a/test/CodeGen/X86/mul128.ll +++ b/test/CodeGen/X86/mul128.ll @@ -86,10 +86,15 @@ define i128 @foo(i128 %t, i128 %u) { ; X86-NEXT: movl %edx, 12(%ecx) ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: addl $8, %esp +; X86-NEXT: .cfi_def_cfa_offset 20 ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 16 ; X86-NEXT: popl %edi +; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %ebx +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebp +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl $4 %k = mul i128 %t, %u ret i128 %k diff --git a/test/CodeGen/X86/pr21792.ll b/test/CodeGen/X86/pr21792.ll index 74f6c5a361ff..54eb1fc7272b 100644 --- a/test/CodeGen/X86/pr21792.ll +++ b/test/CodeGen/X86/pr21792.ll @@ -28,6 +28,7 @@ define void @func(<4 x float> %vx) { ; CHECK-NEXT: leaq stuff+8(%r9), %r9 ; CHECK-NEXT: callq toto ; CHECK-NEXT: popq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %tmp2 = bitcast <4 x float> %vx to <2 x i64> diff --git a/test/CodeGen/X86/pr29061.ll b/test/CodeGen/X86/pr29061.ll index 0cbe75f9ad5d..b62d082507d6 100644 --- a/test/CodeGen/X86/pr29061.ll +++ b/test/CodeGen/X86/pr29061.ll @@ -15,6 +15,7 @@ define void @t1(i8 signext %c) { ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: popl %edi +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: tail call void asm sideeffect "", "{di},~{dirflag},~{fpsr},~{flags}"(i8 %c) @@ -32,6 +33,7 @@ define void @t2(i8 signext %c) { ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: tail call void asm sideeffect "", "{si},~{dirflag},~{fpsr},~{flags}"(i8 %c) diff --git a/test/CodeGen/X86/pr29112.ll b/test/CodeGen/X86/pr29112.ll index cc670eeb9788..d791936bd53e 
100644 --- a/test/CodeGen/X86/pr29112.ll +++ b/test/CodeGen/X86/pr29112.ll @@ -65,6 +65,7 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, < ; CHECK-NEXT: vaddps {{[0-9]+}}(%rsp), %xmm1, %xmm1 # 16-byte Folded Reload ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: addq $88, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a1 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> <i32 4, i32 20, i32 1, i32 17> diff --git a/test/CodeGen/X86/pr30430.ll b/test/CodeGen/X86/pr30430.ll index 0254c0940b89..06007a3a4cfa 100644 --- a/test/CodeGen/X86/pr30430.ll +++ b/test/CodeGen/X86/pr30430.ll @@ -108,6 +108,7 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float ; CHECK-NEXT: vmovss %xmm14, (%rsp) # 4-byte Spill ; CHECK-NEXT: movq %rbp, %rsp ; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: retq entry: %__A.addr.i = alloca float, align 4 diff --git a/test/CodeGen/X86/pr32241.ll b/test/CodeGen/X86/pr32241.ll index f48fef5f7fbc..02f3bb122913 100644 --- a/test/CodeGen/X86/pr32241.ll +++ b/test/CodeGen/X86/pr32241.ll @@ -50,7 +50,9 @@ define i32 @_Z3foov() { ; CHECK-NEXT: movw %dx, {{[0-9]+}}(%esp) ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: addl $16, %esp +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %aa = alloca i16, align 2 diff --git a/test/CodeGen/X86/pr32256.ll b/test/CodeGen/X86/pr32256.ll index f6e254aaad06..5b6126fbc76c 100644 --- a/test/CodeGen/X86/pr32256.ll +++ b/test/CodeGen/X86/pr32256.ll @@ -27,6 +27,7 @@ define void @_Z1av() { ; CHECK-NEXT: andb $1, %al ; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) ; CHECK-NEXT: addl $2, %esp +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %b = alloca i8, align 1 diff --git a/test/CodeGen/X86/pr32282.ll b/test/CodeGen/X86/pr32282.ll index d6e6f6eb107d..67a0332ac537 100644 --- a/test/CodeGen/X86/pr32282.ll +++ 
b/test/CodeGen/X86/pr32282.ll @@ -43,6 +43,7 @@ define void @foo() { ; X86-NEXT: orl %eax, %edx ; X86-NEXT: setne {{[0-9]+}}(%esp) ; X86-NEXT: popl %eax +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: foo: diff --git a/test/CodeGen/X86/pr32284.ll b/test/CodeGen/X86/pr32284.ll index 11eb6968709b..59be67f05792 100644 --- a/test/CodeGen/X86/pr32284.ll +++ b/test/CodeGen/X86/pr32284.ll @@ -71,6 +71,7 @@ define void @foo() { ; 686-O0-NEXT: movzbl %al, %ecx ; 686-O0-NEXT: movl %ecx, (%esp) ; 686-O0-NEXT: addl $8, %esp +; 686-O0-NEXT: .cfi_def_cfa_offset 4 ; 686-O0-NEXT: retl ; ; 686-LABEL: foo: @@ -88,6 +89,7 @@ define void @foo() { ; 686-NEXT: setle %dl ; 686-NEXT: movl %edx, {{[0-9]+}}(%esp) ; 686-NEXT: addl $8, %esp +; 686-NEXT: .cfi_def_cfa_offset 4 ; 686-NEXT: retl entry: %a = alloca i8, align 1 @@ -232,10 +234,15 @@ define void @f1() { ; 686-O0-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; 686-O0-NEXT: movl %esi, (%esp) # 4-byte Spill ; 686-O0-NEXT: addl $36, %esp +; 686-O0-NEXT: .cfi_def_cfa_offset 20 ; 686-O0-NEXT: popl %esi +; 686-O0-NEXT: .cfi_def_cfa_offset 16 ; 686-O0-NEXT: popl %edi +; 686-O0-NEXT: .cfi_def_cfa_offset 12 ; 686-O0-NEXT: popl %ebx +; 686-O0-NEXT: .cfi_def_cfa_offset 8 ; 686-O0-NEXT: popl %ebp +; 686-O0-NEXT: .cfi_def_cfa_offset 4 ; 686-O0-NEXT: retl ; ; 686-LABEL: f1: @@ -277,8 +284,11 @@ define void @f1() { ; 686-NEXT: movl %eax, _ZN8struct_210member_2_0E ; 686-NEXT: movl $0, _ZN8struct_210member_2_0E+4 ; 686-NEXT: addl $1, %esp +; 686-NEXT: .cfi_def_cfa_offset 12 ; 686-NEXT: popl %esi +; 686-NEXT: .cfi_def_cfa_offset 8 ; 686-NEXT: popl %edi +; 686-NEXT: .cfi_def_cfa_offset 4 ; 686-NEXT: retl entry: %a = alloca i8, align 1 @@ -392,8 +402,11 @@ define void @f2() { ; 686-O0-NEXT: movw %cx, %di ; 686-O0-NEXT: movw %di, (%eax) ; 686-O0-NEXT: addl $2, %esp +; 686-O0-NEXT: .cfi_def_cfa_offset 12 ; 686-O0-NEXT: popl %esi +; 686-O0-NEXT: .cfi_def_cfa_offset 8 ; 686-O0-NEXT: popl %edi +; 686-O0-NEXT: .cfi_def_cfa_offset 4 ; 
686-O0-NEXT: retl ; ; 686-LABEL: f2: @@ -414,6 +427,7 @@ define void @f2() { ; 686-NEXT: sete %dl ; 686-NEXT: movw %dx, (%eax) ; 686-NEXT: addl $2, %esp +; 686-NEXT: .cfi_def_cfa_offset 4 ; 686-NEXT: retl entry: %a = alloca i16, align 2 @@ -532,6 +546,7 @@ define void @f3() #0 { ; 686-O0-NEXT: popl %esi ; 686-O0-NEXT: popl %edi ; 686-O0-NEXT: popl %ebp +; 686-O0-NEXT: .cfi_def_cfa %esp, 4 ; 686-O0-NEXT: retl ; ; 686-LABEL: f3: @@ -558,6 +573,7 @@ define void @f3() #0 { ; 686-NEXT: movl %ecx, var_46 ; 686-NEXT: movl %ebp, %esp ; 686-NEXT: popl %ebp +; 686-NEXT: .cfi_def_cfa %esp, 4 ; 686-NEXT: retl entry: %a = alloca i64, align 8 diff --git a/test/CodeGen/X86/pr32329.ll b/test/CodeGen/X86/pr32329.ll index f6bdade24c6c..9d1bb90e824e 100644 --- a/test/CodeGen/X86/pr32329.ll +++ b/test/CodeGen/X86/pr32329.ll @@ -57,9 +57,13 @@ define void @foo() local_unnamed_addr { ; X86-NEXT: imull %eax, %ebx ; X86-NEXT: movb %bl, var_218 ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 16 ; X86-NEXT: popl %edi +; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %ebx +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebp +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: foo: diff --git a/test/CodeGen/X86/pr32345.ll b/test/CodeGen/X86/pr32345.ll index f6802887e9e4..2bdeca207312 100644 --- a/test/CodeGen/X86/pr32345.ll +++ b/test/CodeGen/X86/pr32345.ll @@ -84,6 +84,7 @@ define void @foo() { ; 6860-NEXT: popl %edi ; 6860-NEXT: popl %ebx ; 6860-NEXT: popl %ebp +; 6860-NEXT: .cfi_def_cfa %esp, 4 ; 6860-NEXT: retl ; ; X64-LABEL: foo: @@ -127,6 +128,7 @@ define void @foo() { ; 686-NEXT: movb %dl, (%eax) ; 686-NEXT: movl %ebp, %esp ; 686-NEXT: popl %ebp +; 686-NEXT: .cfi_def_cfa %esp, 4 ; 686-NEXT: retl bb: %tmp = alloca i64, align 8 diff --git a/test/CodeGen/X86/pr32451.ll b/test/CodeGen/X86/pr32451.ll index 67c0cb39f8c5..5b7d1373d340 100644 --- a/test/CodeGen/X86/pr32451.ll +++ b/test/CodeGen/X86/pr32451.ll @@ -30,7 +30,9 @@ define i8** @japi1_convert_690(i8**, 
i8***, i32) { ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; CHECK-NEXT: movl %eax, (%ecx) ; CHECK-NEXT: addl $16, %esp +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %ebx +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl top: %3 = alloca i8*** diff --git a/test/CodeGen/X86/pr34088.ll b/test/CodeGen/X86/pr34088.ll index 2049c5507c67..4d85722057f7 100644 --- a/test/CodeGen/X86/pr34088.ll +++ b/test/CodeGen/X86/pr34088.ll @@ -27,6 +27,7 @@ define i32 @pr34088() local_unnamed_addr { ; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp +; CHECK-NEXT: .cfi_def_cfa %esp, 4 ; CHECK-NEXT: retl entry: %foo = alloca %struct.Foo, align 4 diff --git a/test/CodeGen/X86/pr34653.ll b/test/CodeGen/X86/pr34653.ll index 4b16ffd33d50..129dbcacc95e 100644 --- a/test/CodeGen/X86/pr34653.ll +++ b/test/CodeGen/X86/pr34653.ll @@ -199,6 +199,7 @@ define void @pr34653() { ; CHECK-NEXT: vmovsd %xmm7, {{[0-9]+}}(%rsp) # 8-byte Spill ; CHECK-NEXT: movq %rbp, %rsp ; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq entry: diff --git a/test/CodeGen/X86/pr9743.ll b/test/CodeGen/X86/pr9743.ll index 73b3c7f835c5..ac3d45755108 100644 --- a/test/CodeGen/X86/pr9743.ll +++ b/test/CodeGen/X86/pr9743.ll @@ -11,4 +11,5 @@ define void @f() { ; CHECK-NEXT: movq %rsp, %rbp ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret diff --git a/test/CodeGen/X86/push-cfi-debug.ll b/test/CodeGen/X86/push-cfi-debug.ll index 7f438e306e4d..01fa12e87d01 100644 --- a/test/CodeGen/X86/push-cfi-debug.ll +++ b/test/CodeGen/X86/push-cfi-debug.ll @@ -23,8 +23,10 @@ declare x86_stdcallcc void @stdfoo(i32, i32) #0 ; CHECK: .cfi_adjust_cfa_offset 4 ; CHECK: calll stdfoo ; CHECK: .cfi_adjust_cfa_offset -8 -; CHECK: addl $20, %esp +; CHECK: addl $8, %esp ; CHECK: .cfi_adjust_cfa_offset -8 +; CHECK: addl $12, %esp +; CHECK: .cfi_def_cfa_offset 4 
define void @test1() #0 !dbg !4 { entry: tail call void @foo(i32 1, i32 2) #1, !dbg !10 diff --git a/test/CodeGen/X86/push-cfi-obj.ll b/test/CodeGen/X86/push-cfi-obj.ll index 33291ec3318a..2c9ec3340270 100644 --- a/test/CodeGen/X86/push-cfi-obj.ll +++ b/test/CodeGen/X86/push-cfi-obj.ll @@ -12,7 +12,7 @@ ; LINUX-NEXT: ] ; LINUX-NEXT: Address: 0x0 ; LINUX-NEXT: Offset: 0x68 -; LINUX-NEXT: Size: 64 +; LINUX-NEXT: Size: 72 ; LINUX-NEXT: Link: 0 ; LINUX-NEXT: Info: 0 ; LINUX-NEXT: AddressAlignment: 4 @@ -22,8 +22,9 @@ ; LINUX-NEXT: SectionData ( ; LINUX-NEXT: 0000: 1C000000 00000000 017A504C 5200017C |.........zPLR..|| ; LINUX-NEXT: 0010: 08070000 00000000 1B0C0404 88010000 |................| -; LINUX-NEXT: 0020: 1C000000 24000000 00000000 1D000000 |....$...........| +; LINUX-NEXT: 0020: 24000000 24000000 00000000 1D000000 |$...$...........| ; LINUX-NEXT: 0030: 04000000 00410E08 8502420D 05432E10 |.....A....B..C..| +; LINUX-NEXT: 0040: 540C0404 410C0508 |T...A...| ; LINUX-NEXT: ) declare i32 @__gxx_personality_v0(...) 
@@ -35,7 +36,7 @@ entry: to label %continue unwind label %cleanup continue: ret void -cleanup: +cleanup: landingpad { i8*, i32 } cleanup ret void diff --git a/test/CodeGen/X86/push-cfi.ll b/test/CodeGen/X86/push-cfi.ll index 91e579a8391b..44f8bf857c4c 100644 --- a/test/CodeGen/X86/push-cfi.ll +++ b/test/CodeGen/X86/push-cfi.ll @@ -74,8 +74,9 @@ cleanup: ; LINUX-NEXT: pushl $1 ; LINUX-NEXT: .cfi_adjust_cfa_offset 4 ; LINUX-NEXT: call -; LINUX-NEXT: addl $28, %esp +; LINUX-NEXT: addl $16, %esp ; LINUX: .cfi_adjust_cfa_offset -16 +; LINUX: addl $12, %esp ; DARWIN-NOT: .cfi_escape ; DARWIN-NOT: pushl define void @test2_nofp() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { diff --git a/test/CodeGen/X86/return-ext.ll b/test/CodeGen/X86/return-ext.ll index ef160f43b4aa..c66e518943a0 100644 --- a/test/CodeGen/X86/return-ext.ll +++ b/test/CodeGen/X86/return-ext.ll @@ -106,6 +106,7 @@ entry: ; CHECK: call ; CHECK-NEXT: movzbl ; CHECK-NEXT: {{pop|add}} +; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } @@ -120,6 +121,7 @@ entry: ; CHECK: call ; CHECK-NEXT: movzbl ; CHECK-NEXT: {{pop|add}} +; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } @@ -134,5 +136,6 @@ entry: ; CHECK: call ; CHECK-NEXT: movzwl ; CHECK-NEXT: {{pop|add}} +; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } diff --git a/test/CodeGen/X86/rtm.ll b/test/CodeGen/X86/rtm.ll index bd2d3e544bda..a1feeb5999bb 100644 --- a/test/CodeGen/X86/rtm.ll +++ b/test/CodeGen/X86/rtm.ll @@ -75,6 +75,7 @@ define void @f2(i32 %x) nounwind uwtable { ; X64-NEXT: xabort $1 ; X64-NEXT: callq f1 ; X64-NEXT: popq %rax +; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: %x.addr = alloca i32, align 4 diff --git a/test/CodeGen/X86/select-mmx.ll b/test/CodeGen/X86/select-mmx.ll index 795990e3c325..7ad8b6f1b9c7 100644 --- a/test/CodeGen/X86/select-mmx.ll +++ b/test/CodeGen/X86/select-mmx.ll @@ -48,6 +48,7 @@ define i64 @test47(i64 %arg) { ; I32-NEXT: movl {{[0-9]+}}(%esp), 
%edx ; I32-NEXT: movl %ebp, %esp ; I32-NEXT: popl %ebp +; I32-NEXT: .cfi_def_cfa %esp, 4 ; I32-NEXT: retl %cond = icmp eq i64 %arg, 0 %slct = select i1 %cond, x86_mmx bitcast (i64 7 to x86_mmx), x86_mmx bitcast (i64 0 to x86_mmx) @@ -100,6 +101,7 @@ define i64 @test49(i64 %arg, i64 %x, i64 %y) { ; I32-NEXT: movl {{[0-9]+}}(%esp), %edx ; I32-NEXT: movl %ebp, %esp ; I32-NEXT: popl %ebp +; I32-NEXT: .cfi_def_cfa %esp, 4 ; I32-NEXT: retl %cond = icmp eq i64 %arg, 0 %xmmx = bitcast i64 %x to x86_mmx diff --git a/test/CodeGen/X86/setcc-lowering.ll b/test/CodeGen/X86/setcc-lowering.ll index 359b8d68063a..5ae2cc5f35c1 100644 --- a/test/CodeGen/X86/setcc-lowering.ll +++ b/test/CodeGen/X86/setcc-lowering.ll @@ -89,6 +89,7 @@ define void @pr26232(i64 %a, <16 x i1> %b) { ; KNL-32-NEXT: jne .LBB1_1 ; KNL-32-NEXT: # BB#2: # %for_exit600 ; KNL-32-NEXT: popl %esi +; KNL-32-NEXT: .cfi_def_cfa_offset 4 ; KNL-32-NEXT: retl allocas: br label %for_test11.preheader diff --git a/test/CodeGen/X86/shrink_vmul.ll b/test/CodeGen/X86/shrink_vmul.ll index 79cf0f2c8f11..a2767205fe29 100644 --- a/test/CodeGen/X86/shrink_vmul.ll +++ b/test/CodeGen/X86/shrink_vmul.ll @@ -31,6 +31,7 @@ define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movq %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi8: @@ -89,6 +90,7 @@ define void @mul_4xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movdqu %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_4xi8: @@ -148,6 +150,7 @@ define void @mul_8xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: movdqu %xmm1, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, 
(%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_8xi8: @@ -220,6 +223,7 @@ define void @mul_16xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: movdqu %xmm4, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm3, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_16xi8: @@ -288,6 +292,7 @@ define void @mul_2xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movq %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi16: @@ -342,6 +347,7 @@ define void @mul_4xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movdqu %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_4xi16: @@ -399,6 +405,7 @@ define void @mul_8xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: movdqu %xmm1, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_8xi16: @@ -469,6 +476,7 @@ define void @mul_16xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i6 ; X86-NEXT: movdqu %xmm2, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_16xi16: @@ -541,6 +549,7 @@ define void @mul_2xi8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, ; X86-NEXT: psrad $16, %xmm0 ; X86-NEXT: movq %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi8_sext: @@ -606,6 +615,7 @@ define void @mul_2xi8_sext_zext(i8* nocapture readonly %a, 
i8* nocapture readonl ; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] ; X86-NEXT: movq %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi8_sext_zext: @@ -666,6 +676,7 @@ define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movq %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi16_sext: @@ -733,6 +744,7 @@ define void @mul_2xi16_sext_zext(i8* nocapture readonly %a, i8* nocapture readon ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] ; X86-NEXT: movq %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi16_sext_zext: @@ -813,6 +825,7 @@ define void @mul_16xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly % ; X86-NEXT: movdqu %xmm2, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_16xi16_sext: diff --git a/test/CodeGen/X86/statepoint-call-lowering.ll b/test/CodeGen/X86/statepoint-call-lowering.ll index bd2dd53b654a..d80c87b99b64 100644 --- a/test/CodeGen/X86/statepoint-call-lowering.ll +++ b/test/CodeGen/X86/statepoint-call-lowering.ll @@ -83,6 +83,7 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" { ; CHECK: callq return_i1 ; CHECK-NEXT: .Ltmp5: ; CHECK-NEXT: popq %rcx +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) 
@llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* %a) diff --git a/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll b/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll index b88ca03805f2..90f2002e2d45 100644 --- a/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll +++ b/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll @@ -69,6 +69,7 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" { ; CHECK: callq return_i1 ; CHECK-NEXT: .Ltmp4: ; CHECK-NEXT: popq %rcx +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 1, i32 0, i32 0, i32 addrspace(1)* %a) diff --git a/test/CodeGen/X86/statepoint-invoke.ll b/test/CodeGen/X86/statepoint-invoke.ll index 784b932addc8..5aa902546c16 100644 --- a/test/CodeGen/X86/statepoint-invoke.ll +++ b/test/CodeGen/X86/statepoint-invoke.ll @@ -142,6 +142,7 @@ normal_return: ; CHECK-LABEL: %normal_return ; CHECK: xorl %eax, %eax ; CHECK-NEXT: popq + ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %null.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 13, i32 13) %undef.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 14, i32 14) @@ -169,6 +170,7 @@ entry: normal_return: ; CHECK: leaq ; CHECK-NEXT: popq + ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %aa.rel = call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %sp, i32 13, i32 13) %aa.converted = bitcast i32 addrspace(1)* %aa.rel to i64 addrspace(1)* @@ -177,6 +179,7 @@ normal_return: exceptional_return: ; CHECK: movl $15 ; CHECK-NEXT: popq + ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %landing_pad = landingpad token cleanup diff --git a/test/CodeGen/X86/throws-cfi-fp.ll 
b/test/CodeGen/X86/throws-cfi-fp.ll new file mode 100644 index 000000000000..bacd965054c9 --- /dev/null +++ b/test/CodeGen/X86/throws-cfi-fp.ll @@ -0,0 +1,98 @@ +; RUN: llc %s -o - | FileCheck %s + +; ModuleID = 'throws-cfi-fp.cpp' +source_filename = "throws-cfi-fp.cpp" +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +$__clang_call_terminate = comdat any + +@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1 +@_ZTIi = external constant i8* +@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00" + +; Function Attrs: uwtable +define void @_Z6throwsv() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { + +; CHECK-LABEL: _Z6throwsv: +; CHECK: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 +; CHECK-NEXT: retq +; CHECK-NEXT: .LBB0_1: +; CHECK-NEXT: .cfi_def_cfa %rbp, 16 + +entry: + %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1 + br i1 %.b5, label %if.then, label %try.cont + +if.then: ; preds = %entry + %exception = tail call i8* @__cxa_allocate_exception(i64 4) + %0 = bitcast i8* %exception to i32* + store i32 1, i32* %0, align 16 + invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) + to label %unreachable unwind label %lpad + +lpad: ; preds = %if.then + %1 = landingpad { i8*, i32 } + catch i8* null + %2 = extractvalue { i8*, i32 } %1, 0 + %3 = tail call i8* @__cxa_begin_catch(i8* %2) + %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0)) + invoke void @__cxa_rethrow() + to label %unreachable unwind label %lpad1 + +lpad1: ; preds = %lpad + %4 = landingpad { i8*, i32 } + cleanup + invoke void @__cxa_end_catch() + to label %eh.resume unwind label %terminate.lpad + +try.cont: ; preds = %entry + ret void + +eh.resume: ; preds = %lpad1 + resume { i8*, i32 } %4 + +terminate.lpad: ; preds = %lpad1 + %5 = landingpad { i8*, i32 } + catch i8* null + %6 = extractvalue { i8*, i32 } %5, 0 + tail 
call void @__clang_call_terminate(i8* %6) + unreachable + +unreachable: ; preds = %lpad, %if.then + unreachable +} + +declare i8* @__cxa_allocate_exception(i64) + +declare void @__cxa_throw(i8*, i8*, i8*) + +declare i32 @__gxx_personality_v0(...) + +declare i8* @__cxa_begin_catch(i8*) + +declare void @__cxa_rethrow() + +declare void @__cxa_end_catch() + +; Function Attrs: noinline noreturn nounwind +declare void @__clang_call_terminate(i8*) + +declare void @_ZSt9terminatev() + +; Function Attrs: nounwind +declare i32 @puts(i8* nocapture readonly) + +attributes #0 = { "no-frame-pointer-elim"="true" } + +!llvm.dbg.cu = !{!2} +!llvm.module.flags = !{!8, !9, !10} + +!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 6.0.0 (https://github.com/llvm-mirror/clang.git 316ebefb7fff8ad324a08a694347500b6cd7c95f) (https://github.com/llvm-mirror/llvm.git dcae9be81fc17cdfbe989402354d3c8ecd0a2c79)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5) +!3 = !DIFile(filename: "throws-cfi-fp.cpp", directory: "epilogue-dwarf/test") +!4 = !{} +!5 = !{} +!8 = !{i32 2, !"Dwarf Version", i32 4} +!9 = !{i32 2, !"Debug Info Version", i32 3} +!10 = !{i32 1, !"wchar_size", i32 4} diff --git a/test/CodeGen/X86/throws-cfi-no-fp.ll b/test/CodeGen/X86/throws-cfi-no-fp.ll new file mode 100644 index 000000000000..1483e6b8483c --- /dev/null +++ b/test/CodeGen/X86/throws-cfi-no-fp.ll @@ -0,0 +1,97 @@ +; RUN: llc %s -o - | FileCheck %s + +; ModuleID = 'throws-cfi-no-fp.cpp' +source_filename = "throws-cfi-no-fp.cpp" +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +$__clang_call_terminate = comdat any + +@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1 +@_ZTIi = external constant i8* +@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00" + +; Function Attrs: uwtable +define void @_Z6throwsv() personality i8* bitcast (i32 (...)* 
@__gxx_personality_v0 to i8*) { + +; CHECK-LABEL: _Z6throwsv: +; CHECK: popq %rbx +; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: retq +; CHECK-NEXT: .LBB0_1: +; CHECK-NEXT: .cfi_def_cfa_offset 16 + +entry: + %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1 + br i1 %.b5, label %if.then, label %try.cont + +if.then: ; preds = %entry + %exception = tail call i8* @__cxa_allocate_exception(i64 4) + %0 = bitcast i8* %exception to i32* + store i32 1, i32* %0, align 16 + invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) + to label %unreachable unwind label %lpad + +lpad: ; preds = %if.then + %1 = landingpad { i8*, i32 } + catch i8* null + %2 = extractvalue { i8*, i32 } %1, 0 + %3 = tail call i8* @__cxa_begin_catch(i8* %2) + %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0)) + invoke void @__cxa_rethrow() #4 + to label %unreachable unwind label %lpad1 + +lpad1: ; preds = %lpad + %4 = landingpad { i8*, i32 } + cleanup + invoke void @__cxa_end_catch() + to label %eh.resume unwind label %terminate.lpad + +try.cont: ; preds = %entry + ret void + +eh.resume: ; preds = %lpad1 + resume { i8*, i32 } %4 + +terminate.lpad: ; preds = %lpad1 + %5 = landingpad { i8*, i32 } + catch i8* null + %6 = extractvalue { i8*, i32 } %5, 0 + tail call void @__clang_call_terminate(i8* %6) + unreachable + +unreachable: ; preds = %lpad, %if.then + unreachable +} + +declare i8* @__cxa_allocate_exception(i64) + +declare void @__cxa_throw(i8*, i8*, i8*) + +declare i32 @__gxx_personality_v0(...) 
+ +declare i8* @__cxa_begin_catch(i8*) + +declare void @__cxa_rethrow() + +declare void @__cxa_end_catch() + +; Function Attrs: noinline noreturn nounwind +declare void @__clang_call_terminate(i8*) + +declare void @_ZSt9terminatev() + + +; Function Attrs: nounwind +declare i32 @puts(i8* nocapture readonly) + +!llvm.dbg.cu = !{!2} +!llvm.module.flags = !{!8, !9, !10} + +!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 6.0.0 (https://github.com/llvm-mirror/clang.git 316ebefb7fff8ad324a08a694347500b6cd7c95f) (https://github.com/llvm-mirror/llvm.git dcae9be81fc17cdfbe989402354d3c8ecd0a2c79)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5) +!3 = !DIFile(filename: "throws-cfi-no-fp.cpp", directory: "epilogue-dwarf/test") +!4 = !{} +!5 = !{} +!8 = !{i32 2, !"Dwarf Version", i32 4} +!9 = !{i32 2, !"Debug Info Version", i32 3} +!10 = !{i32 1, !"wchar_size", i32 4} diff --git a/test/CodeGen/X86/vector-sext.ll b/test/CodeGen/X86/vector-sext.ll index cd4b237735f1..25377f267996 100644 --- a/test/CodeGen/X86/vector-sext.ll +++ b/test/CodeGen/X86/vector-sext.ll @@ -3333,11 +3333,17 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) { ; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: popq %rbx +; AVX1-NEXT: .cfi_def_cfa_offset 48 ; AVX1-NEXT: popq %r12 +; AVX1-NEXT: .cfi_def_cfa_offset 40 ; AVX1-NEXT: popq %r13 +; AVX1-NEXT: .cfi_def_cfa_offset 32 ; AVX1-NEXT: popq %r14 +; AVX1-NEXT: .cfi_def_cfa_offset 24 ; AVX1-NEXT: popq %r15 +; AVX1-NEXT: .cfi_def_cfa_offset 16 ; AVX1-NEXT: popq %rbp +; AVX1-NEXT: .cfi_def_cfa_offset 8 ; AVX1-NEXT: retq ; ; AVX2-LABEL: load_sext_16i1_to_16i16: @@ -3424,11 +3430,17 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) { ; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: popq %rbx +; AVX2-NEXT: .cfi_def_cfa_offset 48 ; AVX2-NEXT: 
popq %r12 +; AVX2-NEXT: .cfi_def_cfa_offset 40 ; AVX2-NEXT: popq %r13 +; AVX2-NEXT: .cfi_def_cfa_offset 32 ; AVX2-NEXT: popq %r14 +; AVX2-NEXT: .cfi_def_cfa_offset 24 ; AVX2-NEXT: popq %r15 +; AVX2-NEXT: .cfi_def_cfa_offset 16 ; AVX2-NEXT: popq %rbp +; AVX2-NEXT: .cfi_def_cfa_offset 8 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: load_sext_16i1_to_16i16: @@ -4824,6 +4836,7 @@ define i32 @sext_2i8_to_i32(<16 x i8> %A) nounwind uwtable readnone ssp { ; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0 ; X32-SSE41-NEXT: movd %xmm0, %eax ; X32-SSE41-NEXT: popl %ecx +; X32-SSE41-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE41-NEXT: retl entry: %Shuf = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1> diff --git a/test/CodeGen/X86/vector-shuffle-avx512.ll b/test/CodeGen/X86/vector-shuffle-avx512.ll index efbe5586747f..b107b60cd6d2 100644 --- a/test/CodeGen/X86/vector-shuffle-avx512.ll +++ b/test/CodeGen/X86/vector-shuffle-avx512.ll @@ -619,6 +619,7 @@ define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){ ; KNL32-NEXT: vpblendvb %ymm3, 8(%ebp), %ymm1, %ymm1 ; KNL32-NEXT: movl %ebp, %esp ; KNL32-NEXT: popl %ebp +; KNL32-NEXT: .cfi_def_cfa %esp, 4 ; KNL32-NEXT: retl entry: %0 = shufflevector <64 x i8> %A, <64 x i8> %W, <64 x i32> <i32 64, i32 1, i32 66, i32 3, i32 68, i32 5, i32 70, i32 7, i32 72, i32 9, i32 74, i32 11, i32 76, i32 13, i32 78, i32 15, i32 80, i32 17, i32 82, i32 19, i32 84, i32 21, i32 86, i32 23, i32 88, i32 25, i32 90, i32 27, i32 92, i32 29, i32 94, i32 31, i32 96, i32 33, i32 98, i32 35, i32 100, i32 37, i32 102, i32 39, i32 104, i32 41, i32 106, i32 43, i32 108, i32 45, i32 110, i32 47, i32 112, i32 49, i32 114, i32 51, i32 116, i32 53, i32 118, i32 55, i32 120, i32 57, i32 122, i32 59, i32 124, i32 61, i32 126, i32 63> @@ -659,6 +660,7 @@ define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){ ; KNL32-NEXT: vpblendw {{.*#+}} ymm1 = 
mem[0],ymm1[1],mem[2],ymm1[3],mem[4],ymm1[5],mem[6],ymm1[7],mem[8],ymm1[9],mem[10],ymm1[11],mem[12],ymm1[13],mem[14],ymm1[15] ; KNL32-NEXT: movl %ebp, %esp ; KNL32-NEXT: popl %ebp +; KNL32-NEXT: .cfi_def_cfa %esp, 4 ; KNL32-NEXT: retl entry: %0 = shufflevector <32 x i16> %A, <32 x i16> %W, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31> diff --git a/test/CodeGen/X86/vector-shuffle-v1.ll b/test/CodeGen/X86/vector-shuffle-v1.ll index 8d057290085c..0e690347a543 100644 --- a/test/CodeGen/X86/vector-shuffle-v1.ll +++ b/test/CodeGen/X86/vector-shuffle-v1.ll @@ -630,6 +630,7 @@ define i64 @shuf64i1_zero(i64 %a) { ; AVX512F-NEXT: orq %rcx, %rax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp +; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -662,6 +663,7 @@ define i64 @shuf64i1_zero(i64 %a) { ; AVX512VL-NEXT: orq %rcx, %rax ; AVX512VL-NEXT: movq %rbp, %rsp ; AVX512VL-NEXT: popq %rbp +; AVX512VL-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; diff --git a/test/CodeGen/X86/wide-integer-cmp.ll b/test/CodeGen/X86/wide-integer-cmp.ll index 97460b36a749..9bd53c6fbd35 100644 --- a/test/CodeGen/X86/wide-integer-cmp.ll +++ b/test/CodeGen/X86/wide-integer-cmp.ll @@ -105,10 +105,13 @@ define i32 @test_wide(i128 %a, i128 %b) { ; CHECK-NEXT: # BB#1: # %bb1 ; CHECK-NEXT: movl $1, %eax ; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; CHECK-NEXT: .LBB4_2: # %bb2 +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: movl $2, %eax ; CHECK-NEXT: popl %esi +; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %cmp = icmp slt i128 %a, %b diff --git a/test/CodeGen/X86/x86-framelowering-trap.ll b/test/CodeGen/X86/x86-framelowering-trap.ll index 
f1590abcae8b..89f4528fb06d 100644 --- a/test/CodeGen/X86/x86-framelowering-trap.ll +++ b/test/CodeGen/X86/x86-framelowering-trap.ll @@ -6,6 +6,7 @@ target triple = "x86_64-unknown-linux-gnu" ; CHECK: pushq ; CHECK: ud2 ; CHECK-NEXT: popq +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq define void @bar() { entry: diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll index acad9f771fc7..bc6a6ea205c1 100644 --- a/test/CodeGen/X86/x86-interleaved-access.ll +++ b/test/CodeGen/X86/x86-interleaved-access.ll @@ -1816,6 +1816,7 @@ define void @interleaved_store_vf64_i8_stride4(<64 x i8> %a, <64 x i8> %b, <64 x ; AVX1-NEXT: vmovaps %ymm9, 64(%rdi) ; AVX1-NEXT: vmovaps %ymm8, (%rdi) ; AVX1-NEXT: addq $24, %rsp +; AVX1-NEXT: .cfi_def_cfa_offset 8 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; diff --git a/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll index 763d764698dd..929dafbfc21d 100644 --- a/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll +++ b/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll @@ -20,6 +20,7 @@ define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 { ; CHECK-NEXT: movl $4, %eax ; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload ; CHECK-NEXT: popq %rdx +; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq call void asm sideeffect "", "~{rax},~{rdx},~{xmm1},~{rdi},~{rsi},~{xmm0}"() ret i32 4 |