//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectShift(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
                                   MachineFunction &MF) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}
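
// Note: the index returned here is the position RC occupies inside a wider
// general-purpose register; e.g. $eax is the sub_32bit subregister of $rax.
// GR64 falls through to X86::NoSubRegister since it is not a subregister of
// any wider class.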

static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform the truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
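
// Illustrative example of the widening branch above: when ABI lowering
// produces a COPY from a 32-bit virtual register into a 64-bit physical
// register (say $rax), the copy source is first widened:
//   %ext:gr64 = SUBREG_TO_REG 0, %src:gr32, %subreg.sub_32bit
//   $rax = COPY %ext
// SUBREG_TO_REG's leading zero immediate asserts that the bits outside the
// subregister are zero, so the widening needs no real instruction.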

bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return selectShift(I, MRI, MF);
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectDivRem(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}

unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
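
// A note on the vector cases above: the aligned forms (MOVAPS and variants)
// fault on addresses that are not aligned to the full vector width, so the
// Alignment parameter selects the unaligned MOVUPS family whenever the known
// alignment is smaller. For example, a 128-bit load with only 8-byte
// alignment selects MOVUPSrm (or the matching VLX/AVX512 variant), while
// 16-byte alignment permits MOVAPSrm.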

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
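
// For reference: X86AddressMode models the general x86 memory operand
//   Base + Scale * Index + Disp (with an optional segment),
// and addFullAddress() appends exactly those five machine operands. The
// helper above only ever fills in the base (register or frame index) and a
// 32-bit displacement; scale and index keep their defaults of 1 and no
// register.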

bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact that the
    // appropriate MMO is already on the instruction we're mutating, and thus
    // we don't need to make any changes. So long as we select an opcode which
    // is capable of loading or storing the appropriate size atomically, the
    // rest of the backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlignment() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to compute both frame-index addresses and GEPs.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
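
// A worked example of the G_GEP branch (illustrative MIR):
//   %ptr:gpr(p0) = G_GEP %base:gpr(p0), %off:gpr(s64)
// is rewritten in place into
//   %ptr = LEA64r %base, 1, %off, 0, $noreg
// i.e. base = %base, scale = 1, index = %off, displacement = 0, no segment.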

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
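
// In the RIP-relative case the result is roughly
//   %dst = LEA64r $rip, 1, $noreg, @the_global, $noreg
// with the global carried in the displacement slot, where the assembler and
// linker resolve it to a 32-bit PC-relative offset.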

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
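
// Note on the 64-bit case: MOV64ri32 encodes a 32-bit immediate that the CPU
// sign-extends to 64 bits, so it is only correct when isInt<32>(Val);
// anything wider needs the full 10-byte MOV64ri encoding.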

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating-point register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector class and goes
  // into a floating-point class, just replace it with a copy, as we are able
  // to select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
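
// For example (illustrative MIR), an s64 -> s32 truncate on the GPR bank
//   %dst:gpr(s32) = G_TRUNC %src:gpr(s64)
// is mutated in place into a subregister copy:
//   %dst = COPY %src.sub_32bit
// which costs nothing at runtime, since the low 32 bits of a GR64 register
// are directly addressable.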

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");

  const static struct ZextEntry {
    LLT SrcTy;
    LLT DstTy;
    unsigned MovOp;
    bool NeedSubregToReg;
  } OpTable[] = {
      {LLT::scalar(8), LLT::scalar(16), X86::MOVZX16rr8, false},  // i8  => i16
      {LLT::scalar(8), LLT::scalar(64), X86::MOVZX32rr8, true},   // i8  => i64
      {LLT::scalar(16), LLT::scalar(64), X86::MOVZX32rr16, true}, // i16 => i64
      {LLT::scalar(32), LLT::scalar(64), 0, true}                 // i32 => i64
  };

  auto ZextEntryIt =
      std::find_if(std::begin(OpTable), std::end(OpTable),
                   [SrcTy, DstTy](const ZextEntry &El) {
                     return El.DstTy == DstTy && El.SrcTy == SrcTy;
                   });

  // Here we try to select Zext into a MOVZ and/or SUBREG_TO_REG instruction.
  if (ZextEntryIt != std::end(OpTable)) {
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
    const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
    const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

    if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
        !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }

    unsigned TransitRegTo = DstReg;
    unsigned TransitRegFrom = SrcReg;
    if (ZextEntryIt->MovOp) {
      // If we select Zext into MOVZ + SUBREG_TO_REG, we need to have
      // a transit register in between: create it here.
      if (ZextEntryIt->NeedSubregToReg) {
        TransitRegFrom = MRI.createVirtualRegister(
            getRegClass(LLT::scalar(32), DstReg, MRI));
        TransitRegTo = TransitRegFrom;
      }

      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ZextEntryIt->MovOp))
          .addDef(TransitRegTo)
          .addReg(SrcReg);
    }
    if (ZextEntryIt->NeedSubregToReg) {
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(TargetOpcode::SUBREG_TO_REG))
          .addDef(DstReg)
          .addImm(0)
          .addReg(TransitRegFrom)
          .addImm(X86::sub_32bit);
    }
    I.eraseFromParent();
    return true;
  }

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
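
// Two illustrative examples of the selections above:
//  * i8 -> i64 goes through OpTable: on x86-64 a write to a 32-bit register
//    implicitly zeroes bits 63:32, so it suffices to emit
//      %t:gr32 = MOVZX32rr8 %src
//      %dst:gr64 = SUBREG_TO_REG 0, %t, %subreg.sub_32bit
//  * s1 -> s32 takes the masking path, since only the low bit of the source
//    byte is meaningful:
//      %wide:gr32 = SUBREG_TO_REG 0, %src, %subreg.sub_8bit
//      %dst:gr32 = AND32ri8 %wide, 1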

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point class
  // and goes into a vector class, just replace it with a copy, as we are
  // able to select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
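
// The net effect (illustrative MIR): a comparison such as
//   %res:gpr(s8) = G_ICMP intpred(slt), %a(s32), %b(s32)
// becomes the familiar flag-producing pair
//   CMP32rr %a, %b, implicit-def $eflags
//   %res:gr8 = SETCCr 12, implicit $eflags   ; 12 == X86::COND_L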

bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  unsigned LhsReg = I.getOperand(2).getReg();
  unsigned RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  unsigned ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    unsigned FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    unsigned FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               ResultReg)
           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
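
// Why FCMP_OEQ and FCMP_UNE need two SETCCs: UCOMISS/UCOMISD report the
// unordered result through PF, and ZF alone cannot distinguish "equal" from
// "unordered" (both set ZF). Ordered-equal is therefore computed as
//   SETE %f1; SETNP %f2; AND8rr %f1, %f2
// (ZF set AND PF clear), and unordered-not-equal dually as SETNE/SETP/OR8rr.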

bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the CarryIn def instruction, looking through G_TRUNCs.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // The carry was set by a previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // The carry-in is a constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
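
// In short: a carry-in that comes from a previous G_UADDE is copied back into
// $eflags so ADC32rr can consume the hardware carry chain, while a
// constant-zero carry-in degenerates to a plain ADD32rr; any other producer
// is rejected. The carry-out is modeled as a COPY of $eflags.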

bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not extracting a subvector.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
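
// Note: the VEXTRACT*/VINSERT* immediates count lanes of the smaller vector,
// not bits, hence the division above. E.g. extracting bits [255:128] of a
// 256-bit source as a 128-bit value (bit index 128) uses immediate 1.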

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not inserting a subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst, CoverageInfo))
      return false;
  }

  I.eraseFromParent();
  return true;
}
| 
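// Illustration (MIR sketch; register names invented): a two-way unmerge such
// as
//
//   %lo:_(<4 x s32>), %hi:_(<4 x s32>) = G_UNMERGE_VALUES %src:_(<8 x s32>)
//
// is rewritten above into bit-offset extracts that are then selected
// recursively:
//
//   %lo:_(<4 x s32>) = G_EXTRACT %src:_(<8 x s32>), 0
//   %hi:_(<4 x s32>) = G_EXTRACT %src:_(<8 x s32>), 128
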
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split into a chain of inserts.
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use a subregister copy.
  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst, CoverageInfo))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst, CoverageInfo))
    return false;

  I.eraseFromParent();
  return true;
}

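// Illustration (MIR sketch; register names invented): concatenating two
// 128-bit sources into a 256-bit destination,
//
//   %dst:_(<8 x s32>) = G_CONCAT_VECTORS %a:_(<4 x s32>), %b:_(<4 x s32>)
//
// becomes a subreg copy of %a into a fresh 256-bit register followed by a
// G_INSERT of %b at bit offset 128, each selected recursively:
//
//   undef %t0.sub_xmm:vr256 = COPY %a
//   %t1:_(<8 x s32>) = G_INSERT %t0, %b, 128
//   %dst = COPY %t1
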
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

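// Illustration: selectCondBranch lowers
//
//   G_BRCOND %cond(s1), %bb.true
//
// into a test of the condition's low bit followed by a conditional jump,
// roughly
//
//   TEST8ri %cond, 1, implicit-def $eflags
//   JCC_1 %bb.true, X86::COND_NE, implicit $eflags
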
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  // Constant-pool and memory-operand alignment is measured in bytes, not
  // bits.
  unsigned Align = DstTy.getSizeInBits() / 8;
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the X86-64 non-small code models, GVs (and friends) are 64 bits,
    // so they cannot be folded into immediate fields.

    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Align);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and for X86-64 in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAG ISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

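// Illustration (the exact opcode comes from getLoadStoreOp): under the small
// code model on x86-64, a double G_FCONSTANT typically becomes a RIP-relative
// constant-pool load, roughly
//
//   %dst:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg
//
// while under the large 64-bit code model the pool address is first
// materialized into a GR64 via MOV64ri and the load goes through that
// register.
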
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

// Currently GlobalISel TableGen generates patterns for shift-by-immediate and
// shift-by-one, but takes the shift count as i8. In G_LSHR/G_ASHR/G_SHL, as in
// LLVM IR, both operands have the same type, so for now only i8 shifts can use
// the auto-generated TableGen patterns.
bool X86InstructionSelector::selectShift(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {

  assert((I.getOpcode() == TargetOpcode::G_SHL ||
          I.getOpcode() == TargetOpcode::G_ASHR ||
          I.getOpcode() == TargetOpcode::G_LSHR) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);

  const static struct ShiftEntry {
    unsigned SizeInBits;
    unsigned OpLSHR;
    unsigned OpASHR;
    unsigned OpSHL;
  } OpTable[] = {
      {8, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL},     // i8
      {16, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL}, // i16
      {32, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
      {64, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL}  // i64
  };

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  auto ShiftEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [DstTy](const ShiftEntry &El) {
        return El.SizeInBits == DstTy.getSizeInBits();
      });
  if (ShiftEntryIt == std::end(OpTable))
    return false;

  unsigned Opcode = 0;
  switch (I.getOpcode()) {
  case TargetOpcode::G_SHL:
    Opcode = ShiftEntryIt->OpSHL;
    break;
  case TargetOpcode::G_ASHR:
    Opcode = ShiftEntryIt->OpASHR;
    break;
  case TargetOpcode::G_LSHR:
    Opcode = ShiftEntryIt->OpLSHR;
    break;
  default:
    return false;
  }

  unsigned Op0Reg = I.getOperand(1).getReg();
  unsigned Op1Reg = I.getOperand(2).getReg();

  assert(MRI.getType(Op1Reg).getSizeInBits() == 8 &&
         "shift count is expected to be an i8");

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          X86::CL)
      .addReg(Op1Reg);

  MachineInstr &ShiftInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg);

  constrainSelectedInstRegOperands(ShiftInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

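// Illustration: a 32-bit G_SHL by a variable amount is lowered above to a
// copy of the 8-bit count into the fixed CL register followed by the CL-form
// shift, roughly
//
//   $cl = COPY %amount(s8)
//   %dst:gr32 = SHL32rCL %src, implicit $cl, implicit-def $eflags
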
bool X86InstructionSelector::selectDivRem(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  // The implementation of this function is taken from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned Op1Reg = I.getOperand(1).getReg();
  const unsigned Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 4;   // SDiv, SRem, UDiv, URem
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;
  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg.  The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct DivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct DivRemResult {
      unsigned OpDivRem;        // The specific DIV/IDIV opcode to use.
      unsigned OpSignExtend;    // Opcode for sign-extending lowreg into
                                // highreg, or copying a zero into highreg.
      unsigned OpCopy;          // Opcode for copying dividend into lowreg, or
                                // zero/sign-extending into lowreg for i8.
      unsigned DivRemResultReg; // Register containing the desired result.
      bool IsOpSigned;          // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},    // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},    // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
       }},                                                // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},    // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},    // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
       }},                                                 // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},    // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},    // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
       }},                                                 // i64
  };

  auto OpEntryIt = std::find_if(std::begin(OpTable), std::end(OpTable),
                                [RegTy](const DivRemEntry &El) {
                                  return El.SizeInBits == RegTy.getSizeInBits();
                                });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  }

  const DivRemEntry &TypeEntry = *OpEntryIt;
  const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into the low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);
  // Zero-extend or sign-extend into the high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      unsigned Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }
  // Generate the DIV/IDIV instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpDivRem))
      .addReg(Op2Reg);
  // For i8 remainder, we can't reference AH directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference AX
  // instead to prevent AH references in a REX instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if ((I.getOpcode() == TargetOpcode::G_SREM ||
       I.getOpcode() == TargetOpcode::G_UREM) &&
      OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
    unsigned SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    unsigned ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(DstReg)
        .addImm(0)
        .addReg(ResultSuperReg)
        .addImm(X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.DivRemResultReg);
  }
  I.eraseFromParent();
  return true;
}

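// Worked example (illustrative): per the table above, a 32-bit G_SDIV
// expands into
//
//   $eax = COPY %num           ; dividend into the low register
//   CDQ                        ; sign-extend $eax into $edx
//   IDIV32r %den               ; quotient in $eax, remainder in $edx
//   %dst = COPY $eax
//
// For an i8 remainder on x86-64, the result is instead recovered by copying
// $ax into a GR16, shifting right by 8 with SHR16ri, and taking the low byte
// via SUBREG_TO_REG, which avoids a REX-encoded reference to $ah.
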
bool X86InstructionSelector::selectIntrinsicWSideEffects(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {

  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "unexpected instruction");

  if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));

  I.eraseFromParent();
  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}