//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
                                        CodeGenCoverage &CoverageInfo) {
  MRI = &MF.getRegInfo();
  InstructionSelector::setupMF(MF, KB, CoverageInfo);
}

bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  if (Register::isPhysicalRegister(Reg))
    return Reg == TRI.getVCC();

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.RemoveOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}
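
// Copies into the VCC bank need special handling: a copy from SCC only needs
// its register class constrained, while a copy from a non-boolean source is
// expanded below into an AND with 1 (the high bits cannot be trusted) followed
// by a V_CMP_NE against 0 to materialize the wave-wide boolean.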
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      Register MaskedReg = MRI->createVirtualRegister(SrcRC);

      // We can't trust the high bits at this point, so clear them.

      // TODO: Skip masking high bits if def is known boolean.

      unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
        AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
      BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
        .addImm(1)
        .addReg(SrcReg);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
        .addImm(0)
        .addReg(MaskedReg);

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    // Don't constrain the source register to a class so the def instruction
    // handles it (unless it's undef).
    //
    // FIXME: This is a hack. When selecting the def, we need to know
    // specifically that the result is VCCRegBank, and not just an SGPR
    // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
    if (Src.isUndef()) {
      const TargetRegisterClass *SrcRC =
        TRI.getConstrainedRegClassForOperand(Src, *MRI);
      if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
        return false;
    }

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (Register::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }
  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}
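
// Extract a 32-bit half of the 64-bit operand MO, either by emitting a
// subregister copy (for register operands) or by splitting the immediate.
// For example, an immediate of 0x1122334455667788 yields 0x55667788 for
// AMDGPU::sub0 and 0x11223344 for AMDGPU::sub1.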
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  MachineOperand &Dst = I.getOperand(0);
  MachineOperand &Src0 = I.getOperand(1);
  MachineOperand &Src1 = I.getOperand(2);
  Register DstReg = Dst.getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() == AMDGPU::VCCRegBankID) {
    const TargetRegisterClass *RC = TRI.getBoolRC();
    unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
                                           RC == &AMDGPU::SReg_64RegClass);
    I.setDesc(TII.get(InstOpc));
    // Dead implicit-def of scc
    I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                           true, // isImp
                                           false, // isKill
                                           true)); // isDead

    // FIXME: Hack to avoid turning the register bank into a register class.
    // The selector for G_ICMP relies on seeing that the register bank for the
    // result is VCC. In wave32 if we constrain the registers to SReg_32 here,
    // it will be ambiguous whether it's a scalar or vector bool.
    if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
      MRI->setRegClass(Src0.getReg(), RC);
    if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
      MRI->setRegClass(Src1.getReg(), RC);

    return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
  }

  // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
  // the result?
  if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
    unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
    I.setDesc(TII.get(InstOpc));
    // Dead implicit-def of scc
    I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                           true, // isImp
                                           false, // isKill
                                           true)); // isDead
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  return false;
}
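
// 32-bit adds/subs select to a single scalar or vector instruction. 64-bit
// values are split into 32-bit halves below: the low halves are added with a
// carry-out (S_ADD_U32 / V_ADD_I32), the high halves consume it (S_ADDC_U32 /
// V_ADDC_U32), and the result is reassembled with a REG_SEQUENCE.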
bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}
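
// For a VCC-bank carry-out, these select directly to the VALU carry
// instructions. For a scalar carry, the carry-in is first copied into SCC,
// the scalar add/sub implicitly defines SCC, and the carry-out is copied
// back out of SCC into the second result register.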
bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have an
    // unsigned carry out despite the _i32 name. These were renamed in VI to
    // _U32.
    // FIXME: We should probably rename the opcodes here.
    unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

// TODO: We should probably legalize these to only using 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC)
    return false;
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}
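
// G_MERGE_VALUES lowers to a REG_SEQUENCE over the 32-bit-or-wider sources,
// and G_UNMERGE_VALUES to one subregister COPY per destination, with the
// subregister indices taken from getRegSplitParts for the part size.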
bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  const unsigned SrcFlags = getUndefRegState(Src.isUndef());

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, SrcFlags, SubRegs[I]);

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

static bool isZero(Register Reg, const MachineRegisterInfo &MRI) {
  int64_t Val;
  return mi_match(Reg, MRI, m_ICst(Val)) && Val == 0;
}

bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
  MachineInstr &MI) const {
  if (selectImpl(MI, *CoverageInfo))
    return true;

  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != V2S16)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() != AMDGPU::SGPRRegBankID)
    return false;

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  if (MRI->getType(Src0) != S32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.RemoveOperand(2);
    return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
           RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  }

  Register ShiftSrc0;
  Register ShiftSrc1;
  int64_t ShiftAmt;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  // FIXME: This is an inconvenient way to check a specific value
  bool Shift0 = mi_match(
    Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
    ShiftAmt == 16;

  bool Shift1 = mi_match(
    Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
    ShiftAmt == 16;

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0 && isZero(Src1, *MRI)) {
    // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
    auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
      .addReg(ShiftSrc0)
      .addImm(16);

    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
    TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
  const TargetRegisterClass *Src1RC =
    TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64;
  else
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();

  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addUse(Src0)
    .addUse(Denom)
    .addUse(Numer);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::WWM);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
    return selectIntrinsicIcmp(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
                               *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
  return true;
}

static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
  switch (MF.getFunction().getCallingConv()) {
  case CallingConv::AMDGPU_PS:
    return 1;
  case CallingConv::AMDGPU_VS:
    return 2;
  case CallingConv::AMDGPU_GS:
    return 3;
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_ES:
    report_fatal_error("ds_ordered_count unsupported for this calling conv");
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::C:
  case CallingConv::Fast:
  default:
    // Assume other calling conventions are various compute callable functions
    return 0;
  }
}
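
// The DS_ORDERED_COUNT offset field is assembled below as
//   Offset = (OrderedCountIndex << 2)
//          | (WaveRelease | WaveDone << 1 | ShaderType << 2
//             | Instruction << 4 [| (CountDw - 1) << 6 on GFX10+]) << 8
// For example, index 1 with wave_release set in a compute shader doing an
// ordered add gives Offset0 = 0x4 and Offset1 = 0x1, i.e. Offset = 0x104.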
bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

  unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
  unsigned ShaderType = getDSShaderTypeValue(*MF);

  unsigned Offset0 = OrderedCountIndex << 2;
  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
                     (Instruction << 4);

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
    Offset1 |= (CountDw - 1) << 6;

  unsigned Offset = Offset0 | (Offset1 << 8);

  Register M0Val = MI.getOperand(2).getReg();
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);

  Register DstReg = MI.getOperand(0).getReg();
  Register ValReg = MI.getOperand(3).getReg();
  MachineInstrBuilder DS =
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
      .addReg(ValReg)
      .addImm(Offset)
      .cloneMemRefs(MI);

  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
  MI.eraseFromParent();
  return Ret;
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}
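
// The variable GWS base offset must sit in bits [21:16] of m0, which is what
// the S_LSHL_B32 by 16 below produces; e.g. a base offset of 3 yields
// m0 = 0x00030000. Any constant component of the offset is folded into the
// instruction's immediate offset field instead.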
 | 
						|
 | 
						|
bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {
  if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !STI.hasGWSSemaReleaseAll())
    return false;

  // intrinsic ID, vsrc, offset
  const bool HasVSrc = MI.getNumOperands() == 3;
  assert(HasVSrc || MI.getNumOperands() == 2);

  Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
  const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
  if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  assert(OffsetDef);

  unsigned ImmOffset;

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *Readfirstlane = nullptr;

  // If we legalized the VGPR input, strip out the readfirstlane to analyze the
  // incoming offset, in case there's an add of a constant. We'll have to put it
  // back later.
  if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
    Readfirstlane = OffsetDef;
    BaseOffset = OffsetDef->getOperand(1).getReg();
    OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  }

  if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.

    ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(0);
  } else {
    std::tie(BaseOffset, ImmOffset, OffsetDef)
      = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);

    if (Readfirstlane) {
      // We have the constant offset now, so put the readfirstlane back on the
      // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
        return false;

      Readfirstlane->getOperand(1).setReg(BaseOffset);
      BaseOffset = Readfirstlane->getOperand(0).getReg();
    } else {
      if (!RBI.constrainGenericRegister(BaseOffset,
                                        AMDGPU::SReg_32RegClass, *MRI))
        return false;
    }

    Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
      .addReg(BaseOffset)
      .addImm(16);

    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0Base);
  }

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));

  if (HasVSrc) {
    Register VSrc = MI.getOperand(1).getReg();
    MIB.addReg(VSrc);
    if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
      return false;
  }

  MIB.addImm(ImmOffset)
     .addImm(-1) // $gds
     .cloneMemRefs(MI);

  MI.eraseFromParent();
  return true;
}

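// Select ds_append/ds_consume. The base pointer is passed in m0, and a small
// constant offset is folded into the instruction's offset field when legal.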
bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {
  Register PtrBase = MI.getOperand(2).getReg();
  LLT PtrTy = MRI->getType(PtrBase);
  bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  unsigned Offset;
  std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));

  // TODO: Should this try to look through readfirstlane like GWS?
  if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
    PtrBase = MI.getOperand(2).getReg();
    Offset = 0;
  }

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(PtrBase);
  BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
    .addImm(Offset)
    .addImm(IsGDS ? -1 : 0)
    .cloneMemRefs(MI);
  MI.eraseFromParent();
  return true;
}

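// Decode the texfailctrl immediate into its TFE (bit 0) and LWE (bit 1)
// flags. Returns false if any unknown bits are set.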
static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
                         bool &IsTexFail) {
  if (TexFailCtrl)
    IsTexFail = true;

  TFE = (TexFailCtrl & 0x1) ? 1 : 0;
  TexFailCtrl &= ~(uint64_t)0x1;
  LWE = (TexFailCtrl & 0x2) ? 1 : 0;
  TexFailCtrl &= ~(uint64_t)0x2;

  return TexFailCtrl == 0;
}

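// Decode the cachepolicy immediate into GLC (bit 0), SLC (bit 1) and DLC
// (bit 2) flags. Passing a null pointer means the corresponding bit is not
// permitted; returns false if any disallowed or unknown bits are set.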
static bool parseCachePolicy(uint64_t Value,
                             bool *GLC, bool *SLC, bool *DLC) {
  if (GLC) {
    *GLC = (Value & 0x1) ? 1 : 0;
    Value &= ~(uint64_t)0x1;
  }
  if (SLC) {
    *SLC = (Value & 0x2) ? 1 : 0;
    Value &= ~(uint64_t)0x2;
  }
  if (DLC) {
    *DLC = (Value & 0x4) ? 1 : 0;
    Value &= ~(uint64_t)0x4;
  }

  return Value == 0;
}

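// Select an image (MIMG) intrinsic preprocessed by the legalizer: compute the
// dmask and the vdata/vaddr dword counts, apply the _L -> _LZ, _mip and G16
// opcode mappings, then emit the MIMG instruction with its modifier operands.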
bool AMDGPUInstructionSelector::selectImageIntrinsic(
  MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
    AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);

  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
  const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
      AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
  const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
      AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
  unsigned IntrOpcode = Intr->BaseOpcode;
  const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;

  const int VAddrIdx = getImageVAddrIdxBegin(BaseOpcode,
                                             MI.getNumExplicitDefs());
  int NumVAddr, NumGradients;
  std::tie(NumVAddr, NumGradients) = getImageNumVAddr(Intr, BaseOpcode);

  Register VDataIn, VDataOut;
  LLT VDataTy;
  int NumVDataDwords = -1;
  bool IsD16 = false;

  // XXX - Can we just get the second to last argument for ctrl?
  unsigned CtrlIdx; // Index of texfailctrl argument
  bool Unorm;
  if (!BaseOpcode->Sampler) {
    Unorm = true;
    CtrlIdx = VAddrIdx + NumVAddr + 1;
  } else {
    Unorm = MI.getOperand(VAddrIdx + NumVAddr + 2).getImm() != 0;
    CtrlIdx = VAddrIdx + NumVAddr + 3;
  }

  bool TFE;
  bool LWE;
  bool IsTexFail = false;
  if (!parseTexFail(MI.getOperand(CtrlIdx).getImm(), TFE, LWE, IsTexFail))
    return false;

  const int Flags = MI.getOperand(CtrlIdx + 2).getImm();
  const bool IsA16 = (Flags & 1) != 0;
  const bool IsG16 = (Flags & 2) != 0;

  // A16 implies 16 bit gradients
  if (IsA16 && !IsG16)
    return false;

  unsigned DMask = 0;
  unsigned DMaskLanes = 0;

  if (BaseOpcode->Atomic) {
    VDataOut = MI.getOperand(0).getReg();
    VDataIn = MI.getOperand(2).getReg();
    LLT Ty = MRI->getType(VDataIn);

    // Be careful to allow atomic swap on 16-bit element vectors.
    const bool Is64Bit = BaseOpcode->AtomicX2 ?
      Ty.getSizeInBits() == 128 :
      Ty.getSizeInBits() == 64;

    if (BaseOpcode->AtomicX2) {
      assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);

      DMask = Is64Bit ? 0xf : 0x3;
      NumVDataDwords = Is64Bit ? 4 : 2;
    } else {
      DMask = Is64Bit ? 0x3 : 0x1;
      NumVDataDwords = Is64Bit ? 2 : 1;
    }
  } else {
    const int DMaskIdx = 2; // Input/output + intrinsic ID.

    DMask = MI.getOperand(DMaskIdx).getImm();
    DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);

    if (BaseOpcode->Store) {
      VDataIn = MI.getOperand(1).getReg();
      VDataTy = MRI->getType(VDataIn);
      NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
    } else {
      VDataOut = MI.getOperand(0).getReg();
      VDataTy = MRI->getType(VDataOut);
      NumVDataDwords = DMaskLanes;

      // One memoperand is mandatory, except for getresinfo.
      // FIXME: Check this in verifier.
      if (!MI.memoperands_empty()) {
        const MachineMemOperand *MMO = *MI.memoperands_begin();

        // Infer d16 from the memory size, as the register type will be
        // mangled by unpacked subtargets, or by TFE.
        IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;

        if (IsD16 && !STI.hasUnpackedD16VMem())
          NumVDataDwords = (DMaskLanes + 1) / 2;
      }
    }
  }

  // Optimize _L to _LZ when _L is zero
  if (LZMappingInfo) {
    // The legalizer replaced the register with an immediate 0 if we need to
    // change the opcode.
    const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
    if (Lod.isImm()) {
      assert(Lod.getImm() == 0);
      IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
    }
  }

  // Optimize _mip away, when 'lod' is zero
  if (MIPMappingInfo) {
    const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
    if (Lod.isImm()) {
      assert(Lod.getImm() == 0);
      IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
    }
  }

  // Set G16 opcode
  if (IsG16 && !IsA16) {
    const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
        AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
    assert(G16MappingInfo);
    IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
  }

  // TODO: Check this in verifier.
  assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");

  bool GLC = false;
  bool SLC = false;
  bool DLC = false;
  if (BaseOpcode->Atomic) {
    GLC = true; // TODO no-return optimization
    if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), nullptr, &SLC,
                          IsGFX10 ? &DLC : nullptr))
      return false;
  } else {
    if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), &GLC, &SLC,
                          IsGFX10 ? &DLC : nullptr))
      return false;
  }

  int NumVAddrRegs = 0;
  int NumVAddrDwords = 0;
  for (int I = 0; I < NumVAddr; ++I) {
    // Skip the $noregs and 0s inserted during legalization.
    MachineOperand &AddrOp = MI.getOperand(VAddrIdx + I);
    if (!AddrOp.isReg())
      continue; // XXX - Break?

    Register Addr = AddrOp.getReg();
    if (!Addr)
      break;

    ++NumVAddrRegs;
    NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
  }

  // The legalizer preprocessed the intrinsic arguments. If we aren't using
  // NSA, these should have been packed into a single value in the first
  // address register.
  const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
  if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
    LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
    return false;
  }

  if (IsTexFail)
    ++NumVDataDwords;

  int Opcode = -1;
  if (IsGFX10) {
    Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
                                   UseNSA ? AMDGPU::MIMGEncGfx10NSA
                                          : AMDGPU::MIMGEncGfx10Default,
                                   NumVDataDwords, NumVAddrDwords);
  } else {
    if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
                                     NumVDataDwords, NumVAddrDwords);
    if (Opcode == -1)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
                                     NumVDataDwords, NumVAddrDwords);
  }
  assert(Opcode != -1);

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
    .cloneMemRefs(MI);

  if (VDataOut) {
    if (BaseOpcode->AtomicX2) {
      const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;

      Register TmpReg = MRI->createVirtualRegister(
        Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
      unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;

      MIB.addDef(TmpReg);
      BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
        .addReg(TmpReg, RegState::Kill, SubReg);

    } else {
      MIB.addDef(VDataOut); // vdata output
    }
  }

  if (VDataIn)
    MIB.addReg(VDataIn); // vdata input

  for (int i = 0; i != NumVAddrRegs; ++i) {
    MachineOperand &SrcOp = MI.getOperand(VAddrIdx + i);
    if (SrcOp.isReg()) {
      assert(SrcOp.getReg() != 0);
      MIB.addReg(SrcOp.getReg());
    }
  }

  MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr).getReg()); // rsrc
  if (BaseOpcode->Sampler)
    MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr + 1).getReg()); // sampler

  MIB.addImm(DMask); // dmask

  if (IsGFX10)
    MIB.addImm(DimInfo->Encoding);
  MIB.addImm(Unorm);
  if (IsGFX10)
    MIB.addImm(DLC);

  MIB.addImm(GLC);
  MIB.addImm(SLC);
  MIB.addImm(IsA16 &&  // a16 or r128
             STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
  if (IsGFX10)
    MIB.addImm(IsA16 ? -1 : 0);

  MIB.addImm(TFE); // tfe
  MIB.addImm(LWE); // lwe
  if (!IsGFX10)
    MIB.addImm(DimInfo->DA ? -1 : 0);
  if (BaseOpcode->HasD16)
    MIB.addImm(IsD16 ? -1 : 0);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

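// Dispatch side-effecting intrinsics that need manual selection; everything
// else falls through to the TableGen'erated patterns via selectImpl.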
bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_end_cf:
    return selectEndCfIntrinsic(I);
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
    return selectDSOrderedIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return selectDSGWSIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_append:
    return selectDSAppendConsume(I, true);
  case Intrinsic::amdgcn_ds_consume:
    return selectDSAppendConsume(I, false);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

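// Select G_SELECT. A scalar condition selects via S_CSELECT on SCC; a VCC
// condition selects via V_CNDMASK_B32.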
bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
  assert(Size <= 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  Register CCReg = CCOp.getReg();
  if (!isVCC(CCReg, *MRI)) {
    unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
                                         AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class we use to
    // represent it. So we need to set the register class manually here.
    if (!MRI->getRegClassOrNull(CCReg))
      MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
            .add(I.getOperand(2))
            .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  // Wide VGPR select should have been split in RegBankSelect.
  if (Size > 32)
    return false;

  MachineInstr *Select =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
              .addImm(0)
              .add(I.getOperand(3))
              .addImm(0)
              .add(I.getOperand(2))
              .add(I.getOperand(1));

  bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  initM0(I);
  return selectImpl(I, *CoverageInfo);
}

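// Map a bit width to the sub0..subN subregister index spanning it. Sizes
// below 32 map to sub0, sizes above 256 fail with -1, and other sizes round
// up to the next power of two.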
static int sizeToSubRegIndex(unsigned Size) {
  switch (Size) {
  case 32:
    return AMDGPU::sub0;
  case 64:
    return AMDGPU::sub0_sub1;
  case 96:
    return AMDGPU::sub0_sub1_sub2;
  case 128:
    return AMDGPU::sub0_sub1_sub2_sub3;
  case 256:
    return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
  default:
    if (Size < 32)
      return AMDGPU::sub0;
    if (Size > 256)
      return -1;
    return sizeToSubRegIndex(PowerOf2Ceil(Size));
  }
}

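// Select G_TRUNC. Most truncates lower to a (sub)register copy; the
// v2s32 -> v2s16 case packs the low 16 bits of each source half into one
// 32-bit register.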
bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  const LLT DstTy = MRI->getType(DstReg);
  const LLT SrcTy = MRI->getType(SrcReg);
  const LLT S1 = LLT::scalar(1);

  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
  const RegisterBank *DstRB;
  if (DstTy == S1) {
    // This is a special case. We don't treat s1 for legalization artifacts as
    // vcc booleans.
    DstRB = SrcRB;
  } else {
    DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
    if (SrcRB != DstRB)
      return false;
  }

  const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;

  unsigned DstSize = DstTy.getSizeInBits();
  unsigned SrcSize = SrcTy.getSizeInBits();

  const TargetRegisterClass *SrcRC
    = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
  const TargetRegisterClass *DstRC
    = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
  if (!SrcRC || !DstRC)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
    MachineBasicBlock *MBB = I.getParent();
    const DebugLoc &DL = I.getDebugLoc();

    Register LoReg = MRI->createVirtualRegister(DstRC);
    Register HiReg = MRI->createVirtualRegister(DstRC);
    BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
      .addReg(SrcReg, 0, AMDGPU::sub0);
    BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
      .addReg(SrcReg, 0, AMDGPU::sub1);

    if (IsVALU && STI.hasSDWA()) {
      // Write the low 16-bits of the high element into the high 16-bits of the
      // low element.
      MachineInstr *MovSDWA =
        BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
        .addImm(0)                             // $src0_modifiers
        .addReg(HiReg)                         // $src0
        .addImm(0)                             // $clamp
        .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
        .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
        .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
        .addReg(LoReg, RegState::Implicit);
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      Register TmpReg0 = MRI->createVirtualRegister(DstRC);
      Register TmpReg1 = MRI->createVirtualRegister(DstRC);
      Register ImmReg = MRI->createVirtualRegister(DstRC);
      if (IsVALU) {
        BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
          .addImm(16)
          .addReg(HiReg);
      } else {
        BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
          .addReg(HiReg)
          .addImm(16);
      }

      unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
      unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
      unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;

      BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
        .addImm(0xffff);
      BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
        .addReg(LoReg)
        .addReg(ImmReg);
      BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
        .addReg(TmpReg0)
        .addReg(TmpReg1);
    }

    I.eraseFromParent();
    return true;
  }

  if (!DstTy.isScalar())
    return false;

  if (SrcSize > 32) {
    int SubRegIdx = sizeToSubRegIndex(DstSize);
    if (SubRegIdx == -1)
      return false;

    // Deal with weird cases where the class only partially supports the subreg
    // index.
    const TargetRegisterClass *SrcWithSubRC
      = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
    if (!SrcWithSubRC)
      return false;

    if (SrcWithSubRC != SrcRC) {
      if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
        return false;
    }

    I.getOperand(1).setSubReg(SubRegIdx);
  }

  I.setDesc(TII.get(TargetOpcode::COPY));
  return true;
}

/// \returns true if a bitmask for \p Size bits will be an inline immediate.
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes<unsigned>(Size);
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}

// Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
  Register Reg, const MachineRegisterInfo &MRI,
  const TargetRegisterInfo &TRI) const {
  const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
    return RB;

  // Ignore the type, since we don't use vcc in artifacts.
  if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
    return &RBI.getRegBankFromRegClass(*RC, LLT());
  return nullptr;
}

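// Select G_SEXT/G_ZEXT/G_SEXT_INREG (G_ANYEXT becomes a plain copy). Uses an
// inline-immediate AND when profitable, otherwise a bitfield extract on the
// appropriate register bank.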
bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
  bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
  bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock &MBB = *I.getParent();
  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI->getType(DstReg);
  const LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
    I.getOperand(2).getImm() : SrcTy.getSizeInBits();
  const unsigned DstSize = DstTy.getSizeInBits();
  if (!DstTy.isScalar())
    return false;

  if (I.getOpcode() == AMDGPU::G_ANYEXT)
    return selectCOPY(I);

  // Artifact casts should never use vcc.
  const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);

  if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
    // 64-bit should have been split up in RegBankSelect

    // Try to use an and with a mask if it will save code size.
    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
        .addImm(Mask)
        .addReg(SrcReg);
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
    }

    const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
    MachineInstr *ExtI =
      BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
      .addReg(SrcReg)
      .addImm(0) // Offset
      .addImm(SrcSize); // Width
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
  }

  if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
    const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
      AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
    if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
      return false;

    if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
      const unsigned SextOpc = SrcSize == 8 ?
        AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
      BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
        .addReg(SrcReg);
      I.eraseFromParent();
      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
    }

    const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
    const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
    if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
      // We need a 64-bit register source, but the high bits don't matter.
      Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
      Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
      unsigned SubReg = InReg ? AMDGPU::sub0 : 0;

      BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
      BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
        .addReg(SrcReg, 0, SubReg)
        .addImm(AMDGPU::sub0)
        .addReg(UndefReg)
        .addImm(AMDGPU::sub1);

      BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
        .addReg(ExtReg)
        .addImm(SrcSize << 16);

      I.eraseFromParent();
      return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
    }

    unsigned Mask;
    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
        .addReg(SrcReg)
        .addImm(Mask);
    } else {
      BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
        .addReg(SrcReg)
        .addImm(SrcSize << 16);
    }

    I.eraseFromParent();
    return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
  }

  return false;
}

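// Materialize a G_CONSTANT or G_FCONSTANT. 64-bit values that aren't inline
// SGPR constants are split into two 32-bit moves and recombined with a
// REG_SEQUENCE.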
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  Register DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI->getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  const DebugLoc &DL = I.getDebugLoc();

  APInt Imm(Size, I.getOperand(1).getImm());

  MachineInstr *ResInst;
  if (IsSgpr && TII.isInlineConstant(Imm)) {
    ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(I.getOperand(1).getImm());
  } else {
    const TargetRegisterClass *RC = IsSgpr ?
      &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
    Register LoReg = MRI->createVirtualRegister(RC);
    Register HiReg = MRI->createVirtualRegister(RC);

    BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

    BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

    ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
      .addReg(LoReg)
      .addImm(AMDGPU::sub0)
      .addReg(HiReg)
      .addImm(AMDGPU::sub1);
  }

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
}

bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
  // Only manually handle the f64 SGPR case.
  //
  // FIXME: This is a workaround for 2.5 different tablegen problems. Because
  // the bit ops theoretically have a second result due to the implicit def of
  // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
  // that is easy by disabling the check. The result works, but uses a
  // nonsensical sreg32orlds_and_sreg_1 regclass.
  //
  // The DAG emitter is more problematic, and incorrectly adds both results of
  // the S_XOR_B32 to the variadic REG_SEQUENCE operands.

  Register Dst = MI.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
      MRI->getType(Dst) != LLT::scalar(64))
    return false;

  Register Src = MI.getOperand(1).getReg();
  MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
  if (Fabs)
    Src = Fabs->getOperand(1).getReg();

  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
    return false;

  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(Src, 0, AMDGPU::sub0);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(Src, 0, AMDGPU::sub1);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
    .addImm(0x80000000);

  // Set or toggle sign bit.
  unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
  BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);
  MI.eraseFromParent();
  return true;
}

// FIXME: This is a workaround for the same tablegen problems as G_FNEG
bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
  Register Dst = MI.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
      MRI->getType(Dst) != LLT::scalar(64))
    return false;

  Register Src = MI.getOperand(1).getReg();
  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
    return false;

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(Src, 0, AMDGPU::sub0);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(Src, 0, AMDGPU::sub1);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
    .addImm(0x7fffffff);

  // Clear sign bit.
  // TODO: Should this use S_BITSET0_*?
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);

  MI.eraseFromParent();
  return true;
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

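// Walk the chain of G_PTR_ADDs feeding a load/store, recording for each one
// the constant offset and the SGPR/VGPR base components.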
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1; i != 3; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (i == 2 && isConstant(*OpDef)) {
      // TODO: Could handle constant base + variable offset, but a combine
      // probably should have commuted it.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

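// Report whether a memory access is known to be uniform, based on the kind of
// pointer value in its memoperand or on amdgpu.uniform metadata.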
bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
  unsigned AS = PtrTy.getAddressSpace();
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
      STI.ldsRequiresM0Init()) {
    // If DS instructions require M0 initialization, insert it before selecting.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(-1);
  }
}

bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
  initM0(I);
  return selectImpl(I, *CoverageInfo);
}

// TODO: No rtn optimization.
bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
  MachineInstr &MI) const {
  Register PtrReg = MI.getOperand(1).getReg();
  const LLT PtrTy = MRI->getType(PtrReg);
  if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
      STI.useFlatForGlobal())
    return selectImpl(MI, *CoverageInfo);

  Register DstReg = MI.getOperand(0).getReg();
  const LLT Ty = MRI->getType(DstReg);
  const bool Is64 = Ty.getSizeInBits() == 64;
  const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  Register TmpReg = MRI->createVirtualRegister(
    Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  Register VAddr, RSrcReg, SOffset;
  int64_t Offset = 0;

  unsigned Opcode;
  if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
    Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
                    AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
  } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
                                   RSrcReg, SOffset, Offset)) {
    Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
                    AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
  } else
    return selectImpl(MI, *CoverageInfo);

  auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
    .addReg(MI.getOperand(2).getReg());

  if (VAddr)
    MIB.addReg(VAddr);

  MIB.addReg(RSrcReg);
  if (SOffset)
    MIB.addReg(SOffset);
  else
    MIB.addImm(0);

  MIB.addImm(Offset);
  MIB.addImm(0); // slc
  MIB.cloneMemRefs(MI);

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(TmpReg, RegState::Kill, SubReg);

  MI.eraseFromParent();

  MRI->setRegClass(
    DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

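// Select G_BRCOND by copying the condition into SCC or VCC and emitting the
// matching conditional branch.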
bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineOperand &CondOp = I.getOperand(0);
  Register CondReg = CondOp.getReg();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned BrOpcode;
  Register CondPhysReg;
  const TargetRegisterClass *ConstrainRC;

  // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
  // whether the branch is uniform when selecting the instruction. In
  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
  // RegBankSelect knows what it's doing if the branch condition is scc, even
  // though it currently does not.
  if (!isVCC(CondReg, *MRI)) {
    if (MRI->getType(CondReg) != LLT::scalar(32))
      return false;

    CondPhysReg = AMDGPU::SCC;
    BrOpcode = AMDGPU::S_CBRANCH_SCC1;
    // FIXME: Hack for isSCC tests
    ConstrainRC = &AMDGPU::SGPR_32RegClass;
  } else {
    // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
    // We sort of know, based on the register bank, that a VCC producer ands
    // inactive lanes with 0. What if there was a logical operation with vcc
    // producers in different blocks/with different exec masks?
    // FIXME: Should scc->vcc copies and with exec?
    CondPhysReg = TRI.getVCC();
    BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
    ConstrainRC = TRI.getBoolRC();
  }

  if (!MRI->getRegClassOrNull(CondReg))
    MRI->setRegClass(CondReg, ConstrainRC);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
    .addReg(CondReg);
  BuildMI(*BB, &I, DL, TII.get(BrOpcode))
    .addMBB(I.getOperand(1).getMBB());

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_FRAME_INDEX_GLOBAL_VALUE(
  MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
  I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
  if (IsVGPR)
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  return RBI.constrainGenericRegister(
    DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
}

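// Select G_PTRMASK as one or two 32-bit ANDs. Halves whose mask bits are all
// known ones need only a copy.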
bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  Register MaskReg = I.getOperand(2).getReg();
  LLT Ty = MRI->getType(DstReg);
  LLT MaskTy = MRI->getType(MaskReg);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
  const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
  if (DstRB != SrcRB) // Should only happen for hand written MIR.
    return false;

  unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
  const TargetRegisterClass &RegRC
    = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;

  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
                                                                  *MRI);
  const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
                                                                  *MRI);
  const TargetRegisterClass *MaskRC =
      TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
      !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  if (Ty.getSizeInBits() == 32) {
    assert(MaskTy.getSizeInBits() == 32 &&
           "ptrmask should have been narrowed during legalize");

    BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
      .addReg(SrcReg)
      .addReg(MaskReg);
    I.eraseFromParent();
    return true;
  }

  Register HiReg = MRI->createVirtualRegister(&RegRC);
  Register LoReg = MRI->createVirtualRegister(&RegRC);

  // Extract the subregisters from the source pointer.
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(SrcReg, 0, AMDGPU::sub0);
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(SrcReg, 0, AMDGPU::sub1);

  Register MaskedLo, MaskedHi;

  // Try to avoid emitting a bit operation when we only need to touch half of
  // the 64-bit pointer.
  APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);

  const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
  const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
  if ((MaskOnes & MaskLo32) == MaskLo32) {
    // If all the bits in the low half are 1, we only need a copy for it.
    MaskedLo = LoReg;
  } else {
    // Extract the mask subregister and apply the and.
    Register MaskLo = MRI->createVirtualRegister(&RegRC);
    MaskedLo = MRI->createVirtualRegister(&RegRC);

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
      .addReg(MaskReg, 0, AMDGPU::sub0);
    BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
      .addReg(LoReg)
      .addReg(MaskLo);
  }

  if ((MaskOnes & MaskHi32) == MaskHi32) {
    // If all the bits in the high half are 1, we only need a copy for it.
    MaskedHi = HiReg;
  } else {
    Register MaskHi = MRI->createVirtualRegister(&RegRC);
    MaskedHi = MRI->createVirtualRegister(&RegRC);

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
      .addReg(MaskReg, 0, AMDGPU::sub1);
    BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
      .addReg(HiReg)
      .addReg(MaskHi);
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(MaskedLo)
    .addImm(AMDGPU::sub0)
    .addReg(MaskedHi)
    .addImm(AMDGPU::sub1);
  I.eraseFromParent();
  return true;
}

/// Return the register to use for the index value, and the subregister to use
/// for the indirectly accessed register.
static std::pair<Register, unsigned>
computeIndirectRegIndex(MachineRegisterInfo &MRI,
                        const SIRegisterInfo &TRI,
                        const TargetRegisterClass *SuperRC,
                        Register IdxReg,
                        unsigned EltSize) {
  Register IdxBaseReg;
  int Offset;
  MachineInstr *Unused;

  std::tie(IdxBaseReg, Offset, Unused)
    = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
  if (IdxBaseReg == AMDGPU::NoRegister) {
    // This will happen if the index is a known constant. This should ordinarily
    // be legalized out, but handle it as a register just in case.
    assert(Offset == 0);
    IdxBaseReg = IdxReg;
  }

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (static_cast<unsigned>(Offset) >= SubRegs.size())
    return std::make_pair(IdxReg, SubRegs[0]);
  return std::make_pair(IdxBaseReg, SubRegs[Offset]);
}

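// Select a dynamic G_EXTRACT_VECTOR_ELT with an SGPR index, using S_MOVRELS,
// V_MOVRELS, or VGPR index mode depending on the source bank and subtarget.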
bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
  MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register IdxReg = MI.getOperand(2).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
  const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);

  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
  if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
                                                                  *MRI);
  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
                                                                  *MRI);
  if (!SrcRC || !DstRC)
    return false;
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const bool Is64 = DstTy.getSizeInBits() == 64;

  unsigned SubReg;
  std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
                                                     DstTy.getSizeInBits() / 8);

  if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
    if (DstTy.getSizeInBits() != 32 && !Is64)
      return false;

    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(IdxReg);

    unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
    BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
      .addReg(SrcReg, 0, SubReg)
      .addReg(SrcReg, RegState::Implicit);
    MI.eraseFromParent();
    return true;
  }

  if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
    return false;

  if (!STI.useVGPRIndexMode()) {
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(IdxReg);
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
    MI.eraseFromParent();
    return true;
  }

  BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
    .addReg(IdxReg)
    .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
  BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
    .addReg(SrcReg, RegState::Undef, SubReg)
    .addReg(SrcReg, RegState::Implicit)
    .addReg(AMDGPU::M0, RegState::Implicit);
  BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));

  MI.eraseFromParent();
  return true;
}

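// Sketch of the three strategies in selectG_EXTRACT_VECTOR_ELT above
// (illustrative MIR, assuming a v4s32 source %vec and scalar index %idx):
//   SGPR source:       $m0 = COPY %idx
//                      %dst = S_MOVRELS_B32 %vec.sub0, implicit %vec
//   VGPR, no idx mode: $m0 = COPY %idx
//                      %dst = V_MOVRELS_B32_e32 undef %vec.sub0, implicit %vec
//   VGPR index mode:   S_SET_GPR_IDX_ON %idx, SRC0; V_MOV_B32; S_SET_GPR_IDX_OFF
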
// TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
  MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register VecReg = MI.getOperand(1).getReg();
  Register ValReg = MI.getOperand(2).getReg();
  Register IdxReg = MI.getOperand(3).getReg();

  LLT VecTy = MRI->getType(DstReg);
  LLT ValTy = MRI->getType(ValReg);
  unsigned VecSize = VecTy.getSizeInBits();
  unsigned ValSize = ValTy.getSizeInBits();

  const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
  const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
  const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);

  assert(VecTy.getElementType() == ValTy);

  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
  if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
                                                                  *MRI);
  const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
                                                                  *MRI);

  if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
      !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
      !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
      !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
    return false;

  unsigned SubReg;
  std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
                                                     ValSize / 8);

  const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
                         STI.useVGPRIndexMode();

  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  if (IndexMode) {
    BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
      .addReg(IdxReg)
      .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
  } else {
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(IdxReg);
  }

  const MCInstrDesc &RegWriteOp
    = TII.getIndirectRegWritePseudo(VecSize, ValSize,
                                    VecRB->getID() == AMDGPU::SGPRRegBankID);
  BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
    .addReg(VecReg)
    .addReg(ValReg)
    .addImm(SubReg);

  if (IndexMode)
    BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));

  MI.eraseFromParent();
  return true;
}

static bool isZeroOrUndef(int X) {
  return X == 0 || X == -1;
}

static bool isOneOrUndef(int X) {
  return X == 1 || X == -1;
}

static bool isZeroOrOneOrUndef(int X) {
  return X == 0 || X == 1 || X == -1;
}

// Normalize a VOP3P shuffle mask to refer to the low/high half of a single
// 32-bit register.
static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
                                   ArrayRef<int> Mask) {
  NewMask[0] = Mask[0];
  NewMask[1] = Mask[1];
  if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
    return Src0;

  assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
  assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);

  // Shift the mask inputs to be 0/1.
  NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
  NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
  return Src1;
}

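// Example of normalizeVOP3PMask above (illustrative): a mask of <3, 2>
// reads only Src1, so it is rewritten to <1, 0> and Src1 is returned; a
// mask of <0, -1> already refers to Src0's halves and Src0 is returned
// with the mask unchanged.
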
// This is only legal with VOP3P instructions as an aid to op_sel matching.
bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
  MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register Src0Reg = MI.getOperand(1).getReg();
  Register Src1Reg = MI.getOperand(2).getReg();
  ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();

  const LLT V2S16 = LLT::vector(2, 16);
  if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
    return false;

  if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
    return false;

  assert(ShufMask.size() == 2);
  assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
  const TargetRegisterClass &RC = IsVALU ?
    AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;

  // Handle the degenerate case which should have folded out.
  if (ShufMask[0] == -1 && ShufMask[1] == -1) {
    BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);

    MI.eraseFromParent();
    return RBI.constrainGenericRegister(DstReg, RC, *MRI);
  }

  // A legal VOP3P mask only reads one of the sources.
  int Mask[2];
  Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
      !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
    return false;

  // TODO: This also should have been folded out
  if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
    BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
      .addReg(SrcVec);

    MI.eraseFromParent();
    return true;
  }

  if (Mask[0] == 1 && Mask[1] == -1) {
    if (IsVALU) {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
        .addImm(16)
        .addReg(SrcVec);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
        .addReg(SrcVec)
        .addImm(16);
    }
  } else if (Mask[0] == -1 && Mask[1] == 0) {
    if (IsVALU) {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
        .addImm(16)
        .addReg(SrcVec);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
        .addReg(SrcVec)
        .addImm(16);
    }
  } else if (Mask[0] == 0 && Mask[1] == 0) {
    if (IsVALU) {
      // Write low half of the register into the high half.
      MachineInstr *MovSDWA =
        BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
        .addImm(0)                             // $src0_modifiers
        .addReg(SrcVec)                        // $src0
        .addImm(0)                             // $clamp
        .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
        .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
        .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
        .addReg(SrcVec, RegState::Implicit);
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec);
    }
  } else if (Mask[0] == 1 && Mask[1] == 1) {
    if (IsVALU) {
      // Write high half of the register into the low half.
      MachineInstr *MovSDWA =
        BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
        .addImm(0)                             // $src0_modifiers
        .addReg(SrcVec)                        // $src0
        .addImm(0)                             // $clamp
        .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
        .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
        .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
        .addReg(SrcVec, RegState::Implicit);
      MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
    } else {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec);
    }
  } else if (Mask[0] == 1 && Mask[1] == 0) {
    if (IsVALU) {
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
        .addReg(SrcVec)
        .addReg(SrcVec)
        .addImm(16);
    } else {
      Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
        .addReg(SrcVec)
        .addImm(16);
      BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
        .addReg(TmpReg)
        .addReg(SrcVec);
    }
  } else
    llvm_unreachable("all shuffle masks should be handled");

  MI.eraseFromParent();
  return true;
}

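// Summary of the v2s16 shuffle cases handled above (illustrative):
//   <1, u> -> v_lshrrev_b32 / s_lshr_b32 by 16 (move high half down)
//   <u, 0> -> v_lshlrev_b32 / s_lshl_b32 by 16 (move low half up)
//   <0, 0> -> SDWA mov writing WORD_1 from WORD_0, or s_pack_ll_b32_b16
//   <1, 1> -> SDWA mov writing WORD_0 from WORD_1, or s_pack_hh_b32_b16
//   <1, 0> -> v_alignbit_b32 by 16, or s_lshr_b32 + s_pack_ll_b32_b16
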
bool AMDGPUInstructionSelector::select(MachineInstr &I) {
  if (I.isPHI())
    return selectPHI(I);

  if (!I.isPreISelOpcode()) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_AND_OR_XOR(I);
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_ADD_SUB(I);
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_USUBE:
    return selectG_UADDO_USUBO_UADDE_USUBE(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_PTRTOINT:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_FNEG:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_FNEG(I);
  case TargetOpcode::G_FABS:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_FABS(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectG_MERGE_VALUES(I);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectG_UNMERGE_VALUES(I);
  case TargetOpcode::G_BUILD_VECTOR_TRUNC:
    return selectG_BUILD_VECTOR_TRUNC(I);
  case TargetOpcode::G_PTR_ADD:
    return selectG_PTR_ADD(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
  case TargetOpcode::G_ICMP:
    if (selectG_ICMP(I))
      return true;
    return selectImpl(I, *CoverageInfo);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_ATOMIC_CMPXCHG:
  case TargetOpcode::G_ATOMICRMW_XCHG:
  case TargetOpcode::G_ATOMICRMW_ADD:
  case TargetOpcode::G_ATOMICRMW_SUB:
  case TargetOpcode::G_ATOMICRMW_AND:
  case TargetOpcode::G_ATOMICRMW_OR:
  case TargetOpcode::G_ATOMICRMW_XOR:
  case TargetOpcode::G_ATOMICRMW_MIN:
  case TargetOpcode::G_ATOMICRMW_MAX:
  case TargetOpcode::G_ATOMICRMW_UMIN:
  case TargetOpcode::G_ATOMICRMW_UMAX:
  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectG_LOAD_ATOMICRMW(I);
  case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
    return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
  case TargetOpcode::G_SELECT:
    return selectG_SELECT(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  case TargetOpcode::G_TRUNC:
    return selectG_TRUNC(I);
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT_INREG:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectG_SZA_EXT(I);
  case TargetOpcode::G_BRCOND:
    return selectG_BRCOND(I);
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectG_FRAME_INDEX_GLOBAL_VALUE(I);
  case TargetOpcode::G_PTRMASK:
    return selectG_PTRMASK(I);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return selectG_EXTRACT_VECTOR_ELT(I);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return selectG_INSERT_VECTOR_ELT(I);
  case TargetOpcode::G_SHUFFLE_VECTOR:
    return selectG_SHUFFLE_VECTOR(I);
  case AMDGPU::G_AMDGPU_ATOMIC_INC:
  case AMDGPU::G_AMDGPU_ATOMIC_DEC:
    initM0(I);
    return selectImpl(I, *CoverageInfo);
  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
    const AMDGPU::ImageDimIntrinsicInfo *Intr
      = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
    assert(Intr && "not an image intrinsic with image pseudo");
    return selectImageIntrinsic(I, Intr);
  }
  default:
    return selectImpl(I, *CoverageInfo);
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root) const {
  Register Src = Root.getReg();
  Register OrigSrc = Src;
  unsigned Mods = 0;
  MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::NEG;
    MI = getDefIgnoringCopies(Src, *MRI);
  }

  if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
    Src = MI->getOperand(1).getReg();
    Mods |= SISrcMods::ABS;
  }

  if (Mods != 0 &&
      RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
    MachineInstr *UseMI = Root.getParent();

    // If we looked through copies to find source modifiers on an SGPR operand,
    // we now have an SGPR register source. To avoid potentially violating the
    // constant bus restriction, we need to insert a copy to a VGPR.
    Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
    BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
            TII.get(AMDGPU::COPY), VGPRSrc)
      .addReg(Src);
    Src = VGPRSrc;
  }

  return std::make_pair(Src, Mods);
}

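// Example of the modifier folding above (illustrative): an operand defined
// by (G_FNEG (G_FABS %x)) becomes Src = %x with Mods = NEG | ABS, which the
// VOP3 encoding renders as -|x| via the src_modifiers operand.
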
///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
  Register Reg = Root.getReg();
  const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
  if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
              Def->getOpcode() == AMDGPU::G_FABS))
    return {};
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PModsImpl(
  Register Src, const MachineRegisterInfo &MRI) const {
  unsigned Mods = 0;
  MachineInstr *MI = MRI.getVRegDef(Src);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
      // It's possible to see an f32 fneg here, but unlikely.
      // TODO: Treat f32 fneg as only high bit.
      MRI.getType(Src) == LLT::vector(2, 16)) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = MI->getOperand(1).getReg();
    MI = MRI.getVRegDef(Src);
  }

  // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.

  // Packed instructions do not have abs modifiers.
  Mods |= SISrcMods::OP_SEL_1;

  return std::make_pair(Src, Mods);
}

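// Note on the packed modifiers above (illustrative): a single G_FNEG of a
// v2s16 value negates both halves, so NEG and NEG_HI are toggled together,
// and OP_SEL_1 is always set so the high half of the operand is taken from
// the high half of the source register.
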
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
  if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
  // FIXME: Handle op_sel
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
  if (!EncodedImm)
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Register PtrReg = GEPInfo.SgprParts[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
  if (!EncodedImm)
    return None;

  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, *MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  // SGPR offset is unsigned.
  if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far, we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
  Register PtrReg = GEPInfo.SgprParts[0];
  Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
          .addImm(GEPInfo.Imm);
  return {{
    [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
    [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}

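// Relationship between the three SMRD selectors above (illustrative):
// selectSmrdImm folds the offset into the instruction's immediate field,
// selectSmrdImm32 uses the 32-bit literal-offset encoding where the
// subtarget supports it, and selectSmrdSgpr falls back to materializing the
// offset into an SGPR with S_MOV_B32 and using the _SGPR variants.
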
template <bool Signed>
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();

  InstructionSelector::ComplexRendererFns Default = {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
    }};

  if (!STI.hasFlatInstOffsets())
    return Default;

  const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
  if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
    return Default;

  Optional<int64_t> Offset =
    getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
  if (!Offset.hasValue())
    return Default;

  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
  if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
    return Default;

  Register BasePtr = OpDef->getOperand(1).getReg();

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
  return selectFlatOffsetImpl<false>(Root);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
  return selectFlatOffsetImpl<true>(Root);
}

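// Example of the flat-offset folding above (illustrative): on a subtarget
// with flat instruction offsets, an address of (G_PTR_ADD %ptr, 40) selects
// to %ptr with an immediate offset of 40, provided TII.isLegalFLATOffset
// accepts 40 for the access's address space; otherwise the full add result
// is used with a zero offset.
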
static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  int64_t Offset = 0;
  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
      Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
    Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // TODO: Should this be inside the render function? The iterator seems to
    // move.
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            HighBits)
      .addImm(Offset & ~4095);

    return {{[=](MachineInstrBuilder &MIB) { // rsrc
               MIB.addReg(Info->getScratchRSrcReg());
             },
             [=](MachineInstrBuilder &MIB) { // vaddr
               MIB.addReg(HighBits);
             },
             [=](MachineInstrBuilder &MIB) { // soffset
               const MachineMemOperand *MMO = *MI->memoperands_begin();
               const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

               if (isStackPtrRelative(PtrInfo))
                 MIB.addReg(Info->getStackPtrOffsetReg());
               else
                 MIB.addImm(0);
             },
             [=](MachineInstrBuilder &MIB) { // offset
               MIB.addImm(Offset & 4095);
             }}};
  }

  assert(Offset == 0 || Offset == -1);

  // Try to fold a frame index directly into the MUBUF vaddr field, and any
  // offsets.
  Optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
    if (isBaseWithConstantOffset(Root, *MRI)) {
      const MachineOperand &LHS = RootDef->getOperand(1);
      const MachineOperand &RHS = RootDef->getOperand(2);
      const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
      const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
      if (LHSDef && RHSDef) {
        int64_t PossibleOffset =
            RHSDef->getOperand(1).getCImm()->getSExtValue();
        if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
            (!STI.privateMemoryResourceIsRangeChecked() ||
             KnownBits->signBitIsZero(LHS.getReg()))) {
          if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
            FI = LHSDef->getOperand(1).getIndex();
          else
            VAddr = LHS.getReg();
          Offset = PossibleOffset;
        }
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI.hasValue())
               MIB.addFrameIndex(FI.getValue());
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             // If we don't know this private access is a local stack object, it
             // needs to be relative to the entry point's scratch wave offset.
             // TODO: Should split large offsets that don't fit like above.
             // TODO: Don't use scratch wave offset just because the offset
             // didn't fit.
             if (!Info->isEntryFunction() && FI.hasValue())
               MIB.addReg(Info->getStackPtrOffsetReg());
             else
               MIB.addImm(0);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}

bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
                                                int64_t Offset,
                                                unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return KnownBits->signBitIsZero(Base);
}

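// Example of isDSOffsetLegal above (illustrative): DS instructions with a
// 16-bit unsigned offset field can fold an offset of 400, but 70000 exceeds
// isUInt<16> and the add must remain explicit; on Southern Islands the base
// must additionally be provably non-negative.
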
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  int64_t Offset = 0;
  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
      !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
    return {};

  const MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  const MachineMemOperand *MMO = *MI->memoperands_begin();
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(Info->getScratchRSrcReg());
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (isStackPtrRelative(PtrInfo))
          MIB.addReg(Info->getStackPtrOffsetReg());
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    if (isDSOffsetLegal(PtrBase, Offset, 16)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, Offset);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::make_pair(Root.getReg(), 0);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDS64Bit4ByteAlignedImpl(Root);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
    }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS64Bit4ByteAlignedImpl(MachineOperand &Root) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    int64_t DWordOffset0 = Offset / 4;
    int64_t DWordOffset1 = DWordOffset0 + 1;
    if (isDSOffsetLegal(PtrBase, DWordOffset1, 8)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, DWordOffset0);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::make_pair(Root.getReg(), 0);
}

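// Example of the 64-bit DS addressing above (illustrative): for an access
// at (%base + 24), selectDS64Bit4ByteAlignedImpl produces DWordOffset0 = 6,
// and the renderer emits offset0 = 6 and offset1 = 7, addressing two
// consecutive dwords from %base as in ds_read2_b32.
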
/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
/// the base value with the constant offset. There may be intervening copies
/// between \p Root and the identified constant. Returns \p Root, 0 if this does
/// not match the pattern.
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
  Register Root, const MachineRegisterInfo &MRI) const {
  MachineInstr *RootI = MRI.getVRegDef(Root);
  if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return {Root, 0};

  MachineOperand &RHS = RootI->getOperand(2);
  Optional<ValueAndVReg> MaybeOffset
    = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
  if (!MaybeOffset)
    return {Root, 0};
  return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
}

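// For example (illustrative): with %p = G_PTR_ADD %base, %c where %c is a
// G_CONSTANT of 16 (possibly behind copies), getPtrBaseWithConstantOffset
// returns {%base, 16}; any other shape returns {Root, 0}.
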
static void addZeroImm(MachineInstrBuilder &MIB) {
  MIB.addImm(0);
}

/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
/// BasePtr is not valid, a null base pointer will be used.
static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          uint32_t FormatLo, uint32_t FormatHi,
                          Register BasePtr) {
  Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc2)
    .addImm(FormatLo);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc3)
    .addImm(FormatHi);

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrcHi)
    .addReg(RSrc2)
    .addImm(AMDGPU::sub0)
    .addReg(RSrc3)
    .addImm(AMDGPU::sub1);

  Register RSrcLo = BasePtr;
  if (!BasePtr) {
    RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    B.buildInstr(AMDGPU::S_MOV_B64)
      .addDef(RSrcLo)
      .addImm(0);
  }

  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrc)
    .addReg(RSrcLo)
    .addImm(AMDGPU::sub0_sub1)
    .addReg(RSrcHi)
    .addImm(AMDGPU::sub2_sub3);

  return RSrc;
}

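// Layout of the SRD built above (illustrative): sub0_sub1 of the 128-bit
// result holds the 64-bit base pointer (zero when BasePtr is invalid), and
// sub2_sub3 holds the FormatLo/FormatHi dwords of the buffer descriptor.
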
static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
}

static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
}

AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
  MUBUFAddressData Data;
  Data.N0 = Src;

  Register PtrBase;
  int64_t Offset;

  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
  if (isUInt<32>(Offset)) {
    Data.N0 = PtrBase;
    Data.Offset = Offset;
  }

  if (MachineInstr *InputAdd
      = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
    Data.N2 = InputAdd->getOperand(1).getReg();
    Data.N3 = InputAdd->getOperand(2).getReg();

    // FIXME: Need to fix extra SGPR->VGPR copies inserted
    // FIXME: We don't know that this was defined by operand 0
    //
    // TODO: Remove this when we have copy folding optimizations after
    // RegBankSelect.
    Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
    Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
  }

  return Data;
}

/// Return true if the addr64 mubuf mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

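// Illustrative note: a divergent (VGPR) pointer forces the addr64 form,
// where the pointer is carried in the instruction's vaddr operand, while a
// uniform (SGPR) pointer instead becomes the base of the resource
// descriptor and the offset-only forms below are used.
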
/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
  MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // Illegal offset, store it in soffset.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(SOffset)
    .addImm(ImmOffset);
  ImmOffset = 0;
}

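// Example (illustrative): the MUBUF immediate offset field is 12 bits, so
// an offset of 4000 stays in ImmOffset, while 8192 is moved into an SGPR
// via S_MOV_B32 and ImmOffset is reset to 0.
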
bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
  MachineOperand &Root, Register &VAddr, Register &RSrcReg,
  Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // The addr64 bit was removed for Volcanic Islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource.
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
  MachineOperand &Root, Register &RSrcReg, Register &SOffset,
  int64_t &Offset) const {
  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, //  glc
      addZeroImm, //  slc
      addZeroImm, //  tfe
      addZeroImm, //  dlc
      addZeroImm  //  swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, //  glc
      addZeroImm, //  slc
      addZeroImm, //  tfe
      addZeroImm, //  dlc
      addZeroImm  //  swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm //  slc
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm //  slc
    }};
}

/// Get an immediate that must be 32-bits, and treated as zero extended.
 | 
						|
static Optional<uint64_t> getConstantZext32Val(Register Reg,
 | 
						|
                                               const MachineRegisterInfo &MRI) {
 | 
						|
  // getConstantVRegVal sexts any values, so see if that matters.
 | 
						|
  Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
 | 
						|
  if (!OffsetVal || !isInt<32>(*OffsetVal))
 | 
						|
    return None;
 | 
						|
  return Lo_32(*OffsetVal);
 | 
						|
}
 | 
						|
 | 
						|
InstructionSelector::ComplexRendererFns
 | 
						|
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
 | 
						|
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
 | 
						|
  if (!OffsetVal)
 | 
						|
    return {};
 | 
						|
 | 
						|
  Optional<int64_t> EncodedImm =
 | 
						|
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
 | 
						|
  if (!EncodedImm)
 | 
						|
    return {};
 | 
						|
 | 
						|
  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
 | 
						|
}
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}
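
// The render* methods below are custom operand renderers invoked from
// TableGen-generated selection code. Each one appends a single immediate,
// derived from a matched constant, to the instruction being built. In this
// file an OpIdx of -1 means the renderer consumes the whole matched
// G_CONSTANT/G_FCONSTANT rather than a specific operand index.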

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}
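
// renderBitcastImm emits a floating-point constant as its raw bit pattern: a
// G_FCONSTANT operand is reinterpreted through APFloat::bitcastToAPInt, while
// a plain G_CONSTANT is passed through unchanged.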
void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}

/// This only really exists to satisfy the DAG type checking machinery; the
/// truncation is a no-op here, so the matched immediate is copied through
/// unchanged.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}
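
// The renderExtract* helpers below unpack individual cache policy bits from a
// single packed immediate operand, matching the shifts in each renderer: glc
// is bit 0, slc bit 1, dlc bit 2, and swz bit 3.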

void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
}

void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
}

void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}
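
// The isInlineImmediate* predicates report whether a value can be encoded as a
// hardware inline constant of the given bit width, in which case no literal
// dword has to be emitted. hasInv2PiInlineImm() accounts for the extra
// 1/(2*pi) inline constant available on newer subtargets.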

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}