//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPULegalizerInfo.h"

#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;
using namespace MIPatternMatch;

// Hack until load/store selection patterns support any tuple of legal types.
static cl::opt<bool> EnableNewLegality(
  "amdgpu-global-isel-new-legality",
  cl::desc("Use GlobalISel desired legality, rather than try to use "
           "rules compatible with selection patterns"),
  cl::init(false),
  cl::ReallyHidden);

static constexpr unsigned MaxRegisterSize = 1024;

// Round the number of elements to the next power of two elements
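// e.g. <3 x s16> -> <4 x s16>, <5 x s32> -> <8 x s32>.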
static LLT getPow2VectorType(LLT Ty) {
  unsigned NElts = Ty.getNumElements();
  unsigned Pow2NElts = 1 <<  Log2_32_Ceil(NElts);
  return Ty.changeNumElements(Pow2NElts);
}

// Round the number of bits to the next power of two bits
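// e.g. s17 -> s32, s48 -> s64.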
static LLT getPow2ScalarType(LLT Ty) {
  unsigned Bits = Ty.getSizeInBits();
  unsigned Pow2Bits = 1 <<  Log2_32_Ceil(Bits);
  return LLT::scalar(Pow2Bits);
}

/// \returns true if this is an odd sized vector which should be widened by
/// adding an additional element. This is mostly to handle <3 x s16> ->
/// <4 x s16>. This excludes s1 vectors, which should always be scalarized.
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    if (!Ty.isVector())
      return false;

    const LLT EltTy = Ty.getElementType();
    const unsigned EltSize = EltTy.getSizeInBits();
    return Ty.getNumElements() % 2 != 0 &&
           EltSize > 1 && EltSize < 32 &&
           Ty.getSizeInBits() % 32 != 0;
  };
}

static LegalityPredicate sizeIsMultipleOf32(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.getSizeInBits() % 32 == 0;
  };
}

static LegalityPredicate isWideVec16(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return EltTy.getSizeInBits() == 16 && Ty.getNumElements() > 2;
  };
}

static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}

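// Reduce the element count so the resulting vector fits in 64 bits,
// e.g. <4 x s32> is split into <2 x s32> pieces.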
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}

// Increase the number of vector elements to reach the next multiple of 32-bit
// type.
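// e.g. <3 x s8> -> <4 x s8> (32 bits), <5 x s16> -> <6 x s16> (96 bits).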
static LegalizeMutation moreEltsToNext32Bit(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];

    const LLT EltTy = Ty.getElementType();
    const int Size = Ty.getSizeInBits();
    const int EltSize = EltTy.getSizeInBits();
    const int NextMul32 = (Size + 31) / 32;

    assert(EltSize < 32);

    const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;
    return std::make_pair(TypeIdx, LLT::vector(NewNumElts, EltTy));
  };
}

static LLT getBitcastRegisterType(const LLT Ty) {
  const unsigned Size = Ty.getSizeInBits();

  LLT CoercedTy;
  if (Size <= 32) {
    // <2 x s8> -> s16
    // <4 x s8> -> s32
    return LLT::scalar(Size);
  }

  return LLT::scalarOrVector(Size / 32, 32);
}

static LegalizeMutation bitcastToRegisterType(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return std::make_pair(TypeIdx, getBitcastRegisterType(Ty));
  };
}

static LegalizeMutation bitcastToVectorElement32(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    unsigned Size = Ty.getSizeInBits();
    assert(Size % 32 == 0);
    return std::make_pair(TypeIdx, LLT::scalarOrVector(Size / 32, 32));
  };
}

static LegalityPredicate vectorSmallerThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() < Size;
  };
}

static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}

static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
  };
}

static bool isRegisterSize(unsigned Size) {
  return Size % 32 == 0 && Size <= MaxRegisterSize;
}

static bool isRegisterVectorElementType(LLT EltTy) {
  const int EltSize = EltTy.getSizeInBits();
  return EltSize == 16 || EltSize % 32 == 0;
}

static bool isRegisterVectorType(LLT Ty) {
  const int EltSize = Ty.getElementType().getSizeInBits();
  return EltSize == 32 || EltSize == 64 ||
         (EltSize == 16 && Ty.getNumElements() % 2 == 0) ||
         EltSize == 128 || EltSize == 256;
}

static bool isRegisterType(LLT Ty) {
  if (!isRegisterSize(Ty.getSizeInBits()))
    return false;

  if (Ty.isVector())
    return isRegisterVectorType(Ty);

  return true;
}

// Any combination of 32 or 64-bit elements up to the maximum register size,
// and multiples of v2s16.
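// e.g. s32, s64, <2 x s16>, <4 x s32> and s1024 are register types; s48 and
// <3 x s8> are not.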
static LegalityPredicate isRegisterType(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    return isRegisterType(Query.Types[TypeIdx]);
  };
}

static LegalityPredicate elementTypeIsLegal(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    if (!QueryTy.isVector())
      return false;
    const LLT EltTy = QueryTy.getElementType();
    return EltTy == LLT::scalar(16) || EltTy.getSizeInBits() >= 32;
  };
}

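// Match wide scalar truncating stores, e.g. an s64 value stored as 32 bits.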
static LegalityPredicate isWideScalarTruncStore(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return !Ty.isVector() && Ty.getSizeInBits() > 32 &&
           Query.MMODescrs[0].SizeInBits < Ty.getSizeInBits();
  };
}

// TODO: Should load to s16 be legal? Most loads extend to 32-bits, but we
// handle some operations by just promoting the register during
// selection. There are also d16 loads on GFX9+ which preserve the high bits.
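// The limits returned below are in bits, e.g. up to 512-bit loads from the
// global and constant address spaces.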
static unsigned maxSizeForAddrSpace(const GCNSubtarget &ST, unsigned AS,
                                    bool IsLoad) {
  switch (AS) {
  case AMDGPUAS::PRIVATE_ADDRESS:
    // FIXME: Private element size.
    return ST.enableFlatScratch() ? 128 : 32;
  case AMDGPUAS::LOCAL_ADDRESS:
    return ST.useDS128() ? 128 : 64;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::CONSTANT_ADDRESS:
  case AMDGPUAS::CONSTANT_ADDRESS_32BIT:
    // Treat constant and global as identical. SMRD loads are sometimes usable
    // for global loads (ideally constant address space should be eliminated)
    // depending on the context. Legality cannot be context dependent, but
    // RegBankSelect can split the load as necessary depending on the pointer
    // register bank/uniformity and if the memory is invariant or not written
    // in a kernel.
    return IsLoad ? 512 : 128;
  default:
    // Flat addresses may contextually need to be split to 32-bit parts if they
    // may alias scratch depending on the subtarget.
    return 128;
  }
}

static bool isLoadStoreSizeLegal(const GCNSubtarget &ST,
                                 const LegalityQuery &Query,
                                 unsigned Opcode) {
  const LLT Ty = Query.Types[0];

  // Handle G_LOAD, G_ZEXTLOAD, G_SEXTLOAD
  const bool IsLoad = Opcode != AMDGPU::G_STORE;

  unsigned RegSize = Ty.getSizeInBits();
  unsigned MemSize = Query.MMODescrs[0].SizeInBits;
  unsigned AlignBits = Query.MMODescrs[0].AlignInBits;
  unsigned AS = Query.Types[1].getAddressSpace();

  // All of these need to be custom lowered to cast the pointer operand.
  if (AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return false;

  // TODO: We should be able to widen loads if the alignment is high enough, but
  // we also need to modify the memory access size.
#if 0
  // Accept widening loads based on alignment.
  if (IsLoad && MemSize < Size)
    MemSize = std::max(MemSize, Align);
#endif

  // Only 1-byte and 2-byte to 32-bit extloads are valid.
  if (MemSize != RegSize && RegSize != 32)
    return false;

  if (MemSize > maxSizeForAddrSpace(ST, AS, IsLoad))
    return false;

  switch (MemSize) {
  case 8:
  case 16:
  case 32:
  case 64:
  case 128:
    break;
  case 96:
    if (!ST.hasDwordx3LoadStores())
      return false;
    break;
  case 256:
  case 512:
    // These may contextually need to be broken down.
    break;
  default:
    return false;
  }

  assert(RegSize >= MemSize);

  if (AlignBits < MemSize) {
    const SITargetLowering *TLI = ST.getTargetLowering();
    if (!TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS,
                                                 Align(AlignBits / 8)))
      return false;
  }

  return true;
}

// The current selector can't handle <6 x s16>, <8 x s16>, s96, s128 etc, so
// work around this. Eventually it should ignore the type for loads and only
// care about the size. Return true in cases where we will work around this
// for now by bitcasting.
static bool loadStoreBitcastWorkaround(const LLT Ty) {
  if (EnableNewLegality)
    return false;

  const unsigned Size = Ty.getSizeInBits();
  if (Size <= 64)
    return false;
  if (!Ty.isVector())
    return true;

  LLT EltTy = Ty.getElementType();
  if (EltTy.isPointer())
    return true;

  unsigned EltSize = EltTy.getSizeInBits();
  return EltSize != 32 && EltSize != 64;
}

static bool isLoadStoreLegal(const GCNSubtarget &ST, const LegalityQuery &Query,
                             unsigned Opcode) {
  const LLT Ty = Query.Types[0];
  return isRegisterType(Ty) && isLoadStoreSizeLegal(ST, Query, Opcode) &&
         !loadStoreBitcastWorkaround(Ty);
}

/// Return true if a load or store of the type should be lowered with a bitcast
/// to a different type.
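/// e.g. a <4 x s8> load is lowered by bitcasting to s32.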
static bool shouldBitcastLoadStoreType(const GCNSubtarget &ST, const LLT Ty,
                                       const unsigned MemSizeInBits) {
  const unsigned Size = Ty.getSizeInBits();
  if (Size != MemSizeInBits)
    return Size <= 32 && Ty.isVector();

  if (loadStoreBitcastWorkaround(Ty) && isRegisterType(Ty))
    return true;
  return Ty.isVector() && (Size <= 32 || isRegisterSize(Size)) &&
         !isRegisterVectorElementType(Ty.getElementType());
}

/// Return true if we should legalize a load by widening an odd sized memory
/// access up to the alignment. Note that in this case the memory access itself
/// changes, not the size of the result register.
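/// e.g. an s48 load with 64-bit alignment may be widened to a 64-bit load.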
static bool shouldWidenLoad(const GCNSubtarget &ST, unsigned SizeInBits,
                            unsigned AlignInBits, unsigned AddrSpace,
                            unsigned Opcode) {
  // We don't want to widen cases that are naturally legal.
  if (isPowerOf2_32(SizeInBits))
    return false;

  // If we have 96-bit memory operations, we shouldn't touch them. Note we may
  // end up widening these for a scalar load during RegBankSelect, since there
  // aren't 96-bit scalar loads.
  if (SizeInBits == 96 && ST.hasDwordx3LoadStores())
    return false;

  if (SizeInBits >= maxSizeForAddrSpace(ST, AddrSpace, Opcode))
    return false;

  // A load is known dereferenceable up to the alignment, so it's legal to widen
  // to it.
  //
  // TODO: Could check dereferenceable for less aligned cases.
  unsigned RoundedSize = NextPowerOf2(SizeInBits);
  if (AlignInBits < RoundedSize)
    return false;

  // Do not widen if it would introduce a slow unaligned load.
  const SITargetLowering *TLI = ST.getTargetLowering();
  bool Fast = false;
  return TLI->allowsMisalignedMemoryAccessesImpl(
             RoundedSize, AddrSpace, Align(AlignInBits / 8),
             MachineMemOperand::MOLoad, &Fast) &&
         Fast;
}

static bool shouldWidenLoad(const GCNSubtarget &ST, const LegalityQuery &Query,
                            unsigned Opcode) {
  if (Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic)
    return false;

  return shouldWidenLoad(ST, Query.MMODescrs[0].SizeInBits,
                         Query.MMODescrs[0].AlignInBits,
                         Query.Types[1].getAddressSpace(), Opcode);
}

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                                         const GCNTargetMachine &TM)
  :  ST(ST_) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);
  const LLT MaxScalar = LLT::scalar(MaxRegisterSize);

  const LLT V2S8 = LLT::vector(2, 8);
  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);
  const LLT V32S32 = LLT::vector(32, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);
  const LLT V16S64 = LLT::vector(16, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32, V32S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64, V16S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT Constant32Ptr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS_32BIT);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT RegionPtr = GetAddrSpacePtr(AMDGPUAS::REGION_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr, Constant32Ptr, RegionPtr
  };

  const std::initializer_list<LLT> FPTypesBase = {
    S32, S64
  };

  const std::initializer_list<LLT> FPTypes16 = {
    S32, S64, S16
  };

  const std::initializer_list<LLT> FPTypesPK16 = {
    S32, S64, S16, V2S16
  };

  const LLT MinScalarFPTy = ST.has16BitInsts() ? S16 : S32;

  setAction({G_BRCOND, S1}, Legal); // VCC branches
  setAction({G_BRCOND, S32}, Legal); // SCC branches

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .legalIf(isPointer(0))
    .clampScalar(0, S16, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .scalarize(0);

  if (ST.hasVOP3PInsts() && ST.hasAddNoCarry() && ST.hasIntClamp()) {
    // Full set of gfx9 features.
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32, S16, V2S16})
      .clampScalar(0, S16, S32)
      .clampMaxNumElements(0, S16, 2)
      .scalarize(0)
      .widenScalarToNextPow2(0, 32);

    getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT, G_SADDSAT, G_SSUBSAT})
      .legalFor({S32, S16, V2S16}) // Clamp modifier
      .minScalarOrElt(0, S16)
      .clampMaxNumElements(0, S16, 2)
      .scalarize(0)
      .widenScalarToNextPow2(0, 32)
      .lower();
  } else if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32, S16})
      .clampScalar(0, S16, S32)
      .scalarize(0)
      .widenScalarToNextPow2(0, 32); // FIXME: min should be 16

    // Technically the saturating operations require clamp bit support, but this
    // was introduced at the same time as 16-bit operations.
    getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
      .legalFor({S32, S16}) // Clamp modifier
      .minScalar(0, S16)
      .scalarize(0)
      .widenScalarToNextPow2(0, 16)
      .lower();

    // We're just lowering this, but it helps get a better result to try to
    // coerce to the desired type first.
    getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT})
      .minScalar(0, S16)
      .scalarize(0)
      .lower();
  } else {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .scalarize(0);

    if (ST.hasIntClamp()) {
      getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
        .legalFor({S32}) // Clamp modifier.
        .scalarize(0)
        .minScalarOrElt(0, S32)
        .lower();
    } else {
      // Clamp bit support was added in VI, along with 16-bit operations.
      getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
        .minScalar(0, S32)
        .scalarize(0)
        .lower();
    }

    // FIXME: DAG expansion gets better results. The widening uses the smaller
    // range values and goes for the min/max lowering directly.
    getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT})
      .minScalar(0, S32)
      .scalarize(0)
      .lower();
  }

  getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_SREM, G_UREM})
    .customFor({S32, S64})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0, 32)
    .scalarize(0);

  auto &Mulh = getActionDefinitionsBuilder({G_UMULH, G_SMULH})
                   .legalFor({S32})
                   .maxScalarOrElt(0, S32);

  if (ST.hasVOP3PInsts()) {
    Mulh
      .clampMaxNumElements(0, S8, 2)
      .lowerFor({V2S8});
  }

  Mulh
    .scalarize(0)
    .lower();

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, S16, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 64), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_USUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}, {S32, S32}})
    .minScalar(0, S32)
    // TODO: .scalarize(0)
    .lower();

  getActionDefinitionsBuilder(G_BITCAST)
    // Don't worry about the size constraint.
    .legalIf(all(isRegisterType(0), isRegisterType(1)))
    .lower();


  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, S16, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
    .legalIf(isPointer(0))
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16})
    .clampScalar(0, S16, S64);

  getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
      .legalIf(isRegisterType(0))
      // s1 and s16 are special cases because they have legal operations on
      // them, but don't really occupy registers in the normal way.
      .legalFor({S1, S16})
      .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
      .clampScalarOrElt(0, S32, MaxScalar)
      .widenScalarToNextPow2(0, 32)
      .clampMaxNumElements(0, S32, 16);

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  // If the amount is divergent, we have to do a wave reduction to get the
  // maximum value, so this is expanded during RegBankSelect.
  getActionDefinitionsBuilder(G_DYN_STACKALLOC)
    .legalFor({{PrivatePtr, S32}});

  getActionDefinitionsBuilder(G_GLOBAL_VALUE)
    .customIf(typeIsNot(0, PrivatePtr));

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});
  auto &TrigActions = getActionDefinitionsBuilder({G_FSIN, G_FCOS})
    .customFor({S32, S64});
  auto &FDIVActions = getActionDefinitionsBuilder(G_FDIV)
    .customFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});

    TrigActions.customFor({S16});
    FDIVActions.customFor({S16});
  }

  auto &MinNumMaxNum = getActionDefinitionsBuilder({
      G_FMINNUM, G_FMAXNUM, G_FMINNUM_IEEE, G_FMAXNUM_IEEE});

  if (ST.hasVOP3PInsts()) {
    MinNumMaxNum.customFor(FPTypesPK16)
      .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
      .clampMaxNumElements(0, S16, 2)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else if (ST.has16BitInsts()) {
    MinNumMaxNum.customFor(FPTypes16)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else {
    MinNumMaxNum.customFor(FPTypesBase)
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);

  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  TrigActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  FDIVActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  getActionDefinitionsBuilder({G_FNEG, G_FABS})
    .legalFor(FPTypesPK16)
    .clampMaxNumElements(0, S16, 2)
    .scalarize(0)
    .clampScalar(0, S16, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_FSQRT, G_FFLOOR})
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);

    if (ST.hasFractBug()) {
      getActionDefinitionsBuilder(G_FFLOOR)
        .customFor({S64})
        .legalFor({S32, S64})
        .scalarize(0)
        .clampScalar(0, S32, S64);
    } else {
      getActionDefinitionsBuilder(G_FFLOOR)
        .legalFor({S32, S64})
        .scalarize(0)
        .clampScalar(0, S32, S64);
    }
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0)
    .lower();

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .narrowScalarFor({{S64, S16}}, changeTo(0, S32))
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
      // Use actual fsub instruction
      .legalFor({S32})
      // Must use fadd + fneg
      .lowerFor({S64, S16, V2S16})
      .scalarize(0)
      .clampScalar(0, S32, S64);

  // Whether this is legal depends on the floating point mode for the function.
  auto &FMad = getActionDefinitionsBuilder(G_FMAD);
  if (ST.hasMadF16() && ST.hasMadMacF32Insts())
    FMad.customFor({S32, S16});
  else if (ST.hasMadMacF32Insts())
    FMad.customFor({S32});
  else if (ST.hasMadF16())
    FMad.customFor({S16});
  FMad.scalarize(0)
      .lower();

  auto &FRem = getActionDefinitionsBuilder(G_FREM);
  if (ST.has16BitInsts()) {
    FRem.customFor({S16, S32, S64});
  } else {
    FRem.minScalar(0, S32)
        .customFor({S32, S64});
  }
  FRem.scalarize(0);

  // TODO: Do we need to clamp maximum bitwidth?
  getActionDefinitionsBuilder(G_TRUNC)
    .legalIf(isScalar(0))
    .legalFor({{V2S16, V2S32}})
    .clampMaxNumElements(0, S16, 2)
    // Avoid scalarizing in cases that should be truly illegal. In unresolvable
    // situations (like an invalid implicit use), we don't want to infinite loop
    // in the legalizer.
    .fewerElementsIf(elementTypeIsLegal(0), LegalizeMutations::scalarize(0))
    .alwaysLegal();

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1}})
    .scalarize(0)
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(1, 32);

  // TODO: Split s1->s64 during regbankselect for VALU.
  auto &IToFP = getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}, {S16, S32}})
    .lowerFor({{S32, S64}})
    .lowerIf(typeIs(1, S1))
    .customFor({{S64, S64}});
  if (ST.has16BitInsts())
    IToFP.legalFor({{S16, S16}});
  IToFP.clampScalar(1, S32, S64)
       .minScalar(0, S32)
       .scalarize(0)
       .widenScalarToNextPow2(1);

  auto &FPToI = getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}, {S32, S16}})
    .customFor({{S64, S64}})
    .narrowScalarFor({{S64, S16}}, changeTo(0, S32));
  if (ST.has16BitInsts())
    FPToI.legalFor({{S16, S16}});
  else
    FPToI.minScalar(1, S32);

  FPToI.minScalar(0, S32)
       .scalarize(0)
       .lower();

  // Lower roundeven into G_FRINT
  getActionDefinitionsBuilder({G_INTRINSIC_ROUND, G_INTRINSIC_ROUNDEVEN})
    .scalarize(0)
    .lower();

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S16, S32, S64})
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_PTR_ADD)
    .legalIf(all(isPointer(0), sameSize(0, 1)))
    .scalarize(0)
    .scalarSameSizeAs(1, 0);

  getActionDefinitionsBuilder(G_PTRMASK)
    .legalIf(all(sameSize(0, 1), typeInSet(1, {S64, S32})))
    .scalarSameSizeAs(1, 0)
    .scalarize(0);

  auto &CmpBuilder =
    getActionDefinitionsBuilder(G_ICMP)
    // The compare output type differs based on the register bank of the output,
    // so make both s1 and s32 legal.
    //
    // Scalar compares producing output in scc will be promoted to s32, as that
    // is the allocatable register type that will be needed for the copy from
    // scc. This will be promoted during RegBankSelect, and we assume something
    // before that won't try to use s32 result types.
    //
    // Vector compares producing an output in vcc/SGPR will use s1 in VCC reg
    // bank.
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalForCartesianProduct(
      {S32}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr});
  if (ST.has16BitInsts()) {
    CmpBuilder.legalFor({{S1, S16}});
  }

  CmpBuilder
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeInSet(0, {S1, S32}), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalForCartesianProduct({S1}, ST.has16BitInsts() ? FPTypes16 : FPTypesBase)
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fpow has a selection pattern that should move to custom lowering.
  auto &Exp2Ops = getActionDefinitionsBuilder({G_FEXP2, G_FLOG2});
  if (ST.has16BitInsts())
    Exp2Ops.legalFor({S32, S16});
  else
    Exp2Ops.legalFor({S32});
  Exp2Ops.clampScalar(0, MinScalarFPTy, S32);
  Exp2Ops.scalarize(0);

  auto &ExpOps = getActionDefinitionsBuilder({G_FEXP, G_FLOG, G_FLOG10, G_FPOW});
  if (ST.has16BitInsts())
    ExpOps.customFor({{S32}, {S16}});
  else
    ExpOps.customFor({S32});
  ExpOps.clampScalar(0, MinScalarFPTy, S32)
        .scalarize(0);

  getActionDefinitionsBuilder(G_FPOWI)
    .clampScalar(0, MinScalarFPTy, S32)
    .lower();

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder(G_CTPOP)
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // The hardware instructions return a different result on 0 than the generic
  // instructions expect. The hardware produces -1, but these produce the
  // bitwidth.
  getActionDefinitionsBuilder({G_CTLZ, G_CTTZ})
    .scalarize(0)
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32)
    .lower();

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  getActionDefinitionsBuilder(G_BITREVERSE)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_BSWAP)
      .legalFor({S16, S32, V2S16})
      .clampMaxNumElements(0, S16, 2)
      // FIXME: Fixing non-power-of-2 before clamp is a workaround for a
      // narrowScalar limitation.
      .widenScalarToNextPow2(0)
      .clampScalar(0, S16, S32)
      .scalarize(0);

    if (ST.hasVOP3PInsts()) {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16, V2S16})
        .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
        .clampMaxNumElements(0, S16, 2)
        .minScalar(0, S16)
        .widenScalarToNextPow2(0)
        .scalarize(0)
        .lower();
    } else {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16})
        .widenScalarToNextPow2(0)
        .minScalar(0, S16)
        .scalarize(0)
        .lower();
    }
  } else {
    // TODO: Should have same legality without v_perm_b32
    getActionDefinitionsBuilder(G_BSWAP)
      .legalFor({S32})
      .lowerIf(scalarNarrowerThan(0, 32))
      // FIXME: Fixing non-power-of-2 before clamp is a workaround for a
      // narrowScalar limitation.
      .widenScalarToNextPow2(0)
      .maxScalar(0, S32)
      .scalarize(0)
      .lower();

    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .legalFor({S32})
      .minScalar(0, S32)
      .widenScalarToNextPow2(0)
      .scalarize(0)
      .lower();
  }

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(largerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      largerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
    .scalarize(0)
    .custom();

  const auto needToSplitMemOp = [=](const LegalityQuery &Query,
                                    bool IsLoad) -> bool {
    const LLT DstTy = Query.Types[0];

    // Split vector extloads.
    unsigned MemSize = Query.MMODescrs[0].SizeInBits;
    unsigned AlignBits = Query.MMODescrs[0].AlignInBits;

    if (MemSize < DstTy.getSizeInBits())
      MemSize = std::max(MemSize, AlignBits);

    if (DstTy.isVector() && DstTy.getSizeInBits() > MemSize)
      return true;

    const LLT PtrTy = Query.Types[1];
    unsigned AS = PtrTy.getAddressSpace();
    if (MemSize > maxSizeForAddrSpace(ST, AS, IsLoad))
      return true;

    // Catch weird sized loads that don't evenly divide into the access sizes
    // TODO: May be able to widen depending on alignment etc.
    unsigned NumRegs = (MemSize + 31) / 32;
    if (NumRegs == 3) {
      if (!ST.hasDwordx3LoadStores())
        return true;
    } else {
      // If the alignment allows, these should have been widened.
      if (!isPowerOf2_32(NumRegs))
        return true;
    }

    if (AlignBits < MemSize) {
      const SITargetLowering *TLI = ST.getTargetLowering();
      return !TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS,
                                                      Align(AlignBits / 8));
    }

    return false;
  };

  unsigned GlobalAlign32 = ST.hasUnalignedBufferAccessEnabled() ? 0 : 32;
  unsigned GlobalAlign16 = ST.hasUnalignedBufferAccessEnabled() ? 0 : 16;
  unsigned GlobalAlign8 = ST.hasUnalignedBufferAccessEnabled() ? 0 : 8;

  // TODO: Refine based on subtargets which support unaligned access or 128-bit
  // LDS
  // TODO: Unsupported flat for SI.

  for (unsigned Op : {G_LOAD, G_STORE}) {
    const bool IsStore = Op == G_STORE;

    auto &Actions = getActionDefinitionsBuilder(Op);
    // Explicitly list some common cases.
    // TODO: Does this help compile time at all?
    Actions.legalForTypesWithMemDesc({{S32, GlobalPtr, 32, GlobalAlign32},
                                      {V2S32, GlobalPtr, 64, GlobalAlign32},
                                      {V4S32, GlobalPtr, 128, GlobalAlign32},
                                      {S64, GlobalPtr, 64, GlobalAlign32},
                                      {V2S64, GlobalPtr, 128, GlobalAlign32},
                                      {V2S16, GlobalPtr, 32, GlobalAlign32},
                                      {S32, GlobalPtr, 8, GlobalAlign8},
                                      {S32, GlobalPtr, 16, GlobalAlign16},

                                      {S32, LocalPtr, 32, 32},
                                      {S64, LocalPtr, 64, 32},
                                      {V2S32, LocalPtr, 64, 32},
                                      {S32, LocalPtr, 8, 8},
                                      {S32, LocalPtr, 16, 16},
                                      {V2S16, LocalPtr, 32, 32},

                                      {S32, PrivatePtr, 32, 32},
                                      {S32, PrivatePtr, 8, 8},
                                      {S32, PrivatePtr, 16, 16},
                                      {V2S16, PrivatePtr, 32, 32},

                                      {S32, ConstantPtr, 32, GlobalAlign32},
                                      {V2S32, ConstantPtr, 64, GlobalAlign32},
                                      {V4S32, ConstantPtr, 128, GlobalAlign32},
                                      {S64, ConstantPtr, 64, GlobalAlign32},
                                      {V2S32, ConstantPtr, 32, GlobalAlign32}});
    Actions.legalIf(
      [=](const LegalityQuery &Query) -> bool {
        return isLoadStoreLegal(ST, Query, Op);
      });

    // Constant 32-bit is handled by addrspacecasting the 32-bit pointer to
    // 64-bits.
    //
    // TODO: Should generalize bitcast action into coerce, which will also cover
    // inserting addrspacecasts.
    Actions.customIf(typeIs(1, Constant32Ptr));

    // Turn any illegal element vectors into something easier to deal
    // with. These will ultimately produce 32-bit scalar shifts to extract the
    // parts anyway.
    //
    // For odd 16-bit element vectors, prefer to split those into pieces with
    // 16-bit vector parts.
    Actions.bitcastIf(
      [=](const LegalityQuery &Query) -> bool {
        return shouldBitcastLoadStoreType(ST, Query.Types[0],
                                          Query.MMODescrs[0].SizeInBits);
      }, bitcastToRegisterType(0));

    if (!IsStore) {
      // Widen suitably aligned loads by loading extra bytes. The standard
      // legalization actions can't properly express widening memory operands.
      Actions.customIf([=](const LegalityQuery &Query) -> bool {
        return shouldWidenLoad(ST, Query, G_LOAD);
      });
    }

    // FIXME: load/store narrowing should be moved to lower action
    Actions
        .narrowScalarIf(
            [=](const LegalityQuery &Query) -> bool {
              return !Query.Types[0].isVector() &&
                     needToSplitMemOp(Query, Op == G_LOAD);
            },
            [=](const LegalityQuery &Query) -> std::pair<unsigned, LLT> {
              const LLT DstTy = Query.Types[0];
              const LLT PtrTy = Query.Types[1];

              const unsigned DstSize = DstTy.getSizeInBits();
              unsigned MemSize = Query.MMODescrs[0].SizeInBits;

              // Split extloads.
              if (DstSize > MemSize)
                return std::make_pair(0, LLT::scalar(MemSize));

              if (!isPowerOf2_32(DstSize)) {
                // We're probably decomposing an odd sized store. Try to split
                // to the widest type. TODO: Account for alignment. As-is it
                // should be OK, since the new parts will be further legalized.
                unsigned FloorSize = PowerOf2Floor(DstSize);
                return std::make_pair(0, LLT::scalar(FloorSize));
              }

              if (DstSize > 32 && (DstSize % 32 != 0)) {
                // FIXME: Need a way to specify non-extload of larger size if
                // suitably aligned.
                return std::make_pair(0, LLT::scalar(32 * (DstSize / 32)));
              }

              unsigned MaxSize = maxSizeForAddrSpace(ST,
                                                     PtrTy.getAddressSpace(),
                                                     Op == G_LOAD);
              if (MemSize > MaxSize)
                return std::make_pair(0, LLT::scalar(MaxSize));

              unsigned Align = Query.MMODescrs[0].AlignInBits;
              return std::make_pair(0, LLT::scalar(Align));
            })
        .fewerElementsIf(
            [=](const LegalityQuery &Query) -> bool {
              return Query.Types[0].isVector() &&
                     needToSplitMemOp(Query, Op == G_LOAD);
            },
            [=](const LegalityQuery &Query) -> std::pair<unsigned, LLT> {
              const LLT DstTy = Query.Types[0];
              const LLT PtrTy = Query.Types[1];

              LLT EltTy = DstTy.getElementType();
              unsigned MaxSize = maxSizeForAddrSpace(ST,
                                                     PtrTy.getAddressSpace(),
                                                     Op == G_LOAD);

              // FIXME: Handle widened to power of 2 results better. This ends
              // up scalarizing.
              // FIXME: 3 element stores scalarized on SI

              // Split if it's too large for the address space.
              if (Query.MMODescrs[0].SizeInBits > MaxSize) {
                unsigned NumElts = DstTy.getNumElements();
                unsigned EltSize = EltTy.getSizeInBits();

                if (MaxSize % EltSize == 0) {
                  return std::make_pair(
                    0, LLT::scalarOrVector(MaxSize / EltSize, EltTy));
                }

                unsigned NumPieces = Query.MMODescrs[0].SizeInBits / MaxSize;

                // FIXME: Refine when odd breakdowns handled
                // The scalars will need to be re-legalized.
                if (NumPieces == 1 || NumPieces >= NumElts ||
                    NumElts % NumPieces != 0)
                  return std::make_pair(0, EltTy);

                return std::make_pair(0,
                                      LLT::vector(NumElts / NumPieces, EltTy));
              }

              // FIXME: We could probably handle weird extending loads better.
              unsigned MemSize = Query.MMODescrs[0].SizeInBits;
              if (DstTy.getSizeInBits() > MemSize)
                return std::make_pair(0, EltTy);

              unsigned EltSize = EltTy.getSizeInBits();
              unsigned DstSize = DstTy.getSizeInBits();
              if (!isPowerOf2_32(DstSize)) {
                // We're probably decomposing an odd sized store. Try to split
                // to the widest type. TODO: Account for alignment. As-is it
                // should be OK, since the new parts will be further legalized.
                unsigned FloorSize = PowerOf2Floor(DstSize);
                return std::make_pair(
                  0, LLT::scalarOrVector(FloorSize / EltSize, EltTy));
              }

              // Need to split because of alignment.
              unsigned Align = Query.MMODescrs[0].AlignInBits;
              if (EltSize > Align &&
                  (EltSize / Align < DstTy.getNumElements())) {
                return std::make_pair(0, LLT::vector(EltSize / Align, EltTy));
              }

              // May need relegalization for the scalars.
              return std::make_pair(0, EltTy);
            })
    .lowerIfMemSizeNotPow2()
    .minScalar(0, S32);

    if (IsStore)
      Actions.narrowScalarIf(isWideScalarTruncStore(0), changeTo(0, S32));

    Actions
        .widenScalarToNextPow2(0)
        .moreElementsIf(vectorSmallerThan(0, 32), moreEltsToNext32Bit(0))
        .lower();
  }

  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
                       .legalForTypesWithMemDesc({{S32, GlobalPtr, 8, 8},
                                                  {S32, GlobalPtr, 16, 2 * 8},
                                                  {S32, LocalPtr, 8, 8},
                                                  {S32, LocalPtr, 16, 16},
                                                  {S32, PrivatePtr, 8, 8},
                                                  {S32, PrivatePtr, 16, 16},
                                                  {S32, ConstantPtr, 8, 8},
                                                  {S32, ConstantPtr, 16, 2 * 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc(
        {{S32, FlatPtr, 8, 8}, {S32, FlatPtr, 16, 16}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr},
               {S32, RegionPtr}, {S64, RegionPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  if (ST.hasLDSFPAtomics()) {
 | 
						|
    getActionDefinitionsBuilder(G_ATOMICRMW_FADD)
 | 
						|
      .legalFor({{S32, LocalPtr}, {S32, RegionPtr}});
 | 
						|
  }
 | 
						|
 | 
						|
  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, and output
 | 
						|
  // demarshalling
 | 
						|
  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
 | 
						|
    .customFor({{S32, GlobalPtr}, {S64, GlobalPtr},
 | 
						|
                {S32, FlatPtr}, {S64, FlatPtr}})
 | 
						|
    .legalFor({{S32, LocalPtr}, {S64, LocalPtr},
 | 
						|
               {S32, RegionPtr}, {S64, RegionPtr}});
 | 
						|
  // TODO: Pointer types, any 32-bit or 64-bit vector
 | 
						|
 | 
						|
  // Condition should be s32 for scalar, s1 for vector.
 | 
						|
  getActionDefinitionsBuilder(G_SELECT)
 | 
						|
    .legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
 | 
						|
          GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
 | 
						|
          LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1, S32})
 | 
						|
    .clampScalar(0, S16, S64)
 | 
						|
    .scalarize(1)
 | 
						|
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
 | 
						|
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
 | 
						|
    .clampMaxNumElements(0, S32, 2)
 | 
						|
    .clampMaxNumElements(0, LocalPtr, 2)
 | 
						|
    .clampMaxNumElements(0, PrivatePtr, 2)
 | 
						|
    .scalarize(0)
 | 
						|
    .widenScalarToNextPow2(0)
 | 
						|
    .legalIf(all(isPointer(0), typeInSet(1, {S1, S32})));
 | 
						|
 | 
						|
  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
 | 
						|
  // be more flexible with the shift amount type.
 | 
						|
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
 | 
						|
    .legalFor({{S32, S32}, {S64, S32}});
 | 
						|
  if (ST.has16BitInsts()) {
 | 
						|
    if (ST.hasVOP3PInsts()) {
 | 
						|
      Shifts.legalFor({{S16, S16}, {V2S16, V2S16}})
 | 
						|
            .clampMaxNumElements(0, S16, 2);
 | 
						|
    } else
 | 
						|
      Shifts.legalFor({{S16, S16}});
 | 
						|
 | 
						|
    // TODO: Support 16-bit shift amounts for all types
 | 
						|
    Shifts.widenScalarIf(
 | 
						|
      [=](const LegalityQuery &Query) {
 | 
						|
        // Use 16-bit shift amounts for any 16-bit shift. Otherwise we want a
 | 
						|
        // 32-bit amount.
 | 
						|
        const LLT ValTy = Query.Types[0];
 | 
						|
        const LLT AmountTy = Query.Types[1];
 | 
						|
        return ValTy.getSizeInBits() <= 16 &&
 | 
						|
               AmountTy.getSizeInBits() < 16;
 | 
						|
      }, changeTo(1, S16));
 | 
						|
    Shifts.maxScalarIf(typeIs(0, S16), 1, S16);
 | 
						|
    Shifts.clampScalar(1, S32, S32);
 | 
						|
    Shifts.clampScalar(0, S16, S64);
 | 
						|
    Shifts.widenScalarToNextPow2(0, 16);
 | 
						|
 | 
						|
    getActionDefinitionsBuilder({G_SSHLSAT, G_USHLSAT})
 | 
						|
      .minScalar(0, S16)
 | 
						|
      .scalarize(0)
 | 
						|
      .lower();
 | 
						|
  } else {
 | 
						|
    // Make sure we legalize the shift amount type first, as the general
 | 
						|
    // expansion for the shifted type will produce much worse code if it hasn't
 | 
						|
    // been truncated already.
 | 
						|
    Shifts.clampScalar(1, S32, S32);
 | 
						|
    Shifts.clampScalar(0, S32, S64);
 | 
						|
    Shifts.widenScalarToNextPow2(0, 32);
 | 
						|
 | 
						|
    getActionDefinitionsBuilder({G_SSHLSAT, G_USHLSAT})
 | 
						|
      .minScalar(0, S32)
 | 
						|
      .scalarize(0)
 | 
						|
      .lower();
 | 
						|
  }
 | 
						|
  Shifts.scalarize(0);
 | 
						|
 | 
						|
  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
 | 
						|
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
 | 
						|
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
 | 
						|
    unsigned IdxTypeIdx = 2;
 | 
						|
 | 
						|
    getActionDefinitionsBuilder(Op)
 | 
						|
      .customIf([=](const LegalityQuery &Query) {
 | 
						|
          const LLT EltTy = Query.Types[EltTypeIdx];
 | 
						|
          const LLT VecTy = Query.Types[VecTypeIdx];
 | 
						|
          const LLT IdxTy = Query.Types[IdxTypeIdx];
 | 
						|
          const unsigned EltSize = EltTy.getSizeInBits();
 | 
						|
          return (EltSize == 32 || EltSize == 64) &&
 | 
						|
                  VecTy.getSizeInBits() % 32 == 0 &&
 | 
						|
                  VecTy.getSizeInBits() <= MaxRegisterSize &&
 | 
						|
                  IdxTy.getSizeInBits() == 32;
 | 
						|
        })
 | 
						|
      .bitcastIf(all(sizeIsMultipleOf32(VecTypeIdx), scalarOrEltNarrowerThan(VecTypeIdx, 32)),
 | 
						|
                 bitcastToVectorElement32(VecTypeIdx))
 | 
						|
      //.bitcastIf(vectorSmallerThan(1, 32), bitcastToScalar(1))
 | 
						|
      .bitcastIf(
 | 
						|
        all(sizeIsMultipleOf32(VecTypeIdx), scalarOrEltWiderThan(VecTypeIdx, 64)),
 | 
						|
        [=](const LegalityQuery &Query) {
 | 
						|
          // For > 64-bit element types, try to turn this into a 64-bit
 | 
						|
          // element vector since we may be able to do better indexing
 | 
						|
          // if this is scalar. If not, fall back to 32.
 | 
						|
          const LLT EltTy = Query.Types[EltTypeIdx];
 | 
						|
          const LLT VecTy = Query.Types[VecTypeIdx];
 | 
						|
          const unsigned DstEltSize = EltTy.getSizeInBits();
 | 
						|
          const unsigned VecSize = VecTy.getSizeInBits();
 | 
						|
 | 
						|
          const unsigned TargetEltSize = DstEltSize % 64 == 0 ? 64 : 32;
 | 
						|
          return std::make_pair(
 | 
						|
            VecTypeIdx, LLT::vector(VecSize / TargetEltSize, TargetEltSize));
 | 
						|
        })
 | 
						|
      .clampScalar(EltTypeIdx, S32, S64)
 | 
						|
      .clampScalar(VecTypeIdx, S32, S64)
 | 
						|
      .clampScalar(IdxTypeIdx, S32, S32)
 | 
						|
      .clampMaxNumElements(VecTypeIdx, S32, 32)
 | 
						|
      // TODO: Clamp elements for 64-bit vectors?
 | 
						|
      // It should only be necessary with variable indexes.
 | 
						|
      // As a last resort, lower to the stack
 | 
						|
      .lower();
 | 
						|
  }
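
  // The result of G_EXTRACT_VECTOR_ELT must always match the vector's element
  // type; anything else is unsupported.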
  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
 | 
						|
    .unsupportedIf([=](const LegalityQuery &Query) {
 | 
						|
        const LLT &EltTy = Query.Types[1].getElementType();
 | 
						|
        return Query.Types[0] != EltTy;
 | 
						|
      });
 | 
						|
 | 
						|
  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
 | 
						|
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
 | 
						|
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;
 | 
						|
 | 
						|
    // FIXME: Doesn't handle extract of illegal sizes.
 | 
						|
    getActionDefinitionsBuilder(Op)
 | 
						|
      .lowerIf(all(typeIs(LitTyIdx, S16), sizeIs(BigTyIdx, 32)))
 | 
						|
      // FIXME: Multiples of 16 should not be legal.
 | 
						|
      .legalIf([=](const LegalityQuery &Query) {
 | 
						|
          const LLT BigTy = Query.Types[BigTyIdx];
 | 
						|
          const LLT LitTy = Query.Types[LitTyIdx];
 | 
						|
          return (BigTy.getSizeInBits() % 32 == 0) &&
 | 
						|
                 (LitTy.getSizeInBits() % 16 == 0);
 | 
						|
        })
 | 
						|
      .widenScalarIf(
 | 
						|
        [=](const LegalityQuery &Query) {
 | 
						|
          const LLT BigTy = Query.Types[BigTyIdx];
 | 
						|
          return (BigTy.getScalarSizeInBits() < 16);
 | 
						|
        },
 | 
						|
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
 | 
						|
      .widenScalarIf(
 | 
						|
        [=](const LegalityQuery &Query) {
 | 
						|
          const LLT LitTy = Query.Types[LitTyIdx];
 | 
						|
          return (LitTy.getScalarSizeInBits() < 16);
 | 
						|
        },
 | 
						|
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
 | 
						|
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
 | 
						|
      .widenScalarToNextPow2(BigTyIdx, 32);
 | 
						|
 | 
						|
  }
 | 
						|
 | 
						|
  auto &BuildVector = getActionDefinitionsBuilder(G_BUILD_VECTOR)
 | 
						|
    .legalForCartesianProduct(AllS32Vectors, {S32})
 | 
						|
    .legalForCartesianProduct(AllS64Vectors, {S64})
 | 
						|
    .clampNumElements(0, V16S32, V32S32)
 | 
						|
    .clampNumElements(0, V2S64, V16S64)
 | 
						|
    .fewerElementsIf(isWideVec16(0), changeTo(0, V2S16));
 | 
						|
 | 
						|
  if (ST.hasScalarPackInsts()) {
 | 
						|
    BuildVector
 | 
						|
      // FIXME: Should probably widen s1 vectors straight to s32
 | 
						|
      .minScalarOrElt(0, S16)
 | 
						|
      // Widen source elements and produce a G_BUILD_VECTOR_TRUNC
 | 
						|
      .minScalar(1, S32);
 | 
						|
 | 
						|
    getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC)
 | 
						|
      .legalFor({V2S16, S32})
 | 
						|
      .lower();
 | 
						|
    BuildVector.minScalarOrElt(0, S32);
 | 
						|
  } else {
 | 
						|
    BuildVector.customFor({V2S16, S16});
 | 
						|
    BuildVector.minScalarOrElt(0, S32);
 | 
						|
 | 
						|
    getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC)
 | 
						|
      .customFor({V2S16, S32})
 | 
						|
      .lower();
 | 
						|
  }
 | 
						|
 | 
						|
  BuildVector.legalIf(isRegisterType(0));
 | 
						|
 | 
						|
  // FIXME: Clamp maximum size
 | 
						|
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
 | 
						|
    .legalIf(all(isRegisterType(0), isRegisterType(1)))
 | 
						|
    .clampMaxNumElements(0, S32, 32)
 | 
						|
    .clampMaxNumElements(1, S16, 2) // TODO: Make 4?
 | 
						|
    .clampMaxNumElements(0, S16, 64);
 | 
						|
 | 
						|
  // TODO: Don't fully scalarize v2s16 pieces? Or combine out those
  // pre-legalize.
 | 
						|
  if (ST.hasVOP3PInsts()) {
 | 
						|
    getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
 | 
						|
      .customFor({V2S16, V2S16})
 | 
						|
      .lower();
 | 
						|
  } else
 | 
						|
    getActionDefinitionsBuilder(G_SHUFFLE_VECTOR).lower();
 | 
						|
 | 
						|
  // Merge/Unmerge
 | 
						|
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
 | 
						|
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
 | 
						|
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
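
    // Elements are only handled directly if they are at least 8 bits, at most
    // 512 bits, and a power-of-2 size; vectors of other element types are
    // scalarized below.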
    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
 | 
						|
      const LLT Ty = Query.Types[TypeIdx];
 | 
						|
      if (Ty.isVector()) {
 | 
						|
        const LLT &EltTy = Ty.getElementType();
 | 
						|
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 512)
 | 
						|
          return true;
 | 
						|
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
 | 
						|
          return true;
 | 
						|
      }
 | 
						|
      return false;
 | 
						|
    };
 | 
						|
 | 
						|
    auto &Builder = getActionDefinitionsBuilder(Op)
 | 
						|
      .legalIf(all(isRegisterType(0), isRegisterType(1)))
 | 
						|
      .lowerFor({{S16, V2S16}})
 | 
						|
      .lowerIf([=](const LegalityQuery &Query) {
 | 
						|
          const LLT BigTy = Query.Types[BigTyIdx];
 | 
						|
          return BigTy.getSizeInBits() == 32;
 | 
						|
        })
 | 
						|
      // Try to widen to s16 first for small types.
 | 
						|
      // TODO: Only do this on targets with legal s16 shifts
 | 
						|
      .minScalarOrEltIf(scalarNarrowerThan(LitTyIdx, 16), LitTyIdx, S16)
 | 
						|
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
 | 
						|
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
 | 
						|
      .fewerElementsIf(all(typeIs(0, S16), vectorWiderThan(1, 32),
 | 
						|
                           elementTypeIs(1, S16)),
 | 
						|
                       changeTo(1, V2S16))
 | 
						|
      // Clamp the little scalar to s32-s512 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
 | 
						|
      .clampScalar(LitTyIdx, S32, S512)
 | 
						|
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
 | 
						|
      // Break up vectors with weird elements into scalars
 | 
						|
      .fewerElementsIf(
 | 
						|
        [=](const LegalityQuery &Query) { return notValidElt(Query, LitTyIdx); },
 | 
						|
        scalarize(0))
 | 
						|
      .fewerElementsIf(
 | 
						|
        [=](const LegalityQuery &Query) { return notValidElt(Query, BigTyIdx); },
 | 
						|
        scalarize(1))
 | 
						|
      .clampScalar(BigTyIdx, S32, MaxScalar);
 | 
						|
 | 
						|
    if (Op == G_MERGE_VALUES) {
 | 
						|
      Builder.widenScalarIf(
 | 
						|
        // TODO: Use 16-bit shifts if legal for 8-bit values?
 | 
						|
        [=](const LegalityQuery &Query) {
 | 
						|
          const LLT Ty = Query.Types[LitTyIdx];
 | 
						|
          return Ty.getSizeInBits() < 32;
 | 
						|
        },
 | 
						|
        changeTo(LitTyIdx, S32));
 | 
						|
    }
 | 
						|
 | 
						|
    Builder.widenScalarIf(
 | 
						|
      [=](const LegalityQuery &Query) {
 | 
						|
        const LLT Ty = Query.Types[BigTyIdx];
 | 
						|
        return !isPowerOf2_32(Ty.getSizeInBits()) &&
 | 
						|
          Ty.getSizeInBits() % 16 != 0;
 | 
						|
      },
 | 
						|
      [=](const LegalityQuery &Query) {
 | 
						|
        // Pick the next power of 2, or a multiple of 64 once above 128,
        // whichever is smaller.
 | 
						|
        const LLT &Ty = Query.Types[BigTyIdx];
 | 
						|
        unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
 | 
						|
        if (NewSizeInBits >= 256) {
 | 
						|
          unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
 | 
						|
          if (RoundedTo < NewSizeInBits)
 | 
						|
            NewSizeInBits = RoundedTo;
 | 
						|
        }
 | 
						|
        return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
 | 
						|
      })
 | 
						|
      // Any vectors left are the wrong size. Scalarize them.
 | 
						|
      .scalarize(0)
 | 
						|
      .scalarize(1);
 | 
						|
  }
 | 
						|
 | 
						|
  // S64 is only legal on SALU, and needs to be broken into 32-bit elements in
 | 
						|
  // RegBankSelect.
 | 
						|
  auto &SextInReg = getActionDefinitionsBuilder(G_SEXT_INREG)
 | 
						|
    .legalFor({{S32}, {S64}});
 | 
						|
 | 
						|
  if (ST.hasVOP3PInsts()) {
 | 
						|
    SextInReg.lowerFor({{V2S16}})
 | 
						|
      // Prefer to reduce vector widths for 16-bit vectors before lowering, to
 | 
						|
      // get more vector shift opportunities, since we'll get those when
 | 
						|
      // expanded.
 | 
						|
      .fewerElementsIf(elementTypeIs(0, S16), changeTo(0, V2S16));
 | 
						|
  } else if (ST.has16BitInsts()) {
 | 
						|
    SextInReg.lowerFor({{S32}, {S64}, {S16}});
 | 
						|
  } else {
 | 
						|
    // Prefer to promote to s32 before lowering if we don't have 16-bit
    // shifts. This avoids a lot of intermediate truncate and extend operations.
 | 
						|
    SextInReg.lowerFor({{S32}, {S64}});
 | 
						|
  }
 | 
						|
 | 
						|
  SextInReg
 | 
						|
    .scalarize(0)
 | 
						|
    .clampScalar(0, S32, S64)
 | 
						|
    .lower();
 | 
						|
 | 
						|
  getActionDefinitionsBuilder(G_FSHR)
 | 
						|
    .legalFor({{S32, S32}})
 | 
						|
    .scalarize(0)
 | 
						|
    .lower();
 | 
						|
 | 
						|
  getActionDefinitionsBuilder(G_READCYCLECOUNTER)
 | 
						|
    .legalFor({S64});
 | 
						|
 | 
						|
  getActionDefinitionsBuilder(G_FENCE)
 | 
						|
    .alwaysLegal();
 | 
						|
 | 
						|
  getActionDefinitionsBuilder({
 | 
						|
      // TODO: Verify V_BFI_B32 is generated from expanded bit ops
 | 
						|
      G_FCOPYSIGN,
 | 
						|
 | 
						|
      G_ATOMIC_CMPXCHG_WITH_SUCCESS,
 | 
						|
      G_ATOMICRMW_NAND,
 | 
						|
      G_ATOMICRMW_FSUB,
 | 
						|
      G_READ_REGISTER,
 | 
						|
      G_WRITE_REGISTER,
 | 
						|
 | 
						|
      G_SADDO, G_SSUBO,
 | 
						|
 | 
						|
       // TODO: Implement
 | 
						|
      G_FMINIMUM, G_FMAXIMUM,
 | 
						|
      G_FSHL
 | 
						|
    }).lower();
 | 
						|
 | 
						|
  getActionDefinitionsBuilder({G_VASTART, G_VAARG, G_BRJT, G_JUMP_TABLE,
 | 
						|
        G_INDEXED_LOAD, G_INDEXED_SEXTLOAD,
 | 
						|
        G_INDEXED_ZEXTLOAD, G_INDEXED_STORE})
 | 
						|
    .unsupported();
 | 
						|
 | 
						|
  computeTables();
 | 
						|
  verify(*ST.getInstrInfo());
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
 | 
						|
                                         MachineInstr &MI) const {
 | 
						|
  MachineIRBuilder &B = Helper.MIRBuilder;
 | 
						|
  MachineRegisterInfo &MRI = *B.getMRI();
 | 
						|
 | 
						|
  switch (MI.getOpcode()) {
 | 
						|
  case TargetOpcode::G_ADDRSPACE_CAST:
 | 
						|
    return legalizeAddrSpaceCast(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_FRINT:
 | 
						|
    return legalizeFrint(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_FCEIL:
 | 
						|
    return legalizeFceil(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_FREM:
 | 
						|
    return legalizeFrem(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_INTRINSIC_TRUNC:
 | 
						|
    return legalizeIntrinsicTrunc(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_SITOFP:
 | 
						|
    return legalizeITOFP(MI, MRI, B, true);
 | 
						|
  case TargetOpcode::G_UITOFP:
 | 
						|
    return legalizeITOFP(MI, MRI, B, false);
 | 
						|
  case TargetOpcode::G_FPTOSI:
 | 
						|
    return legalizeFPTOI(MI, MRI, B, true);
 | 
						|
  case TargetOpcode::G_FPTOUI:
 | 
						|
    return legalizeFPTOI(MI, MRI, B, false);
 | 
						|
  case TargetOpcode::G_FMINNUM:
 | 
						|
  case TargetOpcode::G_FMAXNUM:
 | 
						|
  case TargetOpcode::G_FMINNUM_IEEE:
 | 
						|
  case TargetOpcode::G_FMAXNUM_IEEE:
 | 
						|
    return legalizeMinNumMaxNum(Helper, MI);
 | 
						|
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
 | 
						|
    return legalizeExtractVectorElt(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_INSERT_VECTOR_ELT:
 | 
						|
    return legalizeInsertVectorElt(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_SHUFFLE_VECTOR:
 | 
						|
    return legalizeShuffleVector(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_FSIN:
 | 
						|
  case TargetOpcode::G_FCOS:
 | 
						|
    return legalizeSinCos(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_GLOBAL_VALUE:
 | 
						|
    return legalizeGlobalValue(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_LOAD:
 | 
						|
    return legalizeLoad(Helper, MI);
 | 
						|
  case TargetOpcode::G_FMAD:
 | 
						|
    return legalizeFMad(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_FDIV:
 | 
						|
    return legalizeFDIV(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_UDIV:
 | 
						|
  case TargetOpcode::G_UREM:
 | 
						|
    return legalizeUDIV_UREM(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_SDIV:
 | 
						|
  case TargetOpcode::G_SREM:
 | 
						|
    return legalizeSDIV_SREM(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_ATOMIC_CMPXCHG:
 | 
						|
    return legalizeAtomicCmpXChg(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_FLOG:
 | 
						|
    return legalizeFlog(MI, B, numbers::ln2f);
 | 
						|
  case TargetOpcode::G_FLOG10:
 | 
						|
    return legalizeFlog(MI, B, numbers::ln2f / numbers::ln10f);
 | 
						|
  case TargetOpcode::G_FEXP:
 | 
						|
    return legalizeFExp(MI, B);
 | 
						|
  case TargetOpcode::G_FPOW:
 | 
						|
    return legalizeFPow(MI, B);
 | 
						|
  case TargetOpcode::G_FFLOOR:
 | 
						|
    return legalizeFFloor(MI, MRI, B);
 | 
						|
  case TargetOpcode::G_BUILD_VECTOR:
 | 
						|
    return legalizeBuildVector(MI, MRI, B);
 | 
						|
  default:
 | 
						|
    return false;
 | 
						|
  }
 | 
						|
 | 
						|
  llvm_unreachable("expected switch to return");
 | 
						|
}
 | 
						|
 | 
						|
Register AMDGPULegalizerInfo::getSegmentAperture(
 | 
						|
  unsigned AS,
 | 
						|
  MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  MachineFunction &MF = B.getMF();
 | 
						|
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
 | 
						|
  const LLT S32 = LLT::scalar(32);
 | 
						|
 | 
						|
  assert(AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS);
 | 
						|
 | 
						|
  if (ST.hasApertureRegs()) {
 | 
						|
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
 | 
						|
    // getreg.
 | 
						|
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
 | 
						|
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
 | 
						|
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
 | 
						|
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
 | 
						|
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
 | 
						|
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
 | 
						|
    unsigned Encoding =
 | 
						|
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
 | 
						|
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
 | 
						|
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
 | 
						|
 | 
						|
    Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
 | 
						|
 | 
						|
    B.buildInstr(AMDGPU::S_GETREG_B32)
 | 
						|
      .addDef(GetReg)
 | 
						|
      .addImm(Encoding);
 | 
						|
    MRI.setType(GetReg, S32);
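
    // The getreg result holds the aperture base in its low bits; shift it up
    // to form the 32-bit aperture value.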
    auto ShiftAmt = B.buildConstant(S32, WidthM1 + 1);
 | 
						|
    return B.buildShl(S32, GetReg, ShiftAmt).getReg(0);
 | 
						|
  }
 | 
						|
 | 
						|
  Register QueuePtr = MRI.createGenericVirtualRegister(
 | 
						|
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
 | 
						|
 | 
						|
  if (!loadInputValue(QueuePtr, B, AMDGPUFunctionArgInfo::QUEUE_PTR))
 | 
						|
    return Register();
 | 
						|
 | 
						|
  // Offset into amd_queue_t for group_segment_aperture_base_hi /
 | 
						|
  // private_segment_aperture_base_hi.
 | 
						|
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
 | 
						|
 | 
						|
  // TODO: can we be smarter about machine pointer info?
 | 
						|
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
 | 
						|
  MachineMemOperand *MMO = MF.getMachineMemOperand(
 | 
						|
      PtrInfo,
 | 
						|
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
 | 
						|
          MachineMemOperand::MOInvariant,
 | 
						|
      4, commonAlignment(Align(64), StructOffset));
 | 
						|
 | 
						|
  Register LoadAddr;
 | 
						|
 | 
						|
  B.materializePtrAdd(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
 | 
						|
  return B.buildLoad(S32, LoadAddr, *MMO).getReg(0);
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  MachineFunction &MF = B.getMF();
 | 
						|
 | 
						|
  const LLT S32 = LLT::scalar(32);
 | 
						|
  Register Dst = MI.getOperand(0).getReg();
 | 
						|
  Register Src = MI.getOperand(1).getReg();
 | 
						|
 | 
						|
  LLT DstTy = MRI.getType(Dst);
 | 
						|
  LLT SrcTy = MRI.getType(Src);
 | 
						|
  unsigned DestAS = DstTy.getAddressSpace();
 | 
						|
  unsigned SrcAS = SrcTy.getAddressSpace();
 | 
						|
 | 
						|
  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
 | 
						|
  // vector element.
 | 
						|
  assert(!DstTy.isVector());
 | 
						|
 | 
						|
  const AMDGPUTargetMachine &TM
 | 
						|
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());
 | 
						|
 | 
						|
  if (TM.isNoopAddrSpaceCast(SrcAS, DestAS)) {
 | 
						|
    MI.setDesc(B.getTII().get(TargetOpcode::G_BITCAST));
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
 | 
						|
    // Truncate.
 | 
						|
    B.buildExtract(Dst, Src, 0);
 | 
						|
    MI.eraseFromParent();
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
 | 
						|
    const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
 | 
						|
    uint32_t AddrHiVal = Info->get32BitAddressHighBits();
 | 
						|
 | 
						|
    // FIXME: This is a bit ugly due to creating a merge of 2 pointers to
    // another pointer. Merge operands are required to be the same type, but
    // creating an extra ptrtoint would be kind of pointless.
 | 
						|
    auto HighAddr = B.buildConstant(
 | 
						|
      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS_32BIT, 32), AddrHiVal);
 | 
						|
    B.buildMerge(Dst, {Src, HighAddr});
 | 
						|
    MI.eraseFromParent();
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
 | 
						|
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
 | 
						|
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
 | 
						|
    unsigned NullVal = TM.getNullPointerValue(DestAS);
 | 
						|
 | 
						|
    auto SegmentNull = B.buildConstant(DstTy, NullVal);
 | 
						|
    auto FlatNull = B.buildConstant(SrcTy, 0);
 | 
						|
 | 
						|
    // Extract low 32-bits of the pointer.
 | 
						|
    auto PtrLo32 = B.buildExtract(DstTy, Src, 0);
 | 
						|
 | 
						|
    auto CmpRes =
 | 
						|
        B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, FlatNull.getReg(0));
 | 
						|
    B.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));
 | 
						|
 | 
						|
    MI.eraseFromParent();
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  if (SrcAS != AMDGPUAS::LOCAL_ADDRESS && SrcAS != AMDGPUAS::PRIVATE_ADDRESS)
 | 
						|
    return false;
 | 
						|
 | 
						|
  if (!ST.hasFlatAddressSpace())
 | 
						|
    return false;
 | 
						|
 | 
						|
  auto SegmentNull =
 | 
						|
      B.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
 | 
						|
  auto FlatNull =
 | 
						|
      B.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
 | 
						|
 | 
						|
  Register ApertureReg = getSegmentAperture(SrcAS, MRI, B);
 | 
						|
  if (!ApertureReg.isValid())
 | 
						|
    return false;
 | 
						|
 | 
						|
  auto CmpRes =
 | 
						|
      B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, SegmentNull.getReg(0));
 | 
						|
 | 
						|
  // Coerce the type of the low half of the result so we can use merge_values.
 | 
						|
  Register SrcAsInt = B.buildPtrToInt(S32, Src).getReg(0);
 | 
						|
 | 
						|
  // TODO: Should we allow mismatched types but matching sizes in merges to
 | 
						|
  // avoid the ptrtoint?
 | 
						|
  auto BuildPtr = B.buildMerge(DstTy, {SrcAsInt, ApertureReg});
 | 
						|
  B.buildSelect(Dst, CmpRes, BuildPtr, FlatNull);
 | 
						|
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeFrint(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  Register Src = MI.getOperand(1).getReg();
 | 
						|
  LLT Ty = MRI.getType(Src);
 | 
						|
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);
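
  // Adding and then subtracting 2^52 (with the sign of the source) forces f64
  // rounding to an integer. Inputs with magnitude greater than
  // 0x1.fffffffffffffp+51 are already integral and are passed through by the
  // final select.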
  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
 | 
						|
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
 | 
						|
 | 
						|
  auto C1 = B.buildFConstant(Ty, C1Val);
 | 
						|
  auto CopySign = B.buildFCopysign(Ty, C1, Src);
 | 
						|
 | 
						|
  // TODO: Should this propagate fast-math-flags?
 | 
						|
  auto Tmp1 = B.buildFAdd(Ty, Src, CopySign);
 | 
						|
  auto Tmp2 = B.buildFSub(Ty, Tmp1, CopySign);
 | 
						|
 | 
						|
  auto C2 = B.buildFConstant(Ty, C2Val);
 | 
						|
  auto Fabs = B.buildFAbs(Ty, Src);
 | 
						|
 | 
						|
  auto Cond = B.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
 | 
						|
  B.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeFceil(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
 | 
						|
  const LLT S1 = LLT::scalar(1);
 | 
						|
  const LLT S64 = LLT::scalar(64);
 | 
						|
 | 
						|
  Register Src = MI.getOperand(1).getReg();
 | 
						|
  assert(MRI.getType(Src) == S64);
 | 
						|
 | 
						|
  // result = trunc(src)
 | 
						|
  // if (src > 0.0 && src != result)
 | 
						|
  //   result += 1.0
 | 
						|
 | 
						|
  auto Trunc = B.buildIntrinsicTrunc(S64, Src);
 | 
						|
 | 
						|
  const auto Zero = B.buildFConstant(S64, 0.0);
 | 
						|
  const auto One = B.buildFConstant(S64, 1.0);
 | 
						|
  auto Lt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
 | 
						|
  auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
 | 
						|
  auto And = B.buildAnd(S1, Lt0, NeTrunc);
 | 
						|
  auto Add = B.buildSelect(S64, And, One, Zero);
 | 
						|
 | 
						|
  // TODO: Should this propagate fast-math-flags?
 | 
						|
  B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeFrem(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
    Register DstReg = MI.getOperand(0).getReg();
 | 
						|
    Register Src0Reg = MI.getOperand(1).getReg();
 | 
						|
    Register Src1Reg = MI.getOperand(2).getReg();
 | 
						|
    auto Flags = MI.getFlags();
 | 
						|
    LLT Ty = MRI.getType(DstReg);
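
    // Expand frem as src0 - trunc(src0 / src1) * src1, computed here as
    // fma(-trunc(src0 / src1), src1, src0).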
    auto Div = B.buildFDiv(Ty, Src0Reg, Src1Reg, Flags);
 | 
						|
    auto Trunc = B.buildIntrinsicTrunc(Ty, Div, Flags);
 | 
						|
    auto Neg = B.buildFNeg(Ty, Trunc, Flags);
 | 
						|
    B.buildFMA(DstReg, Neg, Src1Reg, Src0Reg, Flags);
 | 
						|
    MI.eraseFromParent();
 | 
						|
    return true;
 | 
						|
}
 | 
						|
 | 
						|
static MachineInstrBuilder extractF64Exponent(Register Hi,
 | 
						|
                                              MachineIRBuilder &B) {
 | 
						|
  const unsigned FractBits = 52;
 | 
						|
  const unsigned ExpBits = 11;
 | 
						|
  LLT S32 = LLT::scalar(32);
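
  // The f64 exponent field occupies bits [62:52], i.e. bits [30:20] of the
  // high 32-bit half; extract it with ubfe and subtract the exponent bias
  // (1023).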
  auto Const0 = B.buildConstant(S32, FractBits - 32);
 | 
						|
  auto Const1 = B.buildConstant(S32, ExpBits);
 | 
						|
 | 
						|
  auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
 | 
						|
    .addUse(Hi)
 | 
						|
    .addUse(Const0.getReg(0))
 | 
						|
    .addUse(Const1.getReg(0));
 | 
						|
 | 
						|
  return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  const LLT S1 = LLT::scalar(1);
 | 
						|
  const LLT S32 = LLT::scalar(32);
 | 
						|
  const LLT S64 = LLT::scalar(64);
 | 
						|
 | 
						|
  Register Src = MI.getOperand(1).getReg();
 | 
						|
  assert(MRI.getType(Src) == S64);
 | 
						|
 | 
						|
  // TODO: Should this use extract since the low half is unused?
 | 
						|
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
 | 
						|
  Register Hi = Unmerge.getReg(1);
 | 
						|
 | 
						|
  // Extract the upper half, since this is where we will find the sign and
 | 
						|
  // exponent.
 | 
						|
  auto Exp = extractF64Exponent(Hi, B);
 | 
						|
 | 
						|
  const unsigned FractBits = 52;
 | 
						|
 | 
						|
  // Extract the sign bit.
 | 
						|
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
 | 
						|
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);
 | 
						|
 | 
						|
  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);
 | 
						|
 | 
						|
  const auto Zero32 = B.buildConstant(S32, 0);
 | 
						|
 | 
						|
  // Extend back to 64-bits.
 | 
						|
  auto SignBit64 = B.buildMerge(S64, {Zero32, SignBit});
 | 
						|
 | 
						|
  auto Shr = B.buildAShr(S64, FractMask, Exp);
 | 
						|
  auto Not = B.buildNot(S64, Shr);
 | 
						|
  auto Tmp0 = B.buildAnd(S64, Src, Not);
 | 
						|
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);
 | 
						|
 | 
						|
  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
 | 
						|
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);
 | 
						|
 | 
						|
  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
 | 
						|
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeITOFP(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B, bool Signed) const {
 | 
						|
 | 
						|
  Register Dst = MI.getOperand(0).getReg();
 | 
						|
  Register Src = MI.getOperand(1).getReg();
 | 
						|
 | 
						|
  const LLT S64 = LLT::scalar(64);
 | 
						|
  const LLT S32 = LLT::scalar(32);
 | 
						|
 | 
						|
  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);
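
  // Convert each 32-bit half separately and combine them as
  // (fp(hi) * 2^32) + uitofp(lo), using ldexp for the scale by 2^32.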
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
 | 
						|
 | 
						|
  auto CvtHi = Signed ?
 | 
						|
    B.buildSITOFP(S64, Unmerge.getReg(1)) :
 | 
						|
    B.buildUITOFP(S64, Unmerge.getReg(1));
 | 
						|
 | 
						|
  auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));
 | 
						|
 | 
						|
  auto ThirtyTwo = B.buildConstant(S32, 32);
 | 
						|
  auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
 | 
						|
    .addUse(CvtHi.getReg(0))
 | 
						|
    .addUse(ThirtyTwo.getReg(0));
 | 
						|
 | 
						|
  // TODO: Should this propagate fast-math-flags?
 | 
						|
  B.buildFAdd(Dst, LdExp, CvtLo);
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
// TODO: Copied from DAG implementation. Verify logic and document how this
 | 
						|
// actually works.
 | 
						|
bool AMDGPULegalizerInfo::legalizeFPTOI(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B, bool Signed) const {
 | 
						|
 | 
						|
  Register Dst = MI.getOperand(0).getReg();
 | 
						|
  Register Src = MI.getOperand(1).getReg();
 | 
						|
 | 
						|
  const LLT S64 = LLT::scalar(64);
 | 
						|
  const LLT S32 = LLT::scalar(32);
 | 
						|
 | 
						|
  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);
 | 
						|
 | 
						|
  unsigned Flags = MI.getFlags();
 | 
						|
 | 
						|
  auto Trunc = B.buildIntrinsicTrunc(S64, Src, Flags);
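
  // K0 == 2^-32 and K1 == -2^32. The high 32 bits are floor(trunc(src) * K0)
  // and the low 32 bits are fma(floor(trunc(src) * K0), K1, trunc(src)).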
  auto K0 = B.buildFConstant(S64, BitsToDouble(UINT64_C(0x3df0000000000000)));
 | 
						|
  auto K1 = B.buildFConstant(S64, BitsToDouble(UINT64_C(0xc1f0000000000000)));
 | 
						|
 | 
						|
  auto Mul = B.buildFMul(S64, Trunc, K0, Flags);
 | 
						|
  auto FloorMul = B.buildFFloor(S64, Mul, Flags);
 | 
						|
  auto Fma = B.buildFMA(S64, FloorMul, K1, Trunc, Flags);
 | 
						|
 | 
						|
  auto Hi = Signed ?
 | 
						|
    B.buildFPTOSI(S32, FloorMul) :
 | 
						|
    B.buildFPTOUI(S32, FloorMul);
 | 
						|
  auto Lo = B.buildFPTOUI(S32, Fma);
 | 
						|
 | 
						|
  B.buildMerge(Dst, { Lo, Hi });
 | 
						|
  MI.eraseFromParent();
 | 
						|
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
 | 
						|
                                               MachineInstr &MI) const {
 | 
						|
  MachineFunction &MF = Helper.MIRBuilder.getMF();
 | 
						|
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 | 
						|
 | 
						|
  const bool IsIEEEOp = MI.getOpcode() == AMDGPU::G_FMINNUM_IEEE ||
 | 
						|
                        MI.getOpcode() == AMDGPU::G_FMAXNUM_IEEE;
 | 
						|
 | 
						|
  // With ieee_mode disabled, the instructions have the correct behavior
 | 
						|
  // already for G_FMINNUM/G_FMAXNUM
 | 
						|
  if (!MFI->getMode().IEEE)
 | 
						|
    return !IsIEEEOp;
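
  // In IEEE mode only the *_IEEE variants already have the desired behavior;
  // expand plain G_FMINNUM/G_FMAXNUM with the generic lowering.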
  if (IsIEEEOp)
 | 
						|
    return true;
 | 
						|
 | 
						|
  return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  // TODO: Should move some of this into LegalizerHelper.
 | 
						|
 | 
						|
  // TODO: Promote dynamic indexing of s16 to s32
 | 
						|
 | 
						|
  // FIXME: Artifact combiner probably should have replaced the truncated
 | 
						|
  // constant before this, so we shouldn't need
 | 
						|
  // getConstantVRegValWithLookThrough.
 | 
						|
  Optional<ValueAndVReg> MaybeIdxVal =
 | 
						|
      getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
 | 
						|
  if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
 | 
						|
    return true;
 | 
						|
  const int64_t IdxVal = MaybeIdxVal->Value.getSExtValue();
 | 
						|
 | 
						|
  Register Dst = MI.getOperand(0).getReg();
 | 
						|
  Register Vec = MI.getOperand(1).getReg();
 | 
						|
 | 
						|
  LLT VecTy = MRI.getType(Vec);
 | 
						|
  LLT EltTy = VecTy.getElementType();
 | 
						|
  assert(EltTy == MRI.getType(Dst));
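
  // Fold the constant index into an extract at a fixed bit offset;
  // out-of-bounds indices produce undef.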
  if (IdxVal < VecTy.getNumElements())
 | 
						|
    B.buildExtract(Dst, Vec, IdxVal * EltTy.getSizeInBits());
 | 
						|
  else
 | 
						|
    B.buildUndef(Dst);
 | 
						|
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  // TODO: Should move some of this into LegalizerHelper.
 | 
						|
 | 
						|
  // TODO: Promote dynamic indexing of s16 to s32
 | 
						|
 | 
						|
  // FIXME: Artifact combiner probably should have replaced the truncated
 | 
						|
  // constant before this, so we shouldn't need
 | 
						|
  // getConstantVRegValWithLookThrough.
 | 
						|
  Optional<ValueAndVReg> MaybeIdxVal =
 | 
						|
      getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
 | 
						|
  if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
 | 
						|
    return true;
 | 
						|
 | 
						|
  int64_t IdxVal = MaybeIdxVal->Value.getSExtValue();
 | 
						|
  Register Dst = MI.getOperand(0).getReg();
 | 
						|
  Register Vec = MI.getOperand(1).getReg();
 | 
						|
  Register Ins = MI.getOperand(2).getReg();
 | 
						|
 | 
						|
  LLT VecTy = MRI.getType(Vec);
 | 
						|
  LLT EltTy = VecTy.getElementType();
 | 
						|
  assert(EltTy == MRI.getType(Ins));
 | 
						|
 | 
						|
  if (IdxVal < VecTy.getNumElements())
 | 
						|
    B.buildInsert(Dst, Vec, Ins, IdxVal * EltTy.getSizeInBits());
 | 
						|
  else
 | 
						|
    B.buildUndef(Dst);
 | 
						|
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeShuffleVector(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  const LLT V2S16 = LLT::vector(2, 16);
 | 
						|
 | 
						|
  Register Dst = MI.getOperand(0).getReg();
 | 
						|
  Register Src0 = MI.getOperand(1).getReg();
 | 
						|
  LLT DstTy = MRI.getType(Dst);
 | 
						|
  LLT SrcTy = MRI.getType(Src0);
 | 
						|
 | 
						|
  if (SrcTy == V2S16 && DstTy == V2S16 &&
 | 
						|
      AMDGPU::isLegalVOP3PShuffleMask(MI.getOperand(3).getShuffleMask()))
 | 
						|
    return true;
 | 
						|
 | 
						|
  MachineIRBuilder HelperBuilder(MI);
 | 
						|
  GISelObserverWrapper DummyObserver;
 | 
						|
  LegalizerHelper Helper(B.getMF(), DummyObserver, HelperBuilder);
 | 
						|
  return Helper.lowerShuffleVector(MI) == LegalizerHelper::Legalized;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeSinCos(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
 | 
						|
  Register DstReg = MI.getOperand(0).getReg();
 | 
						|
  Register SrcReg = MI.getOperand(1).getReg();
 | 
						|
  LLT Ty = MRI.getType(DstReg);
 | 
						|
  unsigned Flags = MI.getFlags();
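
  // Scale the input by 1/(2*pi) for the amdgcn sin/cos intrinsics; on
  // subtargets with a reduced trig input range, also take the fractional part
  // of the scaled value first.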
  Register TrigVal;
 | 
						|
  auto OneOver2Pi = B.buildFConstant(Ty, 0.5 * numbers::inv_pi);
 | 
						|
  if (ST.hasTrigReducedRange()) {
 | 
						|
    auto MulVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags);
 | 
						|
    TrigVal = B.buildIntrinsic(Intrinsic::amdgcn_fract, {Ty}, false)
 | 
						|
      .addUse(MulVal.getReg(0))
 | 
						|
      .setMIFlags(Flags).getReg(0);
 | 
						|
  } else
 | 
						|
    TrigVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags).getReg(0);
 | 
						|
 | 
						|
  Intrinsic::ID TrigIntrin = MI.getOpcode() == AMDGPU::G_FSIN ?
 | 
						|
    Intrinsic::amdgcn_sin : Intrinsic::amdgcn_cos;
 | 
						|
  B.buildIntrinsic(TrigIntrin, makeArrayRef<Register>(DstReg), false)
 | 
						|
    .addUse(TrigVal)
 | 
						|
    .setMIFlags(Flags);
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::buildPCRelGlobalAddress(Register DstReg, LLT PtrTy,
 | 
						|
                                                  MachineIRBuilder &B,
 | 
						|
                                                  const GlobalValue *GV,
 | 
						|
                                                  int64_t Offset,
 | 
						|
                                                  unsigned GAFlags) const {
 | 
						|
  assert(isInt<32>(Offset + 4) && "32-bit offset is expected!");
 | 
						|
  // In order to support pc-relative addressing, SI_PC_ADD_REL_OFFSET is lowered
 | 
						|
  // to the following code sequence:
 | 
						|
  //
 | 
						|
  // For constant address space:
 | 
						|
  //   s_getpc_b64 s[0:1]
 | 
						|
  //   s_add_u32 s0, s0, $symbol
 | 
						|
  //   s_addc_u32 s1, s1, 0
 | 
						|
  //
 | 
						|
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
 | 
						|
  //   a fixup or relocation is emitted to replace $symbol with a literal
 | 
						|
  //   constant, which is a pc-relative offset from the encoding of the $symbol
 | 
						|
  //   operand to the global variable.
 | 
						|
  //
 | 
						|
  // For global address space:
 | 
						|
  //   s_getpc_b64 s[0:1]
 | 
						|
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
 | 
						|
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
 | 
						|
  //
 | 
						|
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
 | 
						|
  //   fixups or relocations are emitted to replace $symbol@*@lo and
 | 
						|
  //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
 | 
						|
  //   which is a 64-bit pc-relative offset from the encoding of the $symbol
 | 
						|
  //   operand to the global variable.
 | 
						|
  //
 | 
						|
  // What we want here is an offset from the value returned by s_getpc
 | 
						|
  // (which is the address of the s_add_u32 instruction) to the global
 | 
						|
  // variable, but since the encoding of $symbol starts 4 bytes after the start
 | 
						|
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
 | 
						|
  // small. This requires us to add 4 to the global variable offset in order to
 | 
						|
  // compute the correct address. Similarly for the s_addc_u32 instruction, the
 | 
						|
  // encoding of $symbol starts 12 bytes after the start of the s_add_u32
 | 
						|
  // instruction.
 | 
						|
 | 
						|
  LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
 | 
						|
 | 
						|
  Register PCReg = PtrTy.getSizeInBits() != 32 ? DstReg :
 | 
						|
    B.getMRI()->createGenericVirtualRegister(ConstPtrTy);
 | 
						|
 | 
						|
  MachineInstrBuilder MIB = B.buildInstr(AMDGPU::SI_PC_ADD_REL_OFFSET)
 | 
						|
    .addDef(PCReg);
 | 
						|
 | 
						|
  MIB.addGlobalAddress(GV, Offset + 4, GAFlags);
 | 
						|
  if (GAFlags == SIInstrInfo::MO_NONE)
 | 
						|
    MIB.addImm(0);
 | 
						|
  else
 | 
						|
    MIB.addGlobalAddress(GV, Offset + 12, GAFlags + 1);
 | 
						|
 | 
						|
  B.getMRI()->setRegClass(PCReg, &AMDGPU::SReg_64RegClass);
 | 
						|
 | 
						|
  if (PtrTy.getSizeInBits() == 32)
 | 
						|
    B.buildExtract(DstReg, PCReg, 0);
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeGlobalValue(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  Register DstReg = MI.getOperand(0).getReg();
 | 
						|
  LLT Ty = MRI.getType(DstReg);
 | 
						|
  unsigned AS = Ty.getAddressSpace();
 | 
						|
 | 
						|
  const GlobalValue *GV = MI.getOperand(1).getGlobal();
 | 
						|
  MachineFunction &MF = B.getMF();
 | 
						|
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 | 
						|
 | 
						|
  if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
 | 
						|
    if (!MFI->isModuleEntryFunction()) {
 | 
						|
      const Function &Fn = MF.getFunction();
 | 
						|
      DiagnosticInfoUnsupported BadLDSDecl(
 | 
						|
        Fn, "local memory global used by non-kernel function", MI.getDebugLoc(),
 | 
						|
        DS_Warning);
 | 
						|
      Fn.getContext().diagnose(BadLDSDecl);
 | 
						|
 | 
						|
      // We currently don't have a way to correctly allocate LDS objects that
 | 
						|
      // aren't directly associated with a kernel. We do force inlining of
 | 
						|
      // functions that use local objects. However, if these dead functions are
 | 
						|
      // not eliminated, we don't want a compile time error. Just emit a warning
 | 
						|
      // and a trap, since there should be no callable path here.
 | 
						|
      B.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>(), true);
 | 
						|
      B.buildUndef(DstReg);
 | 
						|
      MI.eraseFromParent();
 | 
						|
      return true;
 | 
						|
    }
 | 
						|
 | 
						|
    // TODO: We could emit code to handle the initialization somewhere.
 | 
						|
    if (!AMDGPUTargetLowering::hasDefinedInitializer(GV)) {
 | 
						|
      const SITargetLowering *TLI = ST.getTargetLowering();
 | 
						|
      if (!TLI->shouldUseLDSConstAddress(GV)) {
 | 
						|
        MI.getOperand(1).setTargetFlags(SIInstrInfo::MO_ABS32_LO);
 | 
						|
        return true; // Leave in place;
 | 
						|
      }
 | 
						|
 | 
						|
      if (AS == AMDGPUAS::LOCAL_ADDRESS && GV->hasExternalLinkage()) {
 | 
						|
        Type *Ty = GV->getValueType();
 | 
						|
        // HIP uses an unsized array `extern __shared__ T s[]` or a similar
        // zero-sized type in other languages to declare dynamic shared memory
        // whose size is not known at compile time. Such arrays are allocated
        // by the runtime and placed directly after the statically allocated
        // ones, so they all share the same offset.
 | 
						|
        if (B.getDataLayout().getTypeAllocSize(Ty).isZero()) {
 | 
						|
          // Adjust alignment for that dynamic shared memory array.
 | 
						|
          MFI->setDynLDSAlign(B.getDataLayout(), *cast<GlobalVariable>(GV));
 | 
						|
          LLT S32 = LLT::scalar(32);
 | 
						|
          auto Sz =
 | 
						|
              B.buildIntrinsic(Intrinsic::amdgcn_groupstaticsize, {S32}, false);
 | 
						|
          B.buildIntToPtr(DstReg, Sz);
 | 
						|
          MI.eraseFromParent();
 | 
						|
          return true;
 | 
						|
        }
 | 
						|
      }
 | 
						|
 | 
						|
      B.buildConstant(
 | 
						|
          DstReg,
 | 
						|
          MFI->allocateLDSGlobal(B.getDataLayout(), *cast<GlobalVariable>(GV)));
 | 
						|
      MI.eraseFromParent();
 | 
						|
      return true;
 | 
						|
    }
 | 
						|
 | 
						|
    const Function &Fn = MF.getFunction();
 | 
						|
    DiagnosticInfoUnsupported BadInit(
 | 
						|
      Fn, "unsupported initializer for address space", MI.getDebugLoc());
 | 
						|
    Fn.getContext().diagnose(BadInit);
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  const SITargetLowering *TLI = ST.getTargetLowering();
 | 
						|
 | 
						|
  if (TLI->shouldEmitFixup(GV)) {
 | 
						|
    buildPCRelGlobalAddress(DstReg, Ty, B, GV, 0);
 | 
						|
    MI.eraseFromParent();
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  if (TLI->shouldEmitPCReloc(GV)) {
 | 
						|
    buildPCRelGlobalAddress(DstReg, Ty, B, GV, 0, SIInstrInfo::MO_REL32);
 | 
						|
    MI.eraseFromParent();
 | 
						|
    return true;
 | 
						|
  }
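
  // Otherwise form a pc-relative address of the GOT entry and load the
  // global's 64-bit address from it.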
  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
 | 
						|
  Register GOTAddr = MRI.createGenericVirtualRegister(PtrTy);
 | 
						|
 | 
						|
  MachineMemOperand *GOTMMO = MF.getMachineMemOperand(
 | 
						|
      MachinePointerInfo::getGOT(MF),
 | 
						|
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
 | 
						|
          MachineMemOperand::MOInvariant,
 | 
						|
      8 /*Size*/, Align(8));
 | 
						|
 | 
						|
  buildPCRelGlobalAddress(GOTAddr, PtrTy, B, GV, 0, SIInstrInfo::MO_GOTPCREL32);
 | 
						|
 | 
						|
  if (Ty.getSizeInBits() == 32) {
 | 
						|
    // Truncate if this is a 32-bit constant address.
 | 
						|
    auto Load = B.buildLoad(PtrTy, GOTAddr, *GOTMMO);
 | 
						|
    B.buildExtract(DstReg, Load, 0);
 | 
						|
  } else
 | 
						|
    B.buildLoad(DstReg, GOTAddr, *GOTMMO);
 | 
						|
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
static LLT widenToNextPowerOf2(LLT Ty) {
 | 
						|
  if (Ty.isVector())
 | 
						|
    return Ty.changeNumElements(PowerOf2Ceil(Ty.getNumElements()));
 | 
						|
  return LLT::scalar(PowerOf2Ceil(Ty.getSizeInBits()));
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeLoad(LegalizerHelper &Helper,
 | 
						|
                                       MachineInstr &MI) const {
 | 
						|
  MachineIRBuilder &B = Helper.MIRBuilder;
 | 
						|
  MachineRegisterInfo &MRI = *B.getMRI();
 | 
						|
  GISelChangeObserver &Observer = Helper.Observer;
 | 
						|
 | 
						|
  Register PtrReg = MI.getOperand(1).getReg();
 | 
						|
  LLT PtrTy = MRI.getType(PtrReg);
 | 
						|
  unsigned AddrSpace = PtrTy.getAddressSpace();
 | 
						|
 | 
						|
  if (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
 | 
						|
    LLT ConstPtr = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
 | 
						|
    auto Cast = B.buildAddrSpaceCast(ConstPtr, PtrReg);
 | 
						|
    Observer.changingInstr(MI);
 | 
						|
    MI.getOperand(1).setReg(Cast.getReg(0));
 | 
						|
    Observer.changedInstr(MI);
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  Register ValReg = MI.getOperand(0).getReg();
 | 
						|
  LLT ValTy = MRI.getType(ValReg);
 | 
						|
 | 
						|
  MachineMemOperand *MMO = *MI.memoperands_begin();
 | 
						|
  const unsigned ValSize = ValTy.getSizeInBits();
 | 
						|
  const unsigned MemSize = 8 * MMO->getSize();
 | 
						|
  const Align MemAlign = MMO->getAlign();
 | 
						|
  const unsigned AlignInBits = 8 * MemAlign.value();
 | 
						|
 | 
						|
  // Widen non-power-of-2 loads to the alignment if needed
 | 
						|
  if (shouldWidenLoad(ST, MemSize, AlignInBits, AddrSpace, MI.getOpcode())) {
 | 
						|
    const unsigned WideMemSize = PowerOf2Ceil(MemSize);
 | 
						|
 | 
						|
    // This was already the correct extending load result type, so just adjust
 | 
						|
    // the memory type.
 | 
						|
    if (WideMemSize == ValSize) {
 | 
						|
      MachineFunction &MF = B.getMF();
 | 
						|
 | 
						|
      MachineMemOperand *WideMMO =
 | 
						|
          MF.getMachineMemOperand(MMO, 0, WideMemSize / 8);
 | 
						|
      Observer.changingInstr(MI);
 | 
						|
      MI.setMemRefs(MF, {WideMMO});
 | 
						|
      Observer.changedInstr(MI);
 | 
						|
      return true;
 | 
						|
    }
 | 
						|
 | 
						|
    // Don't bother handling an edge case that should probably never be produced.
 | 
						|
    if (ValSize > WideMemSize)
 | 
						|
      return false;
 | 
						|
 | 
						|
    LLT WideTy = widenToNextPowerOf2(ValTy);
 | 
						|
 | 
						|
    Register WideLoad;
 | 
						|
    if (!WideTy.isVector()) {
 | 
						|
      WideLoad = B.buildLoadFromOffset(WideTy, PtrReg, *MMO, 0).getReg(0);
 | 
						|
      B.buildTrunc(ValReg, WideLoad).getReg(0);
 | 
						|
    } else {
 | 
						|
      // Extract the subvector.
 | 
						|
 | 
						|
      if (isRegisterType(ValTy)) {
 | 
						|
        // If this a case where G_EXTRACT is legal, use it.
 | 
						|
        // (e.g. <3 x s32> -> <4 x s32>)
 | 
						|
        WideLoad = B.buildLoadFromOffset(WideTy, PtrReg, *MMO, 0).getReg(0);
 | 
						|
        B.buildExtract(ValReg, WideLoad, 0);
 | 
						|
      } else {
 | 
						|
        // For cases where the widened type isn't a nice register value, unmerge
 | 
						|
        // from a widened register (e.g. <3 x s16> -> <4 x s16>)
 | 
						|
        B.setInsertPt(B.getMBB(), ++B.getInsertPt());
 | 
						|
        WideLoad = Helper.widenWithUnmerge(WideTy, ValReg);
 | 
						|
        B.setInsertPt(B.getMBB(), MI.getIterator());
 | 
						|
        B.buildLoadFromOffset(WideLoad, PtrReg, *MMO, 0);
 | 
						|
      }
 | 
						|
    }
 | 
						|
 | 
						|
    MI.eraseFromParent();
 | 
						|
    return true;
 | 
						|
  }
 | 
						|
 | 
						|
  return false;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeFMad(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI,
 | 
						|
  MachineIRBuilder &B) const {
 | 
						|
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
 | 
						|
  assert(Ty.isScalar());
 | 
						|
 | 
						|
  MachineFunction &MF = B.getMF();
 | 
						|
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
 | 
						|
 | 
						|
  // TODO: Always legal with future ftz flag.
 | 
						|
  // FIXME: Do we need just output?
 | 
						|
  if (Ty == LLT::scalar(32) && !MFI->getMode().allFP32Denormals())
 | 
						|
    return true;
 | 
						|
  if (Ty == LLT::scalar(16) && !MFI->getMode().allFP64FP16Denormals())
 | 
						|
    return true;
 | 
						|
 | 
						|
  MachineIRBuilder HelperBuilder(MI);
 | 
						|
  GISelObserverWrapper DummyObserver;
 | 
						|
  LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
 | 
						|
  return Helper.lowerFMad(MI) == LegalizerHelper::Legalized;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeAtomicCmpXChg(
 | 
						|
  MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
 | 
						|
  Register DstReg = MI.getOperand(0).getReg();
 | 
						|
  Register PtrReg = MI.getOperand(1).getReg();
 | 
						|
  Register CmpVal = MI.getOperand(2).getReg();
 | 
						|
  Register NewVal = MI.getOperand(3).getReg();
 | 
						|
 | 
						|
  assert(AMDGPU::isFlatGlobalAddrSpace(MRI.getType(PtrReg).getAddressSpace()) &&
 | 
						|
         "this should not have been custom lowered");
 | 
						|
 | 
						|
  LLT ValTy = MRI.getType(CmpVal);
 | 
						|
  LLT VecTy = LLT::vector(2, ValTy);
 | 
						|
 | 
						|
  Register PackedVal = B.buildBuildVector(VecTy, { NewVal, CmpVal }).getReg(0);
 | 
						|
 | 
						|
  B.buildInstr(AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG)
 | 
						|
    .addDef(DstReg)
 | 
						|
    .addUse(PtrReg)
 | 
						|
    .addUse(PackedVal)
 | 
						|
    .setMemRefs(MI.memoperands());
 | 
						|
 | 
						|
  MI.eraseFromParent();
 | 
						|
  return true;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeFlog(
 | 
						|
  MachineInstr &MI, MachineIRBuilder &B, double Log2BaseInverted) const {
 | 
						|
  Register Dst = MI.getOperand(0).getReg();
 | 
						|
  Register Src = MI.getOperand(1).getReg();
 | 
						|
  LLT Ty = B.getMRI()->getType(Dst);
 | 
						|
  unsigned Flags = MI.getFlags();
 | 
						|
 | 
						|
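  // log_b(x) is expanded as log2(x) * (1 / log2(b)); the caller passes the
  // precomputed 1 / log2(b) as Log2BaseInverted.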
  auto Log2Operand = B.buildFLog2(Ty, Src, Flags);
  auto Log2BaseInvertedOperand = B.buildFConstant(Ty, Log2BaseInverted);

  B.buildFMul(Dst, Log2Operand, Log2BaseInvertedOperand, Flags);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFExp(MachineInstr &MI,
                                       MachineIRBuilder &B) const {
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  unsigned Flags = MI.getFlags();
  LLT Ty = B.getMRI()->getType(Dst);

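  // exp(x) is expanded as exp2(x * log2(e)).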
  auto K = B.buildFConstant(Ty, numbers::log2e);
  auto Mul = B.buildFMul(Ty, Src, K, Flags);
  B.buildFExp2(Dst, Mul, Flags);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFPow(MachineInstr &MI,
                                       MachineIRBuilder &B) const {
  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  unsigned Flags = MI.getFlags();
  LLT Ty = B.getMRI()->getType(Dst);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);

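  // pow(x, y) is expanded as exp2(y * log2(x)). The multiply uses
  // amdgcn_fmul_legacy, presumably so that the zero-operand special cases
  // follow the legacy multiply rule (0 * x == 0).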
  if (Ty == S32) {
    auto Log = B.buildFLog2(S32, Src0, Flags);
    auto Mul = B.buildIntrinsic(Intrinsic::amdgcn_fmul_legacy, {S32}, false)
      .addUse(Log.getReg(0))
      .addUse(Src1)
      .setMIFlags(Flags);
    B.buildFExp2(Dst, Mul, Flags);
  } else if (Ty == S16) {
    // There's no f16 fmul_legacy, so we need to convert for it.
    auto Log = B.buildFLog2(S16, Src0, Flags);
    auto Ext0 = B.buildFPExt(S32, Log, Flags);
    auto Ext1 = B.buildFPExt(S32, Src1, Flags);
    auto Mul = B.buildIntrinsic(Intrinsic::amdgcn_fmul_legacy, {S32}, false)
      .addUse(Ext0.getReg(0))
      .addUse(Ext1.getReg(0))
      .setMIFlags(Flags);

    B.buildFExp2(Dst, B.buildFPTrunc(S16, Mul), Flags);
  } else
    return false;

  MI.eraseFromParent();
  return true;
}

// Find a source register, ignoring any possible source modifiers.
static Register stripAnySourceMods(Register OrigSrc, MachineRegisterInfo &MRI) {
  Register ModSrc = OrigSrc;
  if (MachineInstr *SrcFNeg = getOpcodeDef(AMDGPU::G_FNEG, ModSrc, MRI)) {
    ModSrc = SrcFNeg->getOperand(1).getReg();
    if (MachineInstr *SrcFAbs = getOpcodeDef(AMDGPU::G_FABS, ModSrc, MRI))
      ModSrc = SrcFAbs->getOperand(1).getReg();
  } else if (MachineInstr *SrcFAbs = getOpcodeDef(AMDGPU::G_FABS, ModSrc, MRI))
    ModSrc = SrcFAbs->getOperand(1).getReg();
  return ModSrc;
}

bool AMDGPULegalizerInfo::legalizeFFloor(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);
  Register Dst = MI.getOperand(0).getReg();
  Register OrigSrc = MI.getOperand(1).getReg();
  unsigned Flags = MI.getFlags();
  assert(ST.hasFractBug() && MRI.getType(Dst) == S64 &&
         "this should not have been custom lowered");

  // V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x))
  // is used instead. However, SI doesn't have V_FLOOR_F64, so the most
  // efficient way to implement it is using V_FRACT_F64. The workaround for the
  // V_FRACT bug is:
  //    fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)
  //
  // Convert floor(x) to (x - fract(x))

  auto Fract = B.buildIntrinsic(Intrinsic::amdgcn_fract, {S64}, false)
    .addUse(OrigSrc)
    .setMIFlags(Flags);

  // Give source modifier matching some assistance before obscuring a foldable
  // pattern.

  // TODO: We can avoid the neg on the fract? The input sign to fract
  // shouldn't matter?
  Register ModSrc = stripAnySourceMods(OrigSrc, MRI);

  auto Const = B.buildFConstant(S64, BitsToDouble(0x3fefffffffffffff));

  Register Min = MRI.createGenericVirtualRegister(S64);

  // We don't need to concern ourselves with the snan handling difference, so
  // use the one which will directly select.
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (MFI->getMode().IEEE)
    B.buildFMinNumIEEE(Min, Fract, Const, Flags);
  else
    B.buildFMinNum(Min, Fract, Const, Flags);

  Register CorrectedFract = Min;
  if (!MI.getFlag(MachineInstr::FmNoNans)) {
    auto IsNan = B.buildFCmp(CmpInst::FCMP_ORD, S1, ModSrc, ModSrc, Flags);
    CorrectedFract = B.buildSelect(S64, IsNan, ModSrc, Min, Flags).getReg(0);
  }

  auto NegFract = B.buildFNeg(S64, CorrectedFract, Flags);
  B.buildFAdd(Dst, OrigSrc, NegFract, Flags);

  MI.eraseFromParent();
  return true;
}

// Turn an illegal packed v2s16 build vector into bit operations.
// TODO: This should probably be a bitcast action in LegalizerHelper.
bool AMDGPULegalizerInfo::legalizeBuildVector(
  MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
  Register Dst = MI.getOperand(0).getReg();
  const LLT S32 = LLT::scalar(32);
  assert(MRI.getType(Dst) == LLT::vector(2, 16));

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  assert(MRI.getType(Src0) == LLT::scalar(16));

  auto Merge = B.buildMerge(S32, {Src0, Src1});
  B.buildBitcast(Dst, Merge);

  MI.eraseFromParent();
  return true;
}

// Check that this is a G_XOR x, -1
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::G_XOR)
    return false;
  auto ConstVal = getConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI);
  return ConstVal && *ConstVal == -1;
}

// Return the use branch instruction, otherwise null if the usage is invalid.
static MachineInstr *
verifyCFIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI, MachineInstr *&Br,
                  MachineBasicBlock *&UncondBrTarget, bool &Negated) {
  Register CondDef = MI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(CondDef))
    return nullptr;

  MachineBasicBlock *Parent = MI.getParent();
  MachineInstr *UseMI = &*MRI.use_instr_nodbg_begin(CondDef);

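  // If the condition is only consumed through a negation (G_XOR with -1), fold
  // the negation away, follow the negated value to its use instead, and report
  // it through 'Negated' so the caller can invert the branch sense.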
  if (isNot(MRI, *UseMI)) {
    Register NegatedCond = UseMI->getOperand(0).getReg();
    if (!MRI.hasOneNonDBGUse(NegatedCond))
      return nullptr;

    // We're deleting the def of this value, so we need to remove it.
    UseMI->eraseFromParent();

    UseMI = &*MRI.use_instr_nodbg_begin(NegatedCond);
    Negated = true;
  }

  if (UseMI->getParent() != Parent || UseMI->getOpcode() != AMDGPU::G_BRCOND)
    return nullptr;

  // Make sure the cond br is followed by a G_BR, or is the last instruction.
  MachineBasicBlock::iterator Next = std::next(UseMI->getIterator());
  if (Next == Parent->end()) {
    MachineFunction::iterator NextMBB = std::next(Parent->getIterator());
    if (NextMBB == Parent->getParent()->end()) // Illegal intrinsic use.
      return nullptr;
    UncondBrTarget = &*NextMBB;
  } else {
    if (Next->getOpcode() != AMDGPU::G_BR)
      return nullptr;
    Br = &*Next;
    UncondBrTarget = Br->getOperand(0).getMBB();
  }

  return UseMI;
}

bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
                                         const ArgDescriptor *Arg,
                                         const TargetRegisterClass *ArgRC,
                                         LLT ArgTy) const {
  MCRegister SrcReg = Arg->getRegister();
  assert(Register::isPhysicalRegister(SrcReg) && "Physical register expected");
  assert(DstReg.isVirtual() && "Virtual register expected");

  Register LiveIn = getFunctionLiveInPhysReg(B.getMF(), B.getTII(), SrcReg, *ArgRC,
                                             ArgTy);
  if (Arg->isMasked()) {
    // TODO: Should we try to emit this once in the entry block?
    const LLT S32 = LLT::scalar(32);
    const unsigned Mask = Arg->getMask();
    const unsigned Shift = countTrailingZeros<unsigned>(Mask);

    Register AndMaskSrc = LiveIn;

    if (Shift != 0) {
      auto ShiftAmt = B.buildConstant(S32, Shift);
      AndMaskSrc = B.buildLShr(S32, LiveIn, ShiftAmt).getReg(0);
    }

    B.buildAnd(DstReg, AndMaskSrc, B.buildConstant(S32, Mask >> Shift));
  } else {
    B.buildCopy(DstReg, LiveIn);
  }

  return true;
}

bool AMDGPULegalizerInfo::loadInputValue(
    Register DstReg, MachineIRBuilder &B,
    AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  const ArgDescriptor *Arg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;
  std::tie(Arg, ArgRC, ArgTy) = MFI->getPreloadedValue(ArgType);

  if (!Arg->isRegister() || !Arg->getRegister().isValid())
    return false; // TODO: Handle these
  return loadInputValue(DstReg, B, Arg, ArgRC, ArgTy);
}

bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
    AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  if (!loadInputValue(MI.getOperand(0).getReg(), B, ArgType))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFDIV(MachineInstr &MI,
                                       MachineRegisterInfo &MRI,
                                       MachineIRBuilder &B) const {
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  LLT S16 = LLT::scalar(16);
  LLT S32 = LLT::scalar(32);
  LLT S64 = LLT::scalar(64);

  if (DstTy == S16)
    return legalizeFDIV16(MI, MRI, B);
  if (DstTy == S32)
    return legalizeFDIV32(MI, MRI, B);
  if (DstTy == S64)
    return legalizeFDIV64(MI, MRI, B);

  return false;
}

void AMDGPULegalizerInfo::legalizeUDIV_UREM32Impl(MachineIRBuilder &B,
                                                  Register DstReg,
                                                  Register X,
                                                  Register Y,
                                                  bool IsDiv) const {
  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);

  // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
  // algorithm used here.
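  // In short: start from z ~= 2**32 / y via the hardware reciprocal (the
  // 0x4f7ffffe constant is a float just below 2**32), refine z with one
  // Newton-Raphson step, form the quotient estimate q = umulh(x, z) and the
  // remainder r = x - q * y, then apply at most two conditional corrections.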

  // Initial estimate of inv(y).
  auto FloatY = B.buildUITOFP(S32, Y);
  auto RcpIFlag = B.buildInstr(AMDGPU::G_AMDGPU_RCP_IFLAG, {S32}, {FloatY});
  auto Scale = B.buildFConstant(S32, BitsToFloat(0x4f7ffffe));
  auto ScaledY = B.buildFMul(S32, RcpIFlag, Scale);
  auto Z = B.buildFPTOUI(S32, ScaledY);

  // One round of UNR.
  auto NegY = B.buildSub(S32, B.buildConstant(S32, 0), Y);
  auto NegYZ = B.buildMul(S32, NegY, Z);
  Z = B.buildAdd(S32, Z, B.buildUMulH(S32, Z, NegYZ));

  // Quotient/remainder estimate.
  auto Q = B.buildUMulH(S32, X, Z);
  auto R = B.buildSub(S32, X, B.buildMul(S32, Q, Y));

  // First quotient/remainder refinement.
  auto One = B.buildConstant(S32, 1);
  auto Cond = B.buildICmp(CmpInst::ICMP_UGE, S1, R, Y);
  if (IsDiv)
    Q = B.buildSelect(S32, Cond, B.buildAdd(S32, Q, One), Q);
  R = B.buildSelect(S32, Cond, B.buildSub(S32, R, Y), R);

  // Second quotient/remainder refinement.
  Cond = B.buildICmp(CmpInst::ICMP_UGE, S1, R, Y);
  if (IsDiv)
    B.buildSelect(DstReg, Cond, B.buildAdd(S32, Q, One), Q);
  else
    B.buildSelect(DstReg, Cond, B.buildSub(S32, R, Y), R);
}

bool AMDGPULegalizerInfo::legalizeUDIV_UREM32(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B) const {
  const bool IsDiv = MI.getOpcode() == AMDGPU::G_UDIV;
  Register DstReg = MI.getOperand(0).getReg();
  Register Num = MI.getOperand(1).getReg();
  Register Den = MI.getOperand(2).getReg();
  legalizeUDIV_UREM32Impl(B, DstReg, Num, Den, IsDiv);
  MI.eraseFromParent();
  return true;
}

// Build integer reciprocal sequence around V_RCP_IFLAG_F32
//
// Return lo, hi of result
//
// %cvt.lo = G_UITOFP Val.lo
// %cvt.hi = G_UITOFP Val.hi
// %mad = G_FMAD %cvt.hi, 2**32, %cvt.lo
// %rcp = G_AMDGPU_RCP_IFLAG %mad
// %mul1 = G_FMUL %rcp, 0x5f7ffffc
// %mul2 = G_FMUL %mul1, 2**(-32)
// %trunc = G_INTRINSIC_TRUNC %mul2
// %mad2 = G_FMAD %trunc, -(2**32), %mul1
// return {G_FPTOUI %mad2, G_FPTOUI %trunc}
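// Note on the f32 magic constants used below: 0x4f800000 encodes 2.0f**32,
// 0x2f800000 encodes 2.0f**-32, 0xcf800000 encodes -(2.0f**32), and
// 0x5f7ffffc is a value just below 2.0f**64 used to scale the reciprocal.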
static std::pair<Register, Register> emitReciprocalU64(MachineIRBuilder &B,
                                                       Register Val) {
  const LLT S32 = LLT::scalar(32);
  auto Unmerge = B.buildUnmerge(S32, Val);

  auto CvtLo = B.buildUITOFP(S32, Unmerge.getReg(0));
  auto CvtHi = B.buildUITOFP(S32, Unmerge.getReg(1));

  auto Mad = B.buildFMAD(S32, CvtHi, // 2**32
                         B.buildFConstant(S32, BitsToFloat(0x4f800000)), CvtLo);

  auto Rcp = B.buildInstr(AMDGPU::G_AMDGPU_RCP_IFLAG, {S32}, {Mad});
  auto Mul1 =
      B.buildFMul(S32, Rcp, B.buildFConstant(S32, BitsToFloat(0x5f7ffffc)));

  // 2**(-32)
  auto Mul2 =
      B.buildFMul(S32, Mul1, B.buildFConstant(S32, BitsToFloat(0x2f800000)));
  auto Trunc = B.buildIntrinsicTrunc(S32, Mul2);

  // -(2**32)
  auto Mad2 = B.buildFMAD(S32, Trunc,
                          B.buildFConstant(S32, BitsToFloat(0xcf800000)), Mul1);

  auto ResultLo = B.buildFPTOUI(S32, Mad2);
  auto ResultHi = B.buildFPTOUI(S32, Trunc);

  return {ResultLo.getReg(0), ResultHi.getReg(0)};
}

void AMDGPULegalizerInfo::legalizeUDIV_UREM64Impl(MachineIRBuilder &B,
                                                  Register DstReg,
                                                  Register Numer,
                                                  Register Denom,
                                                  bool IsDiv) const {
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S1 = LLT::scalar(1);
  Register RcpLo, RcpHi;

  std::tie(RcpLo, RcpHi) = emitReciprocalU64(B, Denom);

  auto Rcp = B.buildMerge(S64, {RcpLo, RcpHi});

  auto Zero64 = B.buildConstant(S64, 0);
  auto NegDenom = B.buildSub(S64, Zero64, Denom);

  auto MulLo1 = B.buildMul(S64, NegDenom, Rcp);
  auto MulHi1 = B.buildUMulH(S64, Rcp, MulLo1);

  auto UnmergeMulHi1 = B.buildUnmerge(S32, MulHi1);
  Register MulHi1_Lo = UnmergeMulHi1.getReg(0);
  Register MulHi1_Hi = UnmergeMulHi1.getReg(1);

  auto Add1_Lo = B.buildUAddo(S32, S1, RcpLo, MulHi1_Lo);
  auto Add1_Hi = B.buildUAdde(S32, S1, RcpHi, MulHi1_Hi, Add1_Lo.getReg(1));
  auto Add1_HiNc = B.buildAdd(S32, RcpHi, MulHi1_Hi);
  auto Add1 = B.buildMerge(S64, {Add1_Lo, Add1_Hi});

  auto MulLo2 = B.buildMul(S64, NegDenom, Add1);
  auto MulHi2 = B.buildUMulH(S64, Add1, MulLo2);
  auto UnmergeMulHi2 = B.buildUnmerge(S32, MulHi2);
  Register MulHi2_Lo = UnmergeMulHi2.getReg(0);
  Register MulHi2_Hi = UnmergeMulHi2.getReg(1);

  auto Zero32 = B.buildConstant(S32, 0);
  auto Add2_Lo = B.buildUAddo(S32, S1, Add1_Lo, MulHi2_Lo);
  auto Add2_HiC =
      B.buildUAdde(S32, S1, Add1_HiNc, MulHi2_Hi, Add1_Lo.getReg(1));
  auto Add2_Hi = B.buildUAdde(S32, S1, Add2_HiC, Zero32, Add2_Lo.getReg(1));
  auto Add2 = B.buildMerge(S64, {Add2_Lo, Add2_Hi});

  auto UnmergeNumer = B.buildUnmerge(S32, Numer);
  Register NumerLo = UnmergeNumer.getReg(0);
  Register NumerHi = UnmergeNumer.getReg(1);

  auto MulHi3 = B.buildUMulH(S64, Numer, Add2);
  auto Mul3 = B.buildMul(S64, Denom, MulHi3);
  auto UnmergeMul3 = B.buildUnmerge(S32, Mul3);
  Register Mul3_Lo = UnmergeMul3.getReg(0);
  Register Mul3_Hi = UnmergeMul3.getReg(1);
  auto Sub1_Lo = B.buildUSubo(S32, S1, NumerLo, Mul3_Lo);
  auto Sub1_Hi = B.buildUSube(S32, S1, NumerHi, Mul3_Hi, Sub1_Lo.getReg(1));
  auto Sub1_Mi = B.buildSub(S32, NumerHi, Mul3_Hi);
  auto Sub1 = B.buildMerge(S64, {Sub1_Lo, Sub1_Hi});

  auto UnmergeDenom = B.buildUnmerge(S32, Denom);
  Register DenomLo = UnmergeDenom.getReg(0);
  Register DenomHi = UnmergeDenom.getReg(1);

  auto CmpHi = B.buildICmp(CmpInst::ICMP_UGE, S1, Sub1_Hi, DenomHi);
  auto C1 = B.buildSExt(S32, CmpHi);

  auto CmpLo = B.buildICmp(CmpInst::ICMP_UGE, S1, Sub1_Lo, DenomLo);
  auto C2 = B.buildSExt(S32, CmpLo);

  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, S1, Sub1_Hi, DenomHi);
  auto C3 = B.buildSelect(S32, CmpEq, C2, C1);

  // TODO: Here and below portions of the code can be enclosed into if/endif.
  // Currently control flow is unconditional and we have 4 selects after
  // potential endif to substitute PHIs.

  // if C3 != 0 ...
  auto Sub2_Lo = B.buildUSubo(S32, S1, Sub1_Lo, DenomLo);
  auto Sub2_Mi = B.buildUSube(S32, S1, Sub1_Mi, DenomHi, Sub1_Lo.getReg(1));
  auto Sub2_Hi = B.buildUSube(S32, S1, Sub2_Mi, Zero32, Sub2_Lo.getReg(1));
  auto Sub2 = B.buildMerge(S64, {Sub2_Lo, Sub2_Hi});

  auto One64 = B.buildConstant(S64, 1);
  auto Add3 = B.buildAdd(S64, MulHi3, One64);

  auto C4 =
      B.buildSExt(S32, B.buildICmp(CmpInst::ICMP_UGE, S1, Sub2_Hi, DenomHi));
  auto C5 =
      B.buildSExt(S32, B.buildICmp(CmpInst::ICMP_UGE, S1, Sub2_Lo, DenomLo));
  auto C6 = B.buildSelect(
      S32, B.buildICmp(CmpInst::ICMP_EQ, S1, Sub2_Hi, DenomHi), C5, C4);

  // if (C6 != 0)
  auto Add4 = B.buildAdd(S64, Add3, One64);
  auto Sub3_Lo = B.buildUSubo(S32, S1, Sub2_Lo, DenomLo);

  auto Sub3_Mi = B.buildUSube(S32, S1, Sub2_Mi, DenomHi, Sub2_Lo.getReg(1));
  auto Sub3_Hi = B.buildUSube(S32, S1, Sub3_Mi, Zero32, Sub3_Lo.getReg(1));
  auto Sub3 = B.buildMerge(S64, {Sub3_Lo, Sub3_Hi});

  // endif C6
  // endif C3

  if (IsDiv) {
    auto Sel1 = B.buildSelect(
        S64, B.buildICmp(CmpInst::ICMP_NE, S1, C6, Zero32), Add4, Add3);
    B.buildSelect(DstReg,
                  B.buildICmp(CmpInst::ICMP_NE, S1, C3, Zero32), Sel1, MulHi3);
  } else {
    auto Sel2 = B.buildSelect(
        S64, B.buildICmp(CmpInst::ICMP_NE, S1, C6, Zero32), Sub3, Sub2);
    B.buildSelect(DstReg,
                  B.buildICmp(CmpInst::ICMP_NE, S1, C3, Zero32), Sel2, Sub1);
  }
}

bool AMDGPULegalizerInfo::legalizeUDIV_UREM(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);
  const bool IsDiv = MI.getOpcode() == AMDGPU::G_UDIV;
  Register DstReg = MI.getOperand(0).getReg();
  Register Num = MI.getOperand(1).getReg();
  Register Den = MI.getOperand(2).getReg();
  LLT Ty = MRI.getType(DstReg);

  if (Ty == S32)
    legalizeUDIV_UREM32Impl(B, DstReg, Num, Den, IsDiv);
  else if (Ty == S64)
    legalizeUDIV_UREM64Impl(B, DstReg, Num, Den, IsDiv);
  else
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeSDIV_SREM(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  Register DstReg = MI.getOperand(0).getReg();
  const LLT Ty = MRI.getType(DstReg);
  if (Ty != S32 && Ty != S64)
    return false;

  const bool IsDiv = MI.getOpcode() == AMDGPU::G_SDIV;

  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

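  // Reduce to an unsigned division: take sign masks (x >> (bits - 1)),
  // compute |x| as (x + sign) ^ sign, divide the magnitudes, then restore the
  // result sign. The quotient sign is sign(LHS) ^ sign(RHS); the remainder
  // takes the sign of LHS.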
  auto SignBitOffset = B.buildConstant(S32, Ty.getSizeInBits() - 1);
  auto LHSign = B.buildAShr(Ty, LHS, SignBitOffset);
  auto RHSign = B.buildAShr(Ty, RHS, SignBitOffset);

  LHS = B.buildAdd(Ty, LHS, LHSign).getReg(0);
  RHS = B.buildAdd(Ty, RHS, RHSign).getReg(0);

  LHS = B.buildXor(Ty, LHS, LHSign).getReg(0);
  RHS = B.buildXor(Ty, RHS, RHSign).getReg(0);

  Register UDivRem = MRI.createGenericVirtualRegister(Ty);
  if (Ty == S32)
    legalizeUDIV_UREM32Impl(B, UDivRem, LHS, RHS, IsDiv);
  else
    legalizeUDIV_UREM64Impl(B, UDivRem, LHS, RHS, IsDiv);

  Register Sign;
  if (IsDiv)
    Sign = B.buildXor(Ty, LHSign, RHSign).getReg(0);
  else
    Sign = LHSign.getReg(0); // Remainder sign is the same as LHS

  UDivRem = B.buildXor(Ty, UDivRem, Sign).getReg(0);
  B.buildSub(DstReg, UDivRem, Sign);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  uint16_t Flags = MI.getFlags();
  LLT ResTy = MRI.getType(Res);

  const MachineFunction &MF = B.getMF();
  bool AllowInaccurateRcp = MF.getTarget().Options.UnsafeFPMath ||
                            MI.getFlag(MachineInstr::FmAfn);

  if (!AllowInaccurateRcp)
    return false;

  if (auto CLHS = getConstantFPVRegVal(LHS, MRI)) {
    // 1 / x -> RCP(x)
    if (CLHS->isExactlyValue(1.0)) {
      B.buildIntrinsic(Intrinsic::amdgcn_rcp, Res, false)
        .addUse(RHS)
        .setMIFlags(Flags);

      MI.eraseFromParent();
      return true;
    }

    // -1 / x -> RCP( FNEG(x) )
    if (CLHS->isExactlyValue(-1.0)) {
      auto FNeg = B.buildFNeg(ResTy, RHS, Flags);
      B.buildIntrinsic(Intrinsic::amdgcn_rcp, Res, false)
        .addUse(FNeg.getReg(0))
        .setMIFlags(Flags);

      MI.eraseFromParent();
      return true;
    }
  }

  // x / y -> x * (1.0 / y)
  auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {ResTy}, false)
    .addUse(RHS)
    .setMIFlags(Flags);
  B.buildFMul(Res, LHS, RCP, Flags);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV64(MachineInstr &MI,
                                                   MachineRegisterInfo &MRI,
                                                   MachineIRBuilder &B) const {
  Register Res = MI.getOperand(0).getReg();
  Register X = MI.getOperand(1).getReg();
  Register Y = MI.getOperand(2).getReg();
  uint16_t Flags = MI.getFlags();
  LLT ResTy = MRI.getType(Res);

  const MachineFunction &MF = B.getMF();
  bool AllowInaccurateRcp = MF.getTarget().Options.UnsafeFPMath ||
                            MI.getFlag(MachineInstr::FmAfn);

  if (!AllowInaccurateRcp)
    return false;

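  // Refine the hardware reciprocal estimate r ~= 1/y with two Newton-Raphson
  // steps, r = r + r * (1 - y * r), then form q = x * r and apply one final
  // correction, q = q + r * (x - y * q).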
  auto NegY = B.buildFNeg(ResTy, Y);
  auto One = B.buildFConstant(ResTy, 1.0);

  auto R = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {ResTy}, false)
    .addUse(Y)
    .setMIFlags(Flags);

  auto Tmp0 = B.buildFMA(ResTy, NegY, R, One);
  R = B.buildFMA(ResTy, Tmp0, R, R);

  auto Tmp1 = B.buildFMA(ResTy, NegY, R, One);
  R = B.buildFMA(ResTy, Tmp1, R, R);

  auto Ret = B.buildFMul(ResTy, X, R);
  auto Tmp2 = B.buildFMA(ResTy, NegY, Ret, X);

  B.buildFMA(Res, Tmp2, R, Ret);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFDIV16(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {
  if (legalizeFastUnsafeFDIV(MI, MRI, B))
    return true;

  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  uint16_t Flags = MI.getFlags();

  LLT S16 = LLT::scalar(16);
  LLT S32 = LLT::scalar(32);

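  // The f16 division is computed in f32: extend both operands, multiply the
  // extended numerator by the f32 reciprocal of the extended denominator,
  // truncate back to f16, and let amdgcn_div_fixup handle the special cases.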
  auto LHSExt = B.buildFPExt(S32, LHS, Flags);
  auto RHSExt = B.buildFPExt(S32, RHS, Flags);

  auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(RHSExt.getReg(0))
    .setMIFlags(Flags);

  auto QUOT = B.buildFMul(S32, LHSExt, RCP, Flags);
  auto RDst = B.buildFPTrunc(S16, QUOT, Flags);

  B.buildIntrinsic(Intrinsic::amdgcn_div_fixup, Res, false)
    .addUse(RDst.getReg(0))
    .addUse(RHS)
    .addUse(LHS)
    .setMIFlags(Flags);

  MI.eraseFromParent();
  return true;
}

// Enable or disable FP32 denorm mode. When 'Enable' is true, emit instructions
// to enable denorm mode. When 'Enable' is false, disable denorm mode.
static void toggleSPDenormMode(bool Enable,
                               MachineIRBuilder &B,
                               const GCNSubtarget &ST,
                               AMDGPU::SIModeRegisterDefaults Mode) {
  // Set SP denorm mode to this value.
  unsigned SPDenormMode =
    Enable ? FP_DENORM_FLUSH_NONE : Mode.fpDenormModeSPValue();

  if (ST.hasDenormModeInst()) {
    // Preserve default FP64FP16 denorm mode while updating FP32 mode.
    uint32_t DPDenormModeDefault = Mode.fpDenormModeDPValue();

    uint32_t NewDenormModeValue = SPDenormMode | (DPDenormModeDefault << 2);
    B.buildInstr(AMDGPU::S_DENORM_MODE)
      .addImm(NewDenormModeValue);

  } else {
    // Select FP32 bit field in mode register.
    unsigned SPDenormModeBitField = AMDGPU::Hwreg::ID_MODE |
                                    (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                                    (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);

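    // This is the field encoding of hwreg(HW_REG_MODE, 4, 2), i.e. the two
    // bits of the MODE register that control FP32 denormal handling.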
    B.buildInstr(AMDGPU::S_SETREG_IMM32_B32)
      .addImm(SPDenormMode)
      .addImm(SPDenormModeBitField);
  }
}

bool AMDGPULegalizerInfo::legalizeFDIV32(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {
  if (legalizeFastUnsafeFDIV(MI, MRI, B))
    return true;

  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  AMDGPU::SIModeRegisterDefaults Mode = MFI->getMode();

  uint16_t Flags = MI.getFlags();

  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);

  auto One = B.buildFConstant(S32, 1.0f);

  auto DenominatorScaled =
    B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S32, S1}, false)
      .addUse(LHS)
      .addUse(RHS)
      .addImm(0)
      .setMIFlags(Flags);
  auto NumeratorScaled =
    B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S32, S1}, false)
      .addUse(LHS)
      .addUse(RHS)
      .addImm(1)
      .setMIFlags(Flags);

  auto ApproxRcp = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(DenominatorScaled.getReg(0))
    .setMIFlags(Flags);
  auto NegDivScale0 = B.buildFNeg(S32, DenominatorScaled, Flags);

  // FIXME: Doesn't correctly model the FP mode switch, and the FP operations
  // aren't modeled as reading it.
  if (!Mode.allFP32Denormals())
    toggleSPDenormMode(true, B, ST, Mode);

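  // Fma0/Fma1 refine the approximate reciprocal of the scaled denominator,
  // Mul forms the quotient estimate, and Fma2..Fma4 compute the residual and
  // a corrected quotient. FP32 denormals are kept enabled around this chain
  // when the default mode would otherwise flush them.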
  auto Fma0 = B.buildFMA(S32, NegDivScale0, ApproxRcp, One, Flags);
  auto Fma1 = B.buildFMA(S32, Fma0, ApproxRcp, ApproxRcp, Flags);
  auto Mul = B.buildFMul(S32, NumeratorScaled, Fma1, Flags);
  auto Fma2 = B.buildFMA(S32, NegDivScale0, Mul, NumeratorScaled, Flags);
  auto Fma3 = B.buildFMA(S32, Fma2, Fma1, Mul, Flags);
  auto Fma4 = B.buildFMA(S32, NegDivScale0, Fma3, NumeratorScaled, Flags);

  if (!Mode.allFP32Denormals())
    toggleSPDenormMode(false, B, ST, Mode);

  auto Fmas = B.buildIntrinsic(Intrinsic::amdgcn_div_fmas, {S32}, false)
    .addUse(Fma4.getReg(0))
    .addUse(Fma1.getReg(0))
    .addUse(Fma3.getReg(0))
    .addUse(NumeratorScaled.getReg(1))
    .setMIFlags(Flags);

  B.buildIntrinsic(Intrinsic::amdgcn_div_fixup, Res, false)
    .addUse(Fmas.getReg(0))
    .addUse(RHS)
    .addUse(LHS)
    .setMIFlags(Flags);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFDIV64(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {
  if (legalizeFastUnsafeFDIV64(MI, MRI, B))
    return true;

  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  uint16_t Flags = MI.getFlags();

  LLT S64 = LLT::scalar(64);
  LLT S1 = LLT::scalar(1);

  auto One = B.buildFConstant(S64, 1.0);

  auto DivScale0 = B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S64, S1}, false)
    .addUse(LHS)
    .addUse(RHS)
    .addImm(0)
    .setMIFlags(Flags);

  auto NegDivScale0 = B.buildFNeg(S64, DivScale0.getReg(0), Flags);

  auto Rcp = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S64}, false)
    .addUse(DivScale0.getReg(0))
    .setMIFlags(Flags);

  auto Fma0 = B.buildFMA(S64, NegDivScale0, Rcp, One, Flags);
  auto Fma1 = B.buildFMA(S64, Rcp, Fma0, Rcp, Flags);
  auto Fma2 = B.buildFMA(S64, NegDivScale0, Fma1, One, Flags);

  auto DivScale1 = B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S64, S1}, false)
    .addUse(LHS)
    .addUse(RHS)
    .addImm(1)
    .setMIFlags(Flags);

  auto Fma3 = B.buildFMA(S64, Fma1, Fma2, Fma1, Flags);
  auto Mul = B.buildFMul(S64, DivScale1.getReg(0), Fma3, Flags);
  auto Fma4 = B.buildFMA(S64, NegDivScale0, Mul, DivScale1.getReg(0), Flags);

  Register Scale;
  if (!ST.hasUsableDivScaleConditionOutput()) {
    // Workaround a hardware bug on SI where the condition output from div_scale
    // is not usable.

    LLT S32 = LLT::scalar(32);

    auto NumUnmerge = B.buildUnmerge(S32, LHS);
    auto DenUnmerge = B.buildUnmerge(S32, RHS);
    auto Scale0Unmerge = B.buildUnmerge(S32, DivScale0);
    auto Scale1Unmerge = B.buildUnmerge(S32, DivScale1);

    auto CmpNum = B.buildICmp(ICmpInst::ICMP_EQ, S1, NumUnmerge.getReg(1),
                              Scale1Unmerge.getReg(1));
    auto CmpDen = B.buildICmp(ICmpInst::ICMP_EQ, S1, DenUnmerge.getReg(1),
                              Scale0Unmerge.getReg(1));
    Scale = B.buildXor(S1, CmpNum, CmpDen).getReg(0);
  } else {
    Scale = DivScale1.getReg(1);
  }

  auto Fmas = B.buildIntrinsic(Intrinsic::amdgcn_div_fmas, {S64}, false)
    .addUse(Fma4.getReg(0))
    .addUse(Fma3.getReg(0))
    .addUse(Mul.getReg(0))
    .addUse(Scale)
    .setMIFlags(Flags);

  B.buildIntrinsic(Intrinsic::amdgcn_div_fixup, makeArrayRef(Res), false)
    .addUse(Fmas.getReg(0))
    .addUse(RHS)
    .addUse(LHS)
    .setMIFlags(Flags);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFDIVFastIntrin(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(2).getReg();
  Register RHS = MI.getOperand(3).getReg();
  uint16_t Flags = MI.getFlags();

  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);

  auto Abs = B.buildFAbs(S32, RHS, Flags);
  const APFloat C0Val(1.0f);

  auto C0 = B.buildConstant(S32, 0x6f800000);
  auto C1 = B.buildConstant(S32, 0x2f800000);
  auto C2 = B.buildConstant(S32, FloatToBits(1.0f));

  auto CmpRes = B.buildFCmp(CmpInst::FCMP_OGT, S1, Abs, C0, Flags);
  auto Sel = B.buildSelect(S32, CmpRes, C1, C2, Flags);

  auto Mul0 = B.buildFMul(S32, RHS, Sel, Flags);

  auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(Mul0.getReg(0))
    .setMIFlags(Flags);

  auto Mul1 = B.buildFMul(S32, LHS, RCP, Flags);

  B.buildFMul(Res, Sel, Mul1, Flags);

  MI.eraseFromParent();
  return true;
}

// Expand llvm.amdgcn.rsq.clamp on targets that don't support the instruction.
// FIXME: Why do we handle this one but not other removed instructions?
//
// Reciprocal square root.  The clamp prevents infinite results, clamping
// infinities to max_float.  D.f = 1.0 / sqrt(S0.f), result clamped to
// +-max_float.
bool AMDGPULegalizerInfo::legalizeRsqClampIntrinsic(MachineInstr &MI,
                                                    MachineRegisterInfo &MRI,
                                                    MachineIRBuilder &B) const {
  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(2).getReg();
  auto Flags = MI.getFlags();

  LLT Ty = MRI.getType(Dst);

  const fltSemantics *FltSemantics;
  if (Ty == LLT::scalar(32))
    FltSemantics = &APFloat::IEEEsingle();
  else if (Ty == LLT::scalar(64))
    FltSemantics = &APFloat::IEEEdouble();
  else
    return false;

  auto Rsq = B.buildIntrinsic(Intrinsic::amdgcn_rsq, {Ty}, false)
    .addUse(Src)
    .setMIFlags(Flags);

  // We don't need to concern ourselves with the snan handling difference,
  // since the rsq has already quieted it (or not), so use the min/max variant
  // which will directly select.
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  const bool UseIEEE = MFI->getMode().IEEE;

  auto MaxFlt = B.buildFConstant(Ty, APFloat::getLargest(*FltSemantics));
  auto ClampMax = UseIEEE ? B.buildFMinNumIEEE(Ty, Rsq, MaxFlt, Flags) :
                            B.buildFMinNum(Ty, Rsq, MaxFlt, Flags);

  auto MinFlt = B.buildFConstant(Ty, APFloat::getLargest(*FltSemantics, true));

  if (UseIEEE)
    B.buildFMaxNumIEEE(Dst, ClampMax, MinFlt, Flags);
  else
    B.buildFMaxNum(Dst, ClampMax, MinFlt, Flags);
  MI.eraseFromParent();
  return true;
}

static unsigned getDSFPAtomicOpcode(Intrinsic::ID IID) {
  switch (IID) {
  case Intrinsic::amdgcn_ds_fadd:
    return AMDGPU::G_ATOMICRMW_FADD;
  case Intrinsic::amdgcn_ds_fmin:
    return AMDGPU::G_AMDGPU_ATOMIC_FMIN;
  case Intrinsic::amdgcn_ds_fmax:
    return AMDGPU::G_AMDGPU_ATOMIC_FMAX;
  default:
    llvm_unreachable("not a DS FP intrinsic");
  }
}

bool AMDGPULegalizerInfo::legalizeDSAtomicFPIntrinsic(LegalizerHelper &Helper,
                                                      MachineInstr &MI,
                                                      Intrinsic::ID IID) const {
  GISelChangeObserver &Observer = Helper.Observer;
  Observer.changingInstr(MI);

  MI.setDesc(ST.getInstrInfo()->get(getDSFPAtomicOpcode(IID)));

  // The remaining operands were used to set fields in the MemOperand on
  // construction.
  for (int I = 6; I > 3; --I)
    MI.RemoveOperand(I);

  MI.RemoveOperand(1); // Remove the intrinsic ID.
  Observer.changedInstr(MI);
  return true;
}

bool AMDGPULegalizerInfo::getImplicitArgPtr(Register DstReg,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  uint64_t Offset =
    ST.getTargetLowering()->getImplicitParameterOffset(
      B.getMF(), AMDGPUTargetLowering::FIRST_IMPLICIT);
  LLT DstTy = MRI.getType(DstReg);
  LLT IdxTy = LLT::scalar(DstTy.getSizeInBits());

  Register KernargPtrReg = MRI.createGenericVirtualRegister(DstTy);
  if (!loadInputValue(KernargPtrReg, B,
                      AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR))
    return false;

  // FIXME: This should be nuw
  B.buildPtrAdd(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
  return true;
}

bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (!MFI->isEntryFunction()) {
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }

  Register DstReg = MI.getOperand(0).getReg();
  if (!getImplicitArgPtr(DstReg, MRI, B))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeIsAddrSpace(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B,
                                              unsigned AddrSpace) const {
  Register ApertureReg = getSegmentAperture(AddrSpace, MRI, B);
  auto Unmerge = B.buildUnmerge(LLT::scalar(32), MI.getOperand(2).getReg());
  Register Hi32 = Unmerge.getReg(1);

  B.buildICmp(ICmpInst::ICMP_EQ, MI.getOperand(0), Hi32, ApertureReg);
  MI.eraseFromParent();
  return true;
}

// The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
// offset (the offset that is included in bounds checking and swizzling, to be
// split between the instruction's voffset and immoffset fields) and soffset
// (the offset that is excluded from bounds checking and swizzling, to go in
// the instruction's soffset field).  This function takes the first kind of
// offset and figures out how to split it between voffset and immoffset.
std::tuple<Register, unsigned, unsigned>
AMDGPULegalizerInfo::splitBufferOffsets(MachineIRBuilder &B,
                                        Register OrigOffset) const {
  const unsigned MaxImm = 4095;
  Register BaseReg;
  unsigned TotalConstOffset;
  const LLT S32 = LLT::scalar(32);

  std::tie(BaseReg, TotalConstOffset) =
      AMDGPU::getBaseWithConstantOffset(*B.getMRI(), OrigOffset);

  unsigned ImmOffset = TotalConstOffset;

  // If the immediate value is too big for the immoffset field, put the value
  // and -4096 into the immoffset field so that the value that is copied/added
  // for the voffset field is a multiple of 4096, and it stands more chance
  // of being CSEd with the copy/add for another similar load/store.
  // However, do not do that rounding down to a multiple of 4096 if that is a
  // negative number, as it appears to be illegal to have a negative offset
  // in the vgpr, even if adding the immediate offset makes it positive.
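  // For example, a constant offset of 8200 is split into Overflow = 8192,
  // which is added into the voffset register below, and ImmOffset = 8, which
  // is returned for the instruction's immediate offset field.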
  unsigned Overflow = ImmOffset & ~MaxImm;
  ImmOffset -= Overflow;
  if ((int32_t)Overflow < 0) {
    Overflow += ImmOffset;
    ImmOffset = 0;
  }

  if (Overflow != 0) {
    if (!BaseReg) {
      BaseReg = B.buildConstant(S32, Overflow).getReg(0);
    } else {
      auto OverflowVal = B.buildConstant(S32, Overflow);
      BaseReg = B.buildAdd(S32, BaseReg, OverflowVal).getReg(0);
    }
  }

  if (!BaseReg)
    BaseReg = B.buildConstant(S32, 0).getReg(0);

  return std::make_tuple(BaseReg, ImmOffset, TotalConstOffset);
}

/// Handle register layout difference for f16 images for some subtargets.
Register AMDGPULegalizerInfo::handleD16VData(MachineIRBuilder &B,
                                             MachineRegisterInfo &MRI,
                                             Register Reg,
                                             bool ImageStore) const {
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  LLT StoreVT = MRI.getType(Reg);
  assert(StoreVT.isVector() && StoreVT.getElementType() == S16);

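  // On subtargets with unpacked D16 memory instructions each 16-bit element
  // occupies a full 32-bit register, so the value is rebuilt as a vector of
  // anyext'd 32-bit elements. Subtargets with the image-store D16 bug instead
  // get the packed data padded out and bitcast to a whole number of 32-bit
  // registers.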
  if (ST.hasUnpackedD16VMem()) {
 | 
						|
    auto Unmerge = B.buildUnmerge(S16, Reg);
 | 
						|
 | 
						|
    SmallVector<Register, 4> WideRegs;
 | 
						|
    for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
 | 
						|
      WideRegs.push_back(B.buildAnyExt(S32, Unmerge.getReg(I)).getReg(0));
 | 
						|
 | 
						|
    int NumElts = StoreVT.getNumElements();
 | 
						|
 | 
						|
    return B.buildBuildVector(LLT::vector(NumElts, S32), WideRegs).getReg(0);
 | 
						|
  }
 | 
						|
 | 
						|
  if (ImageStore && ST.hasImageStoreD16Bug()) {
 | 
						|
    if (StoreVT.getNumElements() == 2) {
 | 
						|
      SmallVector<Register, 4> PackedRegs;
 | 
						|
      Reg = B.buildBitcast(S32, Reg).getReg(0);
 | 
						|
      PackedRegs.push_back(Reg);
 | 
						|
      PackedRegs.resize(2, B.buildUndef(S32).getReg(0));
 | 
						|
      return B.buildBuildVector(LLT::vector(2, S32), PackedRegs).getReg(0);
 | 
						|
    }
 | 
						|
 | 
						|
    if (StoreVT.getNumElements() == 3) {
 | 
						|
      SmallVector<Register, 4> PackedRegs;
 | 
						|
      auto Unmerge = B.buildUnmerge(S16, Reg);
 | 
						|
      for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
 | 
						|
        PackedRegs.push_back(Unmerge.getReg(I));
 | 
						|
      PackedRegs.resize(6, B.buildUndef(S16).getReg(0));
 | 
						|
      Reg = B.buildBuildVector(LLT::vector(6, S16), PackedRegs).getReg(0);
 | 
						|
      return B.buildBitcast(LLT::vector(3, S32), Reg).getReg(0);
 | 
						|
    }
 | 
						|
 | 
						|
    if (StoreVT.getNumElements() == 4) {
 | 
						|
      SmallVector<Register, 4> PackedRegs;
 | 
						|
      Reg = B.buildBitcast(LLT::vector(2, S32), Reg).getReg(0);
 | 
						|
      auto Unmerge = B.buildUnmerge(S32, Reg);
 | 
						|
      for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
 | 
						|
        PackedRegs.push_back(Unmerge.getReg(I));
 | 
						|
      PackedRegs.resize(4, B.buildUndef(S32).getReg(0));
 | 
						|
      return B.buildBuildVector(LLT::vector(4, S32), PackedRegs).getReg(0);
 | 
						|
    }
 | 
						|
 | 
						|
    llvm_unreachable("invalid data type");
 | 
						|
  }
 | 
						|
 | 
						|
  return Reg;
 | 
						|
}
 | 
						|
 | 
						|
Register AMDGPULegalizerInfo::fixStoreSourceType(
 | 
						|
  MachineIRBuilder &B, Register VData, bool IsFormat) const {
 | 
						|
  MachineRegisterInfo *MRI = B.getMRI();
 | 
						|
  LLT Ty = MRI->getType(VData);
 | 
						|
 | 
						|
  const LLT S16 = LLT::scalar(16);
 | 
						|
 | 
						|
  // Fixup illegal register types for i8 stores.
 | 
						|
  if (Ty == LLT::scalar(8) || Ty == S16) {
 | 
						|
    Register AnyExt = B.buildAnyExt(LLT::scalar(32), VData).getReg(0);
 | 
						|
    return AnyExt;
 | 
						|
  }
 | 
						|
 | 
						|
  if (Ty.isVector()) {
 | 
						|
    if (Ty.getElementType() == S16 && Ty.getNumElements() <= 4) {
 | 
						|
      if (IsFormat)
 | 
						|
        return handleD16VData(B, *MRI, VData);
 | 
						|
    }
 | 
						|
  }
 | 
						|
 | 
						|
  return VData;
 | 
						|
}
 | 
						|
 | 
						|
bool AMDGPULegalizerInfo::legalizeBufferStore(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B,
                                              bool IsTyped,
                                              bool IsFormat) const {
  Register VData = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(VData);
  LLT EltTy = Ty.getScalarType();
  const bool IsD16 = IsFormat && (EltTy.getSizeInBits() == 16);
  const LLT S32 = LLT::scalar(32);

  VData = fixStoreSourceType(B, VData, IsFormat);
  Register RSrc = MI.getOperand(2).getReg();

  MachineMemOperand *MMO = *MI.memoperands_begin();
  const int MemSize = MMO->getSize();

  unsigned ImmOffset;
  unsigned TotalOffset;

  // The typed intrinsics add an immediate after the registers.
  const unsigned NumVIndexOps = IsTyped ? 8 : 7;

  // The struct intrinsic variants add one additional operand over raw.
  const bool HasVIndex = MI.getNumOperands() == NumVIndexOps;
  Register VIndex;
  int OpOffset = 0;
  if (HasVIndex) {
    VIndex = MI.getOperand(3).getReg();
    OpOffset = 1;
  }

  Register VOffset = MI.getOperand(3 + OpOffset).getReg();
  Register SOffset = MI.getOperand(4 + OpOffset).getReg();

  unsigned Format = 0;
  if (IsTyped) {
    Format = MI.getOperand(5 + OpOffset).getImm();
    ++OpOffset;
  }

  unsigned AuxiliaryData = MI.getOperand(5 + OpOffset).getImm();

  std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
  if (TotalOffset != 0)
    MMO = B.getMF().getMachineMemOperand(MMO, TotalOffset, MemSize);

  unsigned Opc;
  if (IsTyped) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT;
  } else if (IsFormat) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT;
  } else {
    switch (MemSize) {
    case 1:
      Opc = AMDGPU::G_AMDGPU_BUFFER_STORE_BYTE;
      break;
    case 2:
      Opc = AMDGPU::G_AMDGPU_BUFFER_STORE_SHORT;
      break;
    default:
      Opc = AMDGPU::G_AMDGPU_BUFFER_STORE;
      break;
    }
  }

  if (!VIndex)
    VIndex = B.buildConstant(S32, 0).getReg(0);

  auto MIB = B.buildInstr(Opc)
    .addUse(VData)              // vdata
    .addUse(RSrc)               // rsrc
    .addUse(VIndex)             // vindex
    .addUse(VOffset)            // voffset
    .addUse(SOffset)            // soffset
    .addImm(ImmOffset);         // offset(imm)

  if (IsTyped)
    MIB.addImm(Format);

  MIB.addImm(AuxiliaryData)      // cachepolicy, swizzled buffer(imm)
     .addImm(HasVIndex ? -1 : 0) // idxen(imm)
     .addMemOperand(MMO);

  MI.eraseFromParent();
  return true;
}

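// Legalize raw/struct buffer load intrinsics (plain, format, and typed) into
// the target-specific G_AMDGPU_BUFFER_LOAD* pseudos. Sub-dword and d16 results
// are loaded into wider registers and then truncated or repacked back to the
// original result type.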
bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
                                             MachineRegisterInfo &MRI,
                                             MachineIRBuilder &B,
                                             bool IsFormat,
                                             bool IsTyped) const {
  // FIXME: Verifier should enforce 1 MMO for these intrinsics.
  MachineMemOperand *MMO = *MI.memoperands_begin();
  const int MemSize = MMO->getSize();
  const LLT S32 = LLT::scalar(32);

  Register Dst = MI.getOperand(0).getReg();
  Register RSrc = MI.getOperand(2).getReg();

  // The typed intrinsics add an immediate after the registers.
  const unsigned NumVIndexOps = IsTyped ? 8 : 7;

  // The struct intrinsic variants add one additional operand over raw.
  const bool HasVIndex = MI.getNumOperands() == NumVIndexOps;
  Register VIndex;
  int OpOffset = 0;
  if (HasVIndex) {
    VIndex = MI.getOperand(3).getReg();
    OpOffset = 1;
  }

  Register VOffset = MI.getOperand(3 + OpOffset).getReg();
  Register SOffset = MI.getOperand(4 + OpOffset).getReg();

  unsigned Format = 0;
  if (IsTyped) {
    Format = MI.getOperand(5 + OpOffset).getImm();
    ++OpOffset;
  }

  unsigned AuxiliaryData = MI.getOperand(5 + OpOffset).getImm();
  unsigned ImmOffset;
  unsigned TotalOffset;

  LLT Ty = MRI.getType(Dst);
  LLT EltTy = Ty.getScalarType();
  const bool IsD16 = IsFormat && (EltTy.getSizeInBits() == 16);
  const bool Unpacked = ST.hasUnpackedD16VMem();

  std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
  if (TotalOffset != 0)
    MMO = B.getMF().getMachineMemOperand(MMO, TotalOffset, MemSize);

  unsigned Opc;

  if (IsTyped) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT;
  } else if (IsFormat) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT;
  } else {
    switch (MemSize) {
    case 1:
      Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE;
      break;
    case 2:
      Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT;
      break;
    default:
      Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD;
      break;
    }
  }

  Register LoadDstReg;

  bool IsExtLoad = (!IsD16 && MemSize < 4) || (IsD16 && !Ty.isVector());
  LLT UnpackedTy = Ty.changeElementSize(32);

  if (IsExtLoad)
    LoadDstReg = B.getMRI()->createGenericVirtualRegister(S32);
  else if (Unpacked && IsD16 && Ty.isVector())
    LoadDstReg = B.getMRI()->createGenericVirtualRegister(UnpackedTy);
  else
    LoadDstReg = Dst;

  if (!VIndex)
    VIndex = B.buildConstant(S32, 0).getReg(0);

  auto MIB = B.buildInstr(Opc)
    .addDef(LoadDstReg)         // vdata
    .addUse(RSrc)               // rsrc
    .addUse(VIndex)             // vindex
    .addUse(VOffset)            // voffset
    .addUse(SOffset)            // soffset
    .addImm(ImmOffset);         // offset(imm)

  if (IsTyped)
    MIB.addImm(Format);

  MIB.addImm(AuxiliaryData)      // cachepolicy, swizzled buffer(imm)
     .addImm(HasVIndex ? -1 : 0) // idxen(imm)
     .addMemOperand(MMO);

  if (LoadDstReg != Dst) {
    B.setInsertPt(B.getMBB(), ++B.getInsertPt());

    // The result was widened for the extending load; truncate it back down.
    if (IsExtLoad)
      B.buildTrunc(Dst, LoadDstReg);
    else {
      // Repack to original 16-bit vector result
      // FIXME: G_TRUNC should work, but legalization currently fails
      auto Unmerge = B.buildUnmerge(S32, LoadDstReg);
      SmallVector<Register, 4> Repack;
      for (unsigned I = 0, N = Unmerge->getNumOperands() - 1; I != N; ++I)
        Repack.push_back(B.buildTrunc(EltTy, Unmerge.getReg(I)).getReg(0));
      B.buildMerge(Dst, Repack);
    }
  }

  MI.eraseFromParent();
  return true;
}

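// Lower the amdgcn.atomic.inc/dec intrinsics to the G_AMDGPU_ATOMIC_INC/DEC
// pseudos, preserving the memory operands.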
bool AMDGPULegalizerInfo::legalizeAtomicIncDec(MachineInstr &MI,
                                               MachineIRBuilder &B,
                                               bool IsInc) const {
  unsigned Opc = IsInc ? AMDGPU::G_AMDGPU_ATOMIC_INC :
                         AMDGPU::G_AMDGPU_ATOMIC_DEC;
  B.buildInstr(Opc)
    .addDef(MI.getOperand(0).getReg())
    .addUse(MI.getOperand(2).getReg())
    .addUse(MI.getOperand(3).getReg())
    .cloneMemRefs(MI);
  MI.eraseFromParent();
  return true;
}

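// Map a raw/struct buffer atomic intrinsic ID to the corresponding
// G_AMDGPU_BUFFER_ATOMIC_* pseudo opcode.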
static unsigned getBufferAtomicPseudo(Intrinsic::ID IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_raw_buffer_atomic_swap:
  case Intrinsic::amdgcn_struct_buffer_atomic_swap:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP;
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD;
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB;
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN;
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN;
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX;
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX;
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND;
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR;
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR;
  case Intrinsic::amdgcn_raw_buffer_atomic_inc:
  case Intrinsic::amdgcn_struct_buffer_atomic_inc:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC;
  case Intrinsic::amdgcn_raw_buffer_atomic_dec:
  case Intrinsic::amdgcn_struct_buffer_atomic_dec:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC;
  case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP;
  case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
  case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD;
  default:
    llvm_unreachable("unhandled atomic opcode");
  }
}

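// Legalize raw/struct buffer atomic intrinsics into the corresponding
// G_AMDGPU_BUFFER_ATOMIC_* pseudo, splitting the buffer offset into register
// and immediate parts. The no-return FP atomic variants have no result
// operand.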
bool AMDGPULegalizerInfo::legalizeBufferAtomic(MachineInstr &MI,
                                               MachineIRBuilder &B,
                                               Intrinsic::ID IID) const {
  const bool IsCmpSwap = IID == Intrinsic::amdgcn_raw_buffer_atomic_cmpswap ||
                         IID == Intrinsic::amdgcn_struct_buffer_atomic_cmpswap;
  const bool HasReturn = MI.getNumExplicitDefs() != 0;

  Register Dst;

  int OpOffset = 0;
  if (HasReturn) {
    // A few FP atomics do not support return values.
    Dst = MI.getOperand(0).getReg();
  } else {
    OpOffset = -1;
  }

  Register VData = MI.getOperand(2 + OpOffset).getReg();
  Register CmpVal;

  if (IsCmpSwap) {
    CmpVal = MI.getOperand(3 + OpOffset).getReg();
    ++OpOffset;
  }

  Register RSrc = MI.getOperand(3 + OpOffset).getReg();
  const unsigned NumVIndexOps = (IsCmpSwap ? 8 : 7) + HasReturn;

  // The struct intrinsic variants add one additional operand over raw.
  const bool HasVIndex = MI.getNumOperands() == NumVIndexOps;
  Register VIndex;
  if (HasVIndex) {
    VIndex = MI.getOperand(4 + OpOffset).getReg();
    ++OpOffset;
  }

  Register VOffset = MI.getOperand(4 + OpOffset).getReg();
  Register SOffset = MI.getOperand(5 + OpOffset).getReg();
  unsigned AuxiliaryData = MI.getOperand(6 + OpOffset).getImm();

  MachineMemOperand *MMO = *MI.memoperands_begin();

  unsigned ImmOffset;
  unsigned TotalOffset;
  std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
  if (TotalOffset != 0)
    MMO = B.getMF().getMachineMemOperand(MMO, TotalOffset, MMO->getSize());

  if (!VIndex)
    VIndex = B.buildConstant(LLT::scalar(32), 0).getReg(0);

  auto MIB = B.buildInstr(getBufferAtomicPseudo(IID));

  if (HasReturn)
    MIB.addDef(Dst);

  MIB.addUse(VData); // vdata

  if (IsCmpSwap)
    MIB.addReg(CmpVal);

  MIB.addUse(RSrc)               // rsrc
     .addUse(VIndex)             // vindex
     .addUse(VOffset)            // voffset
     .addUse(SOffset)            // soffset
     .addImm(ImmOffset)          // offset(imm)
     .addImm(AuxiliaryData)      // cachepolicy, swizzled buffer(imm)
     .addImm(HasVIndex ? -1 : 0) // idxen(imm)
     .addMemOperand(MMO);

  MI.eraseFromParent();
  return true;
}

/// Pack the s16 typed address operands of \p MI into dword sized <2 x s16>
/// registers, appending the packed registers to \p PackedAddrs.
static void packImageA16AddressToDwords(
    MachineIRBuilder &B, MachineInstr &MI,
    SmallVectorImpl<Register> &PackedAddrs, unsigned ArgOffset,
    const AMDGPU::ImageDimIntrinsicInfo *Intr, unsigned EndIdx) {
  const LLT S16 = LLT::scalar(16);
  const LLT V2S16 = LLT::vector(2, 16);

  for (unsigned I = Intr->VAddrStart; I < EndIdx; I++) {
    MachineOperand &SrcOp = MI.getOperand(ArgOffset + I);
    if (!SrcOp.isReg())
      continue; // _L to _LZ may have eliminated this.

    Register AddrReg = SrcOp.getReg();

    if (I < Intr->GradientStart) {
      AddrReg = B.buildBitcast(V2S16, AddrReg).getReg(0);
      PackedAddrs.push_back(AddrReg);
    } else {
      // Dz/dh, dz/dv and the last odd coord are packed with undef. Also, in 1D,
      // derivatives dx/dh and dx/dv are packed with undef.
      if (((I + 1) >= EndIdx) ||
          ((Intr->NumGradients / 2) % 2 == 1 &&
           (I == static_cast<unsigned>(Intr->GradientStart +
                                       (Intr->NumGradients / 2) - 1) ||
            I == static_cast<unsigned>(Intr->GradientStart +
                                       Intr->NumGradients - 1))) ||
          // Check for _L to _LZ optimization
          !MI.getOperand(ArgOffset + I + 1).isReg()) {
        PackedAddrs.push_back(
            B.buildBuildVector(V2S16, {AddrReg, B.buildUndef(S16).getReg(0)})
                .getReg(0));
      } else {
        PackedAddrs.push_back(
            B.buildBuildVector(
                 V2S16, {AddrReg, MI.getOperand(ArgOffset + I + 1).getReg()})
                .getReg(0));
        ++I;
      }
    }
  }
}

/// Convert from separate vaddr components to a single vector address register,
/// and replace the remaining operands with $noreg.
static void convertImageAddrToPacked(MachineIRBuilder &B, MachineInstr &MI,
                                     int DimIdx, int NumVAddrs) {
  const LLT S32 = LLT::scalar(32);

  SmallVector<Register, 8> AddrRegs;
  for (int I = 0; I != NumVAddrs; ++I) {
    MachineOperand &SrcOp = MI.getOperand(DimIdx + I);
    if (SrcOp.isReg()) {
      AddrRegs.push_back(SrcOp.getReg());
      assert(B.getMRI()->getType(SrcOp.getReg()) == S32);
    }
  }

  int NumAddrRegs = AddrRegs.size();
  if (NumAddrRegs != 1) {
    // Round up to 8 elements for v5-v7
    // FIXME: Missing intermediate sized register classes and instructions.
    if (NumAddrRegs > 4 && !isPowerOf2_32(NumAddrRegs)) {
      const int RoundedNumRegs = NextPowerOf2(NumAddrRegs);
      auto Undef = B.buildUndef(S32);
      AddrRegs.append(RoundedNumRegs - NumAddrRegs, Undef.getReg(0));
      NumAddrRegs = RoundedNumRegs;
    }

    auto VAddr = B.buildBuildVector(LLT::vector(NumAddrRegs, 32), AddrRegs);
    MI.getOperand(DimIdx).setReg(VAddr.getReg(0));
  }

  for (int I = 1; I != NumVAddrs; ++I) {
    MachineOperand &SrcOp = MI.getOperand(DimIdx + I);
    if (SrcOp.isReg())
      MI.getOperand(DimIdx + I).setReg(AMDGPU::NoRegister);
  }
}

/// Rewrite image intrinsics to use register layouts expected by the subtarget.
///
/// Depending on the subtarget, load/store with 16-bit element data need to be
/// rewritten to use the low half of 32-bit registers, or directly use a packed
/// layout. 16-bit addresses should also sometimes be packed into 32-bit
/// registers.
///
/// We don't want to directly select image instructions just yet, but also want
/// to expose all register repacking to the legalizer/combiners. We also don't
/// want a selected instruction entering RegBankSelect. In order to avoid
/// defining a multitude of intermediate image instructions, directly hack on
/// the intrinsic's arguments. In cases like a16 addresses, this requires
/// padding now-unnecessary arguments with $noreg.
bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
    MachineInstr &MI, MachineIRBuilder &B, GISelChangeObserver &Observer,
    const AMDGPU::ImageDimIntrinsicInfo *Intr) const {

  const unsigned NumDefs = MI.getNumExplicitDefs();
  const unsigned ArgOffset = NumDefs + 1;
  bool IsTFE = NumDefs == 2;
  // We are only processing the operands of d16 image operations on subtargets
  // that use the unpacked register layout, or need to repack the TFE result.

  // TODO: Do we need to guard against already legalized intrinsics?
  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);

  MachineRegisterInfo *MRI = B.getMRI();
  const LLT S32 = LLT::scalar(32);
  const LLT S16 = LLT::scalar(16);
  const LLT V2S16 = LLT::vector(2, 16);

  unsigned DMask = 0;

  // Check for 16 bit addresses and pack if true.
  LLT GradTy =
      MRI->getType(MI.getOperand(ArgOffset + Intr->GradientStart).getReg());
  LLT AddrTy =
      MRI->getType(MI.getOperand(ArgOffset + Intr->CoordStart).getReg());
  const bool IsG16 = GradTy == S16;
  const bool IsA16 = AddrTy == S16;

  int DMaskLanes = 0;
  if (!BaseOpcode->Atomic) {
    DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
    if (BaseOpcode->Gather4) {
      DMaskLanes = 4;
    } else if (DMask != 0) {
      DMaskLanes = countPopulation(DMask);
    } else if (!IsTFE && !BaseOpcode->Store) {
      // If dmask is 0, this is a no-op load. This can be eliminated.
      B.buildUndef(MI.getOperand(0));
      MI.eraseFromParent();
      return true;
    }
  }

  Observer.changingInstr(MI);
  auto ChangedInstr = make_scope_exit([&] { Observer.changedInstr(MI); });

  unsigned NewOpcode = NumDefs == 0 ?
    AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE : AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD;

  // Track that we legalized this
  MI.setDesc(B.getTII().get(NewOpcode));

  // We are expecting to get an error flag since TFC is on and dmask is 0.
  // Force dmask to be at least 1, otherwise the instruction will fail.
  if (IsTFE && DMask == 0) {
    DMask = 0x1;
    DMaskLanes = 1;
    MI.getOperand(ArgOffset + Intr->DMaskIndex).setImm(DMask);
  }

  if (BaseOpcode->Atomic) {
    Register VData0 = MI.getOperand(2).getReg();
    LLT Ty = MRI->getType(VData0);

    // TODO: Allow atomic swap and bit ops for v2s16/v4s16
    if (Ty.isVector())
      return false;

    if (BaseOpcode->AtomicX2) {
      Register VData1 = MI.getOperand(3).getReg();
      // The two values are packed in one register.
      LLT PackedTy = LLT::vector(2, Ty);
      auto Concat = B.buildBuildVector(PackedTy, {VData0, VData1});
      MI.getOperand(2).setReg(Concat.getReg(0));
      MI.getOperand(3).setReg(AMDGPU::NoRegister);
    }
  }

  unsigned CorrectedNumVAddrs = Intr->NumVAddrs;

  // Optimize _L to _LZ when _L is zero
  if (const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
          AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode)) {
    const ConstantFP *ConstantLod;

    if (mi_match(MI.getOperand(ArgOffset + Intr->LodIndex).getReg(), *MRI,
                 m_GFCst(ConstantLod))) {
      if (ConstantLod->isZero() || ConstantLod->isNegative()) {
        // Set new opcode to _lz variant of _l, and change the intrinsic ID.
        const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
            AMDGPU::getImageDimInstrinsicByBaseOpcode(LZMappingInfo->LZ,
                                                      Intr->Dim);

        // The starting indexes should remain in the same place.
        --CorrectedNumVAddrs;

        MI.getOperand(MI.getNumExplicitDefs())
            .setIntrinsicID(static_cast<Intrinsic::ID>(NewImageDimIntr->Intr));
        MI.RemoveOperand(ArgOffset + Intr->LodIndex);
        Intr = NewImageDimIntr;
      }
    }
  }

  // Optimize _mip away, when 'lod' is zero
  if (AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode)) {
    int64_t ConstantLod;
    if (mi_match(MI.getOperand(ArgOffset + Intr->MipIndex).getReg(), *MRI,
                 m_ICst(ConstantLod))) {
      if (ConstantLod == 0) {
        // TODO: Change intrinsic opcode and remove operand instead of replacing
        // it with 0, as the _L to _LZ handling is done above.
        MI.getOperand(ArgOffset + Intr->MipIndex).ChangeToImmediate(0);
        --CorrectedNumVAddrs;
      }
    }
  }

  // Rewrite the addressing register layout before doing anything else.
  if (IsA16 || IsG16) {
    if (IsA16) {
      // Target must support the feature and gradients need to be 16 bit too
      if (!ST.hasA16() || !IsG16)
        return false;
    } else if (!ST.hasG16())
      return false;

    if (Intr->NumVAddrs > 1) {
      SmallVector<Register, 4> PackedRegs;
      // Don't compress addresses for G16
      const int PackEndIdx = IsA16 ? Intr->VAddrEnd : Intr->CoordStart;
      packImageA16AddressToDwords(B, MI, PackedRegs, ArgOffset, Intr,
                                  PackEndIdx);

      if (!IsA16) {
        // Add uncompressed address
        for (unsigned I = Intr->CoordStart; I < Intr->VAddrEnd; I++) {
          Register AddrReg = MI.getOperand(ArgOffset + I).getReg();
          assert(B.getMRI()->getType(AddrReg) == LLT::scalar(32));
          PackedRegs.push_back(AddrReg);
        }
      }

      // See also below in the non-a16 branch
      const bool UseNSA = PackedRegs.size() >= 3 && ST.hasNSAEncoding();

      if (!UseNSA && PackedRegs.size() > 1) {
        LLT PackedAddrTy = LLT::vector(2 * PackedRegs.size(), 16);
        auto Concat = B.buildConcatVectors(PackedAddrTy, PackedRegs);
        PackedRegs[0] = Concat.getReg(0);
        PackedRegs.resize(1);
      }

      const unsigned NumPacked = PackedRegs.size();
      for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
        MachineOperand &SrcOp = MI.getOperand(ArgOffset + I);
        if (!SrcOp.isReg()) {
          assert(SrcOp.isImm() && SrcOp.getImm() == 0);
          continue;
        }

        assert(SrcOp.getReg() != AMDGPU::NoRegister);

        if (I - Intr->VAddrStart < NumPacked)
          SrcOp.setReg(PackedRegs[I - Intr->VAddrStart]);
        else
          SrcOp.setReg(AMDGPU::NoRegister);
      }
    }
  } else {
    // If the register allocator cannot place the address registers contiguously
    // without introducing moves, then using the non-sequential address encoding
    // is always preferable, since it saves VALU instructions and is usually a
    // wash in terms of code size or even better.
    //
    // However, we currently have no way of hinting to the register allocator
    // that MIMG addresses should be placed contiguously when it is possible to
    // do so, so force non-NSA for the common 2-address case as a heuristic.
    //
    // SIShrinkInstructions will convert NSA encodings to non-NSA after register
    // allocation when possible.
    const bool UseNSA = CorrectedNumVAddrs >= 3 && ST.hasNSAEncoding();

    if (!UseNSA && Intr->NumVAddrs > 1)
      convertImageAddrToPacked(B, MI, ArgOffset + Intr->VAddrStart,
                               Intr->NumVAddrs);
  }

  int Flags = 0;
  if (IsA16)
    Flags |= 1;
  if (IsG16)
    Flags |= 2;
  MI.addOperand(MachineOperand::CreateImm(Flags));

  if (BaseOpcode->Store) { // No TFE for stores?
    // TODO: Handle dmask trim
    Register VData = MI.getOperand(1).getReg();
    LLT Ty = MRI->getType(VData);
    if (!Ty.isVector() || Ty.getElementType() != S16)
      return true;

    Register RepackedReg = handleD16VData(B, *MRI, VData, true);
    if (RepackedReg != VData) {
      MI.getOperand(1).setReg(RepackedReg);
    }

    return true;
  }

  Register DstReg = MI.getOperand(0).getReg();
  LLT Ty = MRI->getType(DstReg);
  const LLT EltTy = Ty.getScalarType();
  const bool IsD16 = Ty.getScalarType() == S16;
  const int NumElts = Ty.isVector() ? Ty.getNumElements() : 1;

  // Confirm that the return type is large enough for the dmask specified
  if (NumElts < DMaskLanes)
    return false;

  if (NumElts > 4 || DMaskLanes > 4)
    return false;

  const unsigned AdjustedNumElts = DMaskLanes == 0 ? 1 : DMaskLanes;
  const LLT AdjustedTy = Ty.changeNumElements(AdjustedNumElts);

  // The raw dword aligned data component of the load. The only legal cases
  // where this matters should be when using the packed D16 format, for
  // s16 -> <2 x s16>, and <3 x s16> -> <4 x s16>.
  LLT RoundedTy;

  // S32 vector to cover all data, plus TFE result element.
  LLT TFETy;

  // Register type to use for each loaded component. Will be S32 or V2S16.
  LLT RegTy;

  if (IsD16 && ST.hasUnpackedD16VMem()) {
    RoundedTy = LLT::scalarOrVector(AdjustedNumElts, 32);
    TFETy = LLT::vector(AdjustedNumElts + 1, 32);
    RegTy = S32;
  } else {
    unsigned EltSize = EltTy.getSizeInBits();
    unsigned RoundedElts = (AdjustedTy.getSizeInBits() + 31) / 32;
    unsigned RoundedSize = 32 * RoundedElts;
    RoundedTy = LLT::scalarOrVector(RoundedSize / EltSize, EltSize);
    TFETy = LLT::vector(RoundedSize / 32 + 1, S32);
    RegTy = !IsTFE && EltSize == 16 ? V2S16 : S32;
  }

  // The return type does not need adjustment.
  // TODO: Should we change s16 case to s32 or <2 x s16>?
  if (!IsTFE && (RoundedTy == Ty || !Ty.isVector()))
    return true;

  Register Dst1Reg;

  // Insert after the instruction.
  B.setInsertPt(*MI.getParent(), ++MI.getIterator());

  // TODO: For TFE with d16, if we used a TFE type that was a multiple of <2 x
  // s16> instead of s32, we would only need 1 bitcast instead of multiple.
  const LLT LoadResultTy = IsTFE ? TFETy : RoundedTy;
  const int ResultNumRegs = LoadResultTy.getSizeInBits() / 32;

  Register NewResultReg = MRI->createGenericVirtualRegister(LoadResultTy);

  MI.getOperand(0).setReg(NewResultReg);

  // In the IR, TFE is supposed to be used with a 2 element struct return
  // type. The instruction really returns these two values in one contiguous
  // register, with one additional dword beyond the loaded data. Rewrite the
  // return type to use a single register result.

  if (IsTFE) {
    Dst1Reg = MI.getOperand(1).getReg();
    if (MRI->getType(Dst1Reg) != S32)
      return false;

    // TODO: Make sure the TFE operand bit is set.
    MI.RemoveOperand(1);

    // Handle the easy case that requires no repack instructions.
    if (Ty == S32) {
      B.buildUnmerge({DstReg, Dst1Reg}, NewResultReg);
      return true;
    }
  }

  // Now figure out how to copy the new result register back into the old
  // result.
  SmallVector<Register, 5> ResultRegs(ResultNumRegs, Dst1Reg);

  const int NumDataRegs = IsTFE ? ResultNumRegs - 1 : ResultNumRegs;

  if (ResultNumRegs == 1) {
    assert(!IsTFE);
    ResultRegs[0] = NewResultReg;
  } else {
    // We have to repack into a new vector of some kind.
    for (int I = 0; I != NumDataRegs; ++I)
      ResultRegs[I] = MRI->createGenericVirtualRegister(RegTy);
    B.buildUnmerge(ResultRegs, NewResultReg);

    // Drop the final TFE element to get the data part. The TFE result is
    // directly written to the right place already.
    if (IsTFE)
      ResultRegs.resize(NumDataRegs);
  }

  // For an s16 scalar result, we form an s32 result with a truncate regardless
  // of packed vs. unpacked.
  if (IsD16 && !Ty.isVector()) {
    B.buildTrunc(DstReg, ResultRegs[0]);
    return true;
  }

  // Avoid a build/concat_vector of 1 entry.
  if (Ty == V2S16 && NumDataRegs == 1 && !ST.hasUnpackedD16VMem()) {
    B.buildBitcast(DstReg, ResultRegs[0]);
    return true;
  }

  assert(Ty.isVector());

  if (IsD16) {
    // For packed D16 results with TFE enabled, all the data components are
    // S32. Cast back to the expected type.
    //
    // TODO: We don't really need to load s32 elements. We would only need one
    // cast for the TFE result if a multiple of v2s16 was used.
    if (RegTy != V2S16 && !ST.hasUnpackedD16VMem()) {
      for (Register &Reg : ResultRegs)
        Reg = B.buildBitcast(V2S16, Reg).getReg(0);
    } else if (ST.hasUnpackedD16VMem()) {
      for (Register &Reg : ResultRegs)
        Reg = B.buildTrunc(S16, Reg).getReg(0);
    }
  }

  auto padWithUndef = [&](LLT Ty, int NumElts) {
    if (NumElts == 0)
      return;
    Register Undef = B.buildUndef(Ty).getReg(0);
    for (int I = 0; I != NumElts; ++I)
      ResultRegs.push_back(Undef);
  };

  // Pad out any elements eliminated due to the dmask.
  LLT ResTy = MRI->getType(ResultRegs[0]);
  if (!ResTy.isVector()) {
    padWithUndef(ResTy, NumElts - ResultRegs.size());
    B.buildBuildVector(DstReg, ResultRegs);
    return true;
  }

  assert(!ST.hasUnpackedD16VMem() && ResTy == V2S16);
  const int RegsToCover = (Ty.getSizeInBits() + 31) / 32;

  // Deal with the one annoying legal case.
  const LLT V3S16 = LLT::vector(3, 16);
  if (Ty == V3S16) {
    padWithUndef(ResTy, RegsToCover - ResultRegs.size() + 1);
    auto Concat = B.buildConcatVectors(LLT::vector(6, 16), ResultRegs);
    B.buildUnmerge({DstReg, MRI->createGenericVirtualRegister(V3S16)}, Concat);
    return true;
  }

  padWithUndef(ResTy, RegsToCover - ResultRegs.size());
  B.buildConcatVectors(DstReg, ResultRegs);
  return true;
}

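// Legalize amdgcn.s.buffer.load to G_AMDGPU_S_BUFFER_LOAD, attaching a memory
// operand and widening non-power-of-2 result types to the next power of two.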
bool AMDGPULegalizerInfo::legalizeSBufferLoad(
  LegalizerHelper &Helper, MachineInstr &MI) const {
  MachineIRBuilder &B = Helper.MIRBuilder;
  GISelChangeObserver &Observer = Helper.Observer;

  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = B.getMRI()->getType(Dst);
  unsigned Size = Ty.getSizeInBits();
  MachineFunction &MF = B.getMF();

  Observer.changingInstr(MI);

  if (shouldBitcastLoadStoreType(ST, Ty, Size)) {
    Ty = getBitcastRegisterType(Ty);
    Helper.bitcastDst(MI, Ty, 0);
    Dst = MI.getOperand(0).getReg();
    B.setInsertPt(B.getMBB(), MI);
  }

  // FIXME: We don't really need this intermediate instruction. The intrinsic
  // should be fixed to have a memory operand. Since it's readnone, we're not
  // allowed to add one.
  MI.setDesc(B.getTII().get(AMDGPU::G_AMDGPU_S_BUFFER_LOAD));
  MI.RemoveOperand(1); // Remove intrinsic ID

  // FIXME: When intrinsic definition is fixed, this should have an MMO already.
  // TODO: Should this use datalayout alignment?
  const unsigned MemSize = (Size + 7) / 8;
  const Align MemAlign(4);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      MemSize, MemAlign);
  MI.addMemOperand(MF, MMO);

  // There are no 96-bit result scalar loads, but widening to 128-bit should
  // always be legal. We may need to restore this to a 96-bit result if it turns
  // out this needs to be converted to a vector load during RegBankSelect.
  if (!isPowerOf2_32(Size)) {
    if (Ty.isVector())
      Helper.moreElementsVectorDst(MI, getPow2VectorType(Ty), 0);
    else
      Helper.widenScalarDst(MI, getPow2ScalarType(Ty), 0);
  }

  Observer.changedInstr(MI);
  return true;
}

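// Lower llvm.trap. Without the HSA trap handler this reduces to s_endpgm;
// otherwise the queue pointer is copied into SGPR0_SGPR1 for the trap handler
// and an s_trap is emitted.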
// TODO: Move to selection
bool AMDGPULegalizerInfo::legalizeTrapIntrinsic(MachineInstr &MI,
                                                MachineRegisterInfo &MRI,
                                                MachineIRBuilder &B) const {
  // If this is a non-HSA path or the trap handler is disabled, insert an
  // s_endpgm instruction.
  if (ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
      !ST.isTrapHandlerEnabled()) {
    B.buildInstr(AMDGPU::S_ENDPGM).addImm(0);
  } else {
    // Pass queue pointer to trap handler as input, and insert trap instruction
    // Reference: https://llvm.org/docs/AMDGPUUsage.html#trap-handler-abi
    MachineRegisterInfo &MRI = *B.getMRI();

    Register LiveIn =
      MRI.createGenericVirtualRegister(LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
    if (!loadInputValue(LiveIn, B, AMDGPUFunctionArgInfo::QUEUE_PTR))
      return false;

    Register SGPR01(AMDGPU::SGPR0_SGPR1);
    B.buildCopy(SGPR01, LiveIn);
    B.buildInstr(AMDGPU::S_TRAP)
        .addImm(GCNSubtarget::TrapIDLLVMTrap)
        .addReg(SGPR01, RegState::Implicit);
  }

  MI.eraseFromParent();
  return true;
}

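// Lower llvm.debugtrap. Without the HSA trap handler this only reports a
// diagnostic; otherwise an s_trap with the debug trap ID is inserted.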
bool AMDGPULegalizerInfo::legalizeDebugTrapIntrinsic(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
  // If this is a non-HSA path or the trap handler is disabled, report a
  // warning accordingly.
  if (ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
      !ST.isTrapHandlerEnabled()) {
    DiagnosticInfoUnsupported NoTrap(B.getMF().getFunction(),
                                     "debugtrap handler not supported",
                                     MI.getDebugLoc(), DS_Warning);
    LLVMContext &Ctx = B.getMF().getFunction().getContext();
    Ctx.diagnose(NoTrap);
  } else {
    // Insert debug-trap instruction
    B.buildInstr(AMDGPU::S_TRAP).addImm(GCNSubtarget::TrapIDLLVMDebugTrap);
  }

  MI.eraseFromParent();
  return true;
}

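// Lower amdgcn.image.bvh.intersect.ray to G_AMDGPU_INTRIN_BVH_INTERSECT_RAY,
// repacking the node pointer, ray origin/direction, and inverse direction
// into the flat list of 32-bit operands the NSA image instruction expects.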
bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
                                               MachineIRBuilder &B) const {
  MachineRegisterInfo &MRI = *B.getMRI();
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);

  Register DstReg = MI.getOperand(0).getReg();
  Register NodePtr = MI.getOperand(2).getReg();
  Register RayExtent = MI.getOperand(3).getReg();
  Register RayOrigin = MI.getOperand(4).getReg();
  Register RayDir = MI.getOperand(5).getReg();
  Register RayInvDir = MI.getOperand(6).getReg();
  Register TDescr = MI.getOperand(7).getReg();

  bool IsA16 = MRI.getType(RayDir).getElementType().getSizeInBits() == 16;
  bool Is64 = MRI.getType(NodePtr).getSizeInBits() == 64;
  unsigned Opcode = IsA16 ? Is64 ? AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_nsa
                                 : AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_nsa
                          : Is64 ? AMDGPU::IMAGE_BVH64_INTERSECT_RAY_nsa
                                 : AMDGPU::IMAGE_BVH_INTERSECT_RAY_nsa;

  SmallVector<Register, 12> Ops;
  if (Is64) {
    auto Unmerge = B.buildUnmerge({S32, S32}, NodePtr);
    Ops.push_back(Unmerge.getReg(0));
    Ops.push_back(Unmerge.getReg(1));
  } else {
    Ops.push_back(NodePtr);
  }
  Ops.push_back(RayExtent);

  auto packLanes = [&Ops, &S32, &B] (Register Src) {
    auto Unmerge = B.buildUnmerge({S32, S32, S32, S32}, Src);
    Ops.push_back(Unmerge.getReg(0));
    Ops.push_back(Unmerge.getReg(1));
    Ops.push_back(Unmerge.getReg(2));
  };

  packLanes(RayOrigin);
  if (IsA16) {
    auto UnmergeRayDir = B.buildUnmerge({S16, S16, S16, S16}, RayDir);
    auto UnmergeRayInvDir = B.buildUnmerge({S16, S16, S16, S16}, RayInvDir);
    Register R1 = MRI.createGenericVirtualRegister(S32);
    Register R2 = MRI.createGenericVirtualRegister(S32);
    Register R3 = MRI.createGenericVirtualRegister(S32);
    B.buildMerge(R1, {UnmergeRayDir.getReg(0), UnmergeRayDir.getReg(1)});
    B.buildMerge(R2, {UnmergeRayDir.getReg(2), UnmergeRayInvDir.getReg(0)});
    B.buildMerge(R3, {UnmergeRayInvDir.getReg(1), UnmergeRayInvDir.getReg(2)});
    Ops.push_back(R1);
    Ops.push_back(R2);
    Ops.push_back(R3);
  } else {
    packLanes(RayDir);
    packLanes(RayInvDir);
  }

  auto MIB = B.buildInstr(AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY)
    .addDef(DstReg)
    .addImm(Opcode);

  for (Register R : Ops) {
    MIB.addUse(R);
  }

  MIB.addUse(TDescr)
     .addImm(IsA16 ? 1 : 0)
     .cloneMemRefs(MI);

  MI.eraseFromParent();
  return true;
}

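// Custom legalization entry point for target intrinsics that need more than
// the generic rules, dispatching to the per-intrinsic helpers above. Image
// intrinsics are handled through the ImageDimIntrinsicInfo table in the
// default case.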
bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                            MachineInstr &MI) const {
  MachineIRBuilder &B = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *B.getMRI();

  // Replace the G_BRCOND use with the exec-manipulating branch pseudos.
  auto IntrID = MI.getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_if:
  case Intrinsic::amdgcn_else: {
    MachineInstr *Br = nullptr;
    MachineBasicBlock *UncondBrTarget = nullptr;
    bool Negated = false;
    if (MachineInstr *BrCond =
            verifyCFIntrinsic(MI, MRI, Br, UncondBrTarget, Negated)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      Register Def = MI.getOperand(1).getReg();
      Register Use = MI.getOperand(3).getReg();

      MachineBasicBlock *CondBrTarget = BrCond->getOperand(1).getMBB();

      if (Negated)
        std::swap(CondBrTarget, UncondBrTarget);

      B.setInsertPt(B.getMBB(), BrCond->getIterator());
      if (IntrID == Intrinsic::amdgcn_if) {
        B.buildInstr(AMDGPU::SI_IF)
          .addDef(Def)
          .addUse(Use)
          .addMBB(UncondBrTarget);
      } else {
        B.buildInstr(AMDGPU::SI_ELSE)
            .addDef(Def)
            .addUse(Use)
            .addMBB(UncondBrTarget);
      }

      if (Br) {
        Br->getOperand(0).setMBB(CondBrTarget);
      } else {
        // The IRTranslator skips inserting the G_BR for fallthrough cases, but
        // since we're swapping branch targets it needs to be reinserted.
        // FIXME: IRTranslator should probably not do this
        B.buildBr(*CondBrTarget);
      }

      MRI.setRegClass(Def, TRI->getWaveMaskRegClass());
      MRI.setRegClass(Use, TRI->getWaveMaskRegClass());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_loop: {
    MachineInstr *Br = nullptr;
    MachineBasicBlock *UncondBrTarget = nullptr;
    bool Negated = false;
    if (MachineInstr *BrCond =
            verifyCFIntrinsic(MI, MRI, Br, UncondBrTarget, Negated)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      MachineBasicBlock *CondBrTarget = BrCond->getOperand(1).getMBB();
      Register Reg = MI.getOperand(2).getReg();

      if (Negated)
        std::swap(CondBrTarget, UncondBrTarget);

      B.setInsertPt(B.getMBB(), BrCond->getIterator());
      B.buildInstr(AMDGPU::SI_LOOP)
        .addUse(Reg)
        .addMBB(UncondBrTarget);

      if (Br)
        Br->getOperand(0).setMBB(CondBrTarget);
      else
        B.buildBr(*CondBrTarget);

      MI.eraseFromParent();
      BrCond->eraseFromParent();
      MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    if (!AMDGPU::isKernel(B.getMF().getFunction().getCallingConv())) {
      // This only makes sense to call in a kernel, so just lower to null.
      B.buildConstant(MI.getOperand(0).getReg(), 0);
      MI.eraseFromParent();
      return true;
    }

    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  case Intrinsic::amdgcn_implicitarg_ptr:
    return legalizeImplicitArgPtr(MI, MRI, B);
  case Intrinsic::amdgcn_workitem_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  case Intrinsic::amdgcn_workitem_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  case Intrinsic::amdgcn_workitem_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_dispatch_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_PTR);
  case Intrinsic::amdgcn_queue_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::QUEUE_PTR);
  case Intrinsic::amdgcn_implicit_buffer_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  case Intrinsic::amdgcn_dispatch_id:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_ID);
  case Intrinsic::amdgcn_fdiv_fast:
    return legalizeFDIVFastIntrin(MI, MRI, B);
  case Intrinsic::amdgcn_is_shared:
    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::LOCAL_ADDRESS);
  case Intrinsic::amdgcn_is_private:
    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::PRIVATE_ADDRESS);
  case Intrinsic::amdgcn_wavefrontsize: {
    B.buildConstant(MI.getOperand(0), ST.getWavefrontSize());
    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::amdgcn_s_buffer_load:
    return legalizeSBufferLoad(Helper, MI);
  case Intrinsic::amdgcn_raw_buffer_store:
  case Intrinsic::amdgcn_struct_buffer_store:
    return legalizeBufferStore(MI, MRI, B, false, false);
  case Intrinsic::amdgcn_raw_buffer_store_format:
  case Intrinsic::amdgcn_struct_buffer_store_format:
    return legalizeBufferStore(MI, MRI, B, false, true);
  case Intrinsic::amdgcn_raw_tbuffer_store:
  case Intrinsic::amdgcn_struct_tbuffer_store:
    return legalizeBufferStore(MI, MRI, B, true, true);
  case Intrinsic::amdgcn_raw_buffer_load:
  case Intrinsic::amdgcn_struct_buffer_load:
    return legalizeBufferLoad(MI, MRI, B, false, false);
  case Intrinsic::amdgcn_raw_buffer_load_format:
  case Intrinsic::amdgcn_struct_buffer_load_format:
    return legalizeBufferLoad(MI, MRI, B, true, false);
  case Intrinsic::amdgcn_raw_tbuffer_load:
  case Intrinsic::amdgcn_struct_tbuffer_load:
    return legalizeBufferLoad(MI, MRI, B, true, true);
  case Intrinsic::amdgcn_raw_buffer_atomic_swap:
  case Intrinsic::amdgcn_struct_buffer_atomic_swap:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_inc:
  case Intrinsic::amdgcn_struct_buffer_atomic_inc:
  case Intrinsic::amdgcn_raw_buffer_atomic_dec:
  case Intrinsic::amdgcn_struct_buffer_atomic_dec:
  case Intrinsic::amdgcn_raw_buffer_atomic_fadd:
  case Intrinsic::amdgcn_struct_buffer_atomic_fadd:
  case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap:
    return legalizeBufferAtomic(MI, B, IntrID);
  case Intrinsic::amdgcn_atomic_inc:
    return legalizeAtomicIncDec(MI, B, true);
  case Intrinsic::amdgcn_atomic_dec:
    return legalizeAtomicIncDec(MI, B, false);
  case Intrinsic::trap:
    return legalizeTrapIntrinsic(MI, MRI, B);
  case Intrinsic::debugtrap:
    return legalizeDebugTrapIntrinsic(MI, MRI, B);
  case Intrinsic::amdgcn_rsq_clamp:
    return legalizeRsqClampIntrinsic(MI, MRI, B);
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
    return legalizeDSAtomicFPIntrinsic(Helper, MI, IntrID);
  case Intrinsic::amdgcn_image_bvh_intersect_ray:
    return legalizeBVHIntrinsic(MI, B);
  default: {
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(IntrID))
      return legalizeImageIntrinsic(MI, B, Helper.Observer, ImageDimIntr);
    return true;
  }
  }

  return true;
}