forked from OSchip/llvm-project

Roughly cleaned RVV instruction selection.

parent 35633e31e3
commit 521e83631d
@@ -157,8 +157,10 @@ IsaVersion getIsaVersion(StringRef GPU);
 
 namespace RISCV {
 
+// ARIES: FIXME: Change this to 32 to support zve32* which is needed by
+// ventus-gpgpu.
 // We use 64 bits as the known part in the scalable vector types.
-static constexpr unsigned RVVBitsPerBlock = 64;
+static constexpr unsigned RVVBitsPerBlock = 32;
 
 enum CPUKind : unsigned {
 #define PROC(ENUM, NAME, FEATURES, DEFAULT_MARCH) CK_##ENUM,
@@ -211,49 +211,6 @@ static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
   return SDValue(nullptr, 0);
 }
 
-void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
-    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
-    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
-    bool IsLoad, MVT *IndexVT) {
-  SDValue Chain = Node->getOperand(0);
-  SDValue Glue;
-
-  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
-
-  if (IsStridedOrIndexed) {
-    Operands.push_back(Node->getOperand(CurOp++)); // Index.
-    if (IndexVT)
-      *IndexVT = Operands.back()->getSimpleValueType(0);
-  }
-
-  if (IsMasked) {
-    // Mask needs to be copied to V0.
-    SDValue Mask = Node->getOperand(CurOp++);
-    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
-    Glue = Chain.getValue(1);
-    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
-  }
-  SDValue VL;
-  selectVLOp(Node->getOperand(CurOp++), VL);
-  Operands.push_back(VL);
-
-  MVT XLenVT = Subtarget->getXLenVT();
-  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
-  Operands.push_back(SEWOp);
-
-  // Masked load has the tail policy argument.
-  if (IsMasked && IsLoad) {
-    // Policy must be a constant.
-    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
-    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
-    Operands.push_back(PolicyOp);
-  }
-
-  Operands.push_back(Chain); // Chain.
-  if (Glue)
-    Operands.push_back(Glue);
-}
-
 static bool isAllUndef(ArrayRef<SDValue> Values) {
   return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
 }
@@ -766,388 +723,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     ReplaceNode(Node, MULHU);
     return;
   }
-  case ISD::INTRINSIC_WO_CHAIN: {
-    unsigned IntNo = Node->getConstantOperandVal(0);
-    switch (IntNo) {
-      // By default we do not custom select any intrinsic.
-    default:
-      break;
-    case Intrinsic::riscv_vmsgeu:
-    case Intrinsic::riscv_vmsge: {
-      assert(0 && "TODO");
-      SDValue Src1 = Node->getOperand(1);
-      SDValue Src2 = Node->getOperand(2);
-      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
-      bool IsCmpUnsignedZero = false;
-      // Only custom select scalar second operand.
-      if (Src2.getValueType() != XLenVT)
-        break;
-      // Small constants are handled with patterns.
-      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
-        int64_t CVal = C->getSExtValue();
-        if (CVal >= -15 && CVal <= 16) {
-          if (!IsUnsigned || CVal != 0)
-            break;
-          IsCmpUnsignedZero = true;
-        }
-      }
-      MVT Src1VT = Src1.getSimpleValueType();
-      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
-      SDValue SEW = CurDAG->getTargetConstant(
-          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
-      SDValue VL;
-      selectVLOp(Node->getOperand(3), VL);
-
-      // If vmsgeu with 0 immediate, expand it to vmset.
-      if (IsCmpUnsignedZero) {
-        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
-        return;
-      }
-
-      // Expand to
-      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
-      SDValue Cmp = SDValue(
-          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
-          0);
-      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
-                                               {Cmp, Cmp, VL, SEW}));
-      return;
-    }
-    case Intrinsic::riscv_vmsgeu_mask:
-    case Intrinsic::riscv_vmsge_mask: {
-      assert(0 && "TODO");
-      SDValue Src1 = Node->getOperand(2);
-      SDValue Src2 = Node->getOperand(3);
-      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
-      bool IsCmpUnsignedZero = false;
-      // Only custom select scalar second operand.
-      if (Src2.getValueType() != XLenVT)
-        break;
-      // Small constants are handled with patterns.
-      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
-        int64_t CVal = C->getSExtValue();
-        if (CVal >= -15 && CVal <= 16) {
-          if (!IsUnsigned || CVal != 0)
-            break;
-          IsCmpUnsignedZero = true;
-        }
-      }
-      MVT Src1VT = Src1.getSimpleValueType();
-      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
-          VMOROpcode;
-
-      SDValue SEW = CurDAG->getTargetConstant(
-          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
-      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
-      SDValue VL;
-      selectVLOp(Node->getOperand(5), VL);
-      SDValue MaskedOff = Node->getOperand(1);
-      SDValue Mask = Node->getOperand(4);
-
-      // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
-      if (IsCmpUnsignedZero) {
-        // We don't need vmor if the MaskedOff and the Mask are the same
-        // value.
-        if (Mask == MaskedOff) {
-          ReplaceUses(Node, Mask.getNode());
-          return;
-        }
-        ReplaceNode(Node,
-                    CurDAG->getMachineNode(VMOROpcode, DL, VT,
-                                           {Mask, MaskedOff, VL, MaskSEW}));
-        return;
-      }
-
-      // If the MaskedOff value and the Mask are the same value use
-      // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
-      // This avoids needing to copy v0 to vd before starting the next sequence.
-      if (Mask == MaskedOff) {
-        SDValue Cmp = SDValue(
-            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
-            0);
-        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
-                                                 {Mask, Cmp, VL, MaskSEW}));
-        return;
-      }
-
-      // Mask needs to be copied to V0.
-      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
-                                           RISCV::V0, Mask, SDValue());
-      SDValue Glue = Chain.getValue(1);
-      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
-
-      // Otherwise use
-      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
-      // The result is mask undisturbed.
-      // We use the same instructions to emulate mask agnostic behavior, because
-      // the agnostic result can be either undisturbed or all 1.
-      SDValue Cmp = SDValue(
-          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
-                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
-          0);
-      // vmxor.mm vd, vd, v0 is used to update active value.
-      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
-                                               {Cmp, Mask, VL, MaskSEW}));
-      return;
-    }
-    }
-    break;
-  }
-  case ISD::INTRINSIC_W_CHAIN: {
-    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
-    switch (IntNo) {
-      // By default we do not custom select any intrinsic.
-    default:
-      break;
-    case Intrinsic::riscv_vlm:
-    case Intrinsic::riscv_vle:
-    case Intrinsic::riscv_vle_mask:
-    case Intrinsic::riscv_vlse:
-    case Intrinsic::riscv_vlse_mask: {
-      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
-                      IntNo == Intrinsic::riscv_vlse_mask;
-      bool IsStrided =
-          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
-
-      MVT VT = Node->getSimpleValueType(0);
-      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
-
-      unsigned CurOp = 2;
-      // The riscv_vlm intrinsic are always tail agnostic and no passthru operand.
-      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
-      // Masked intrinsic only have TU version pseduo instructions.
-      bool IsTU = HasPassthruOperand &&
-                  (IsMasked || !Node->getOperand(CurOp).isUndef());
-      SmallVector<SDValue, 8> Operands;
-      if (IsTU)
-        Operands.push_back(Node->getOperand(CurOp++));
-      else if (HasPassthruOperand)
-        // Skip the undef passthru operand for nomask TA version pseudo
-        CurOp++;
-
-      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
-                                 Operands, /*IsLoad=*/true);
-
-      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-      assert(0 && "TODO: cut!");
-      /*
-      const RISCV::VLEPseudo *P =
-          RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, false, Log2SEW,
-                              static_cast<unsigned>(LMUL));
-      MachineSDNode *Load =
-          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
-
-      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
-        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
-
-      ReplaceNode(Node, Load);
-      */
-      return;
-    }
-    }
-    break;
-  }
-
-  case ISD::INTRINSIC_VOID: {
-    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
-    switch (IntNo) {
-    case Intrinsic::riscv_vsm:
-    case Intrinsic::riscv_vse: {
-      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
-      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
-
-      unsigned CurOp = 2;
-      SmallVector<SDValue, 8> Operands;
-      Operands.push_back(Node->getOperand(CurOp++)); // Store value.
-
-      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, 0, 0,
-                                 Operands);
-
-      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-      assert(0 && "TODO: Gen vALU load/store inst.");
-      /*
-      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
-          0, 0, Log2SEW, static_cast<unsigned>(LMUL));
-      MachineSDNode *Store =
-          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
-      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
-        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
-
-      ReplaceNode(Node, Store);
-      */
-      return;
-    }
-    }
-    break;
-  }
-  case ISD::BITCAST: {
-    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
-    // Just drop bitcasts between vectors if both are fixed or both are
-    // scalable.
-    if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
-        (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
-      ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
-      CurDAG->RemoveDeadNode(Node);
-      return;
-    }
-    break;
-  }
-  case ISD::INSERT_SUBVECTOR: {
-    SDValue V = Node->getOperand(0);
-    SDValue SubV = Node->getOperand(1);
-    SDLoc DL(SubV);
-    auto Idx = Node->getConstantOperandVal(2);
-    MVT SubVecVT = SubV.getSimpleValueType();
-
-    const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
-    MVT SubVecContainerVT = SubVecVT;
-    // Establish the correct scalable-vector types for any fixed-length type.
-    if (SubVecVT.isFixedLengthVector())
-      SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
-    if (VT.isFixedLengthVector())
-      VT = TLI.getContainerForFixedLengthVector(VT);
-
-    const auto *TRI = Subtarget->getRegisterInfo();
-    unsigned SubRegIdx;
-    std::tie(SubRegIdx, Idx) =
-        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
-            VT, SubVecContainerVT, Idx, TRI);
-
-    // If the Idx hasn't been completely eliminated then this is a subvector
-    // insert which doesn't naturally align to a vector register. These must
-    // be handled using instructions to manipulate the vector registers.
-    if (Idx != 0)
-      break;
-
-    RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
-    bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
-                           SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
-                           SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
-    (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
-    assert((!IsSubVecPartReg || V.isUndef()) &&
-           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
-           "the subvector is smaller than a full-sized register");
-
-    // If we haven't set a SubRegIdx, then we must be going between
-    // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
-    if (SubRegIdx == RISCV::NoSubRegister) {
-      unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
-      assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
-                 InRegClassID &&
-             "Unexpected subvector extraction");
-      SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
-      SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
-                                               DL, VT, SubV, RC);
-      ReplaceNode(Node, NewNode);
-      return;
-    }
-
-    SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
-    ReplaceNode(Node, Insert.getNode());
-    return;
-  }
-  case ISD::EXTRACT_SUBVECTOR: {
-    SDValue V = Node->getOperand(0);
-    auto Idx = Node->getConstantOperandVal(1);
-    MVT InVT = V.getSimpleValueType();
-    SDLoc DL(V);
-
-    const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
-    MVT SubVecContainerVT = VT;
-    // Establish the correct scalable-vector types for any fixed-length type.
-    if (VT.isFixedLengthVector())
-      SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
-    if (InVT.isFixedLengthVector())
-      InVT = TLI.getContainerForFixedLengthVector(InVT);
-
-    const auto *TRI = Subtarget->getRegisterInfo();
-    unsigned SubRegIdx;
-    std::tie(SubRegIdx, Idx) =
-        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
-            InVT, SubVecContainerVT, Idx, TRI);
-
-    // If the Idx hasn't been completely eliminated then this is a subvector
-    // extract which doesn't naturally align to a vector register. These must
-    // be handled using instructions to manipulate the vector registers.
-    if (Idx != 0)
-      break;
-
-    // If we haven't set a SubRegIdx, then we must be going between
-    // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
-    if (SubRegIdx == RISCV::NoSubRegister) {
-      unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
-      assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
-                 InRegClassID &&
-             "Unexpected subvector extraction");
-      SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
-      SDNode *NewNode =
-          CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
-      ReplaceNode(Node, NewNode);
-      return;
-    }
-
-    SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
-    ReplaceNode(Node, Extract.getNode());
-    return;
-  }
-  case RISCVISD::VMV_S_X_VL:
-  case RISCVISD::VFMV_S_F_VL:
-  case RISCVISD::VMV_V_X_VL:
-  case RISCVISD::VFMV_V_F_VL: {
-    // Only if we have optimized zero-stride vector load.
-    if (!Subtarget->hasOptimizedZeroStrideLoad())
-      break;
-
-    // Try to match splat of a scalar load to a strided load with stride of x0.
-    bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
-                        Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
-    if (!Node->getOperand(0).isUndef())
-      break;
-    SDValue Src = Node->getOperand(1);
-    auto *Ld = dyn_cast<LoadSDNode>(Src);
-    if (!Ld)
-      break;
-    EVT MemVT = Ld->getMemoryVT();
-    // The memory VT should be the same size as the element type.
-    if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
-      break;
-    if (!IsProfitableToFold(Src, Node, Node) ||
-        !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
-      break;
-
-    SDValue VL;
-    if (IsScalarMove) {
-      // We could deal with more VL if we update the VSETVLI insert pass to
-      // avoid introducing more VSETVLI.
-      if (!isOneConstant(Node->getOperand(2)))
-        break;
-      selectVLOp(Node->getOperand(2), VL);
-    } else
-      selectVLOp(Node->getOperand(2), VL);
-
-    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
-    SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
-
-    SDValue Operands[] = {Ld->getBasePtr(),
-                          CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
-                          Ld->getChain()};
-
-    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-    assert(0 && "TODO: cut!");
-    /*
-    const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
-        false, false, true, false, Log2SEW, static_cast<unsigned>(LMUL));
-    MachineSDNode *Load =
-        CurDAG->getMachineNode(P->Pseudo, DL, {VT, MVT::Other}, Operands);
-    // Update the chain.
-    ReplaceUses(Src.getValue(1), SDValue(Load, 1));
-    // Record the mem-refs
-    CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
-    // Replace the splat with the vlse.
-    ReplaceNode(Node, Load);
-    */
-    return;
-  }
   }
 
   // Select the default instruction.
   SelectCode(Node);
@@ -1710,126 +1285,6 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
   return true;
 }
 
-// Select VL as a 5 bit immediate or a value that will become a register. This
-// allows us to choose betwen VSETIVLI or VSETVLI later.
-bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
-  auto *C = dyn_cast<ConstantSDNode>(N);
-  if (C && isUInt<5>(C->getZExtValue())) {
-    VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
-                                   N->getValueType(0));
-  } else if (C && C->isAllOnesValue()) {
-    // Treat all ones as VLMax.
-    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
-                                   N->getValueType(0));
-  } else if (isa<RegisterSDNode>(N) &&
-             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
-    // All our VL operands use an operand that allows GPRNoX0 or an immediate
-    // as the register class. Convert X0 to a special immediate to pass the
-    // MachineVerifier. This is recognized specially by the vsetvli insertion
-    // pass.
-    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
-                                   N->getValueType(0));
-  } else {
-    VL = N;
-  }
-
-  return true;
-}
-
-bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
-  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
-    return false;
-  assert(N.getNumOperands() == 3 && "Unexpected number of operands");
-  SplatVal = N.getOperand(1);
-  return true;
-}
-
-using ValidateFn = bool (*)(int64_t);
-
-static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
-                                   SelectionDAG &DAG,
-                                   const RISCVSubtarget &Subtarget,
-                                   ValidateFn ValidateImm) {
-  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
-      !isa<ConstantSDNode>(N.getOperand(1)))
-    return false;
-  assert(N.getNumOperands() == 3 && "Unexpected number of operands");
-
-  int64_t SplatImm =
-      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
-
-  // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
-  // type is wider than the resulting vector element type: an implicit
-  // truncation first takes place. Therefore, perform a manual
-  // truncation/sign-extension in order to ignore any truncated bits and catch
-  // any zero-extended immediate.
-  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
-  // sign-extending to (XLenVT -1).
-  MVT XLenVT = Subtarget.getXLenVT();
-  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
-         "Unexpected splat operand type");
-  MVT EltVT = N.getSimpleValueType().getVectorElementType();
-  if (EltVT.bitsLT(XLenVT))
-    SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
-
-  if (!ValidateImm(SplatImm))
-    return false;
-
-  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
-  return true;
-}
-
-bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
-  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
-                                [](int64_t Imm) { return isInt<5>(Imm); });
-}
-
-bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
-  return selectVSplatSimmHelper(
-      N, SplatVal, *CurDAG, *Subtarget,
-      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
-}
-
-bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
-                                                      SDValue &SplatVal) {
-  return selectVSplatSimmHelper(
-      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
-        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
-      });
-}
-
-bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
-  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
-      !isa<ConstantSDNode>(N.getOperand(1)))
-    return false;
-
-  int64_t SplatImm =
-      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
-
-  if (!isUInt<5>(SplatImm))
-    return false;
-
-  SplatVal =
-      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
-
-  return true;
-}
-
-bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
-                                       SDValue &Imm) {
-  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
-    int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
-
-    if (!isInt<5>(ImmVal))
-      return false;
-
-    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
-    return true;
-  }
-
-  return false;
-}
-
 // Try to remove sext.w if the input is a W instruction or can be made into
 // a W instruction cheaply.
 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
@@ -1892,307 +1347,6 @@ bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
   return false;
 }
 
-// Return true if we can make sure mask of N is all-ones mask.
-static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx) {
-  // Check that we're using V0 as a mask register.
-  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
-      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
-    return false;
-
-  // The glued user defines V0.
-  const auto *Glued = N->getGluedNode();
-
-  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
-    return false;
-
-  // Check that we're defining V0 as a mask register.
-  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
-      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
-    return false;
-
-  // Check the instruction defining V0; it needs to be a VMSET pseudo.
-  SDValue MaskSetter = Glued->getOperand(2);
-
-  const auto IsVMSet = [](unsigned Opc) {
-    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
-           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
-           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
-           Opc == RISCV::PseudoVMSET_M_B8;
-  };
-
-  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
-  // undefined behaviour if it's the wrong bitwidth, so we could choose to
-  // assume that it's all-ones? Same applies to its VL.
-  return MaskSetter->isMachineOpcode() &&
-         IsVMSet(MaskSetter.getMachineOpcode());
-}
-
-// Optimize masked RVV pseudo instructions with a known all-ones mask to their
-// corresponding "unmasked" pseudo versions. The mask we're interested in will
-// take the form of a V0 physical register operand, with a glued
-// register-setting instruction.
-bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
-  const RISCV::RISCVMaskedPseudoInfo *I =
-      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
-  if (!I)
-    return false;
-
-  unsigned MaskOpIdx = I->MaskOpIdx;
-
-  if (!usesAllOnesMask(N, MaskOpIdx))
-    return false;
-
-  // Retrieve the tail policy operand index, if any.
-  std::optional<unsigned> TailPolicyOpIdx;
-  const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
-  const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
-
-  bool IsTA = true;
-  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
-    TailPolicyOpIdx = getVecPolicyOpIdx(N, MaskedMCID);
-    if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
-          RISCVII::TAIL_AGNOSTIC)) {
-      // Keep the true-masked instruction when there is no unmasked TU
-      // instruction
-      if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
-        return false;
-      // We can't use TA if the tie-operand is not IMPLICIT_DEF
-      if (!N->getOperand(0).isUndef())
-        IsTA = false;
-    }
-  }
-
-  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
-
-  // Check that we're dropping the mask operand and any policy operand
-  // when we transform to this unmasked pseudo. Additionally, if this insturtion
-  // is tail agnostic, the unmasked instruction should not have a merge op.
-  uint64_t TSFlags = TII.get(Opc).TSFlags;
-  assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
-         RISCVII::hasDummyMaskOp(TSFlags) &&
-         !RISCVII::hasVecPolicyOp(TSFlags) &&
-         "Unexpected pseudo to transform to");
-  (void)TSFlags;
-
-  SmallVector<SDValue, 8> Ops;
-  // Skip the merge operand at index 0 if IsTA
-  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
-    // Skip the mask, the policy, and the Glue.
-    SDValue Op = N->getOperand(I);
-    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
-        Op.getValueType() == MVT::Glue)
-      continue;
-    Ops.push_back(Op);
-  }
-
-  // Transitively apply any node glued to our new node.
-  const auto *Glued = N->getGluedNode();
-  if (auto *TGlued = Glued->getGluedNode())
-    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
-
-  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
-  Result->setFlags(N->getFlags());
-  ReplaceUses(N, Result);
-
-  return true;
-}
-
-// Try to fold VMERGE_VVM with unmasked intrinsic to masked intrinsic. The
-// peephole only deals with VMERGE_VVM which is TU and has false operand same as
-// its true operand now. E.g. (VMERGE_VVM_M1_TU False, False, (VADD_M1 ...),
-// ...) -> (VADD_VV_M1_MASK)
-bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N, bool IsTA) {
-  unsigned Offset = IsTA ? 0 : 1;
-  uint64_t Policy = IsTA ? RISCVII::TAIL_AGNOSTIC : /*TUMU*/ 0;
-
-  SDValue False = N->getOperand(0 + Offset);
-  SDValue True = N->getOperand(1 + Offset);
-  SDValue Mask = N->getOperand(2 + Offset);
-  SDValue VL = N->getOperand(3 + Offset);
-
-  assert(True.getResNo() == 0 &&
-         "Expect True is the first output of an instruction.");
-
-  // Need N is the exactly one using True.
-  if (!True.hasOneUse())
-    return false;
-
-  if (!True.isMachineOpcode())
-    return false;
-
-  unsigned TrueOpc = True.getMachineOpcode();
-
-  // Skip if True has merge operand.
-  // TODO: Deal with True having same merge operand with N.
-  if (RISCVII::hasMergeOp(TII->get(TrueOpc).TSFlags))
-    return false;
-
-  // Skip if True has side effect.
-  // TODO: Support velff and vlsegff.
-  if (TII->get(TrueOpc).hasUnmodeledSideEffects())
-    return false;
-
-  // Only deal with True when True is unmasked intrinsic now.
-  const RISCV::RISCVMaskedPseudoInfo *Info =
-      RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);
-
-  if (!Info)
-    return false;
-
-  // The last operand of unmasked intrinsic should be sew or chain.
-  bool HasChainOp =
-      True.getOperand(True.getNumOperands() - 1).getValueType() == MVT::Other;
-
-  if (HasChainOp) {
-    // Avoid creating cycles in the DAG. We must ensure that none of the other
-    // operands depend on True through it's Chain.
-    SmallVector<const SDNode *, 4> LoopWorklist;
-    SmallPtrSet<const SDNode *, 16> Visited;
-    LoopWorklist.push_back(False.getNode());
-    LoopWorklist.push_back(Mask.getNode());
-    LoopWorklist.push_back(VL.getNode());
-    if (SDNode *Glued = N->getGluedNode())
-      LoopWorklist.push_back(Glued);
-    if (SDNode::hasPredecessorHelper(True.getNode(), Visited, LoopWorklist))
-      return false;
-  }
-
-  // Need True has same VL with N.
-  unsigned TrueVLIndex = True.getNumOperands() - HasChainOp - 2;
-  SDValue TrueVL = True.getOperand(TrueVLIndex);
-
-  auto IsNoFPExcept = [this](SDValue N) {
-    return !this->mayRaiseFPException(N.getNode()) ||
-           N->getFlags().hasNoFPExcept();
-  };
-
-  // Allow the peephole for non-exception True with VLMAX vector length, since
-  // all the values after VL of N are dependent on Merge. VLMAX should be
-  // lowered to (XLenVT -1).
-  if (TrueVL != VL && !(IsNoFPExcept(True) && isAllOnesConstant(TrueVL)))
-    return false;
-
-  SDLoc DL(N);
-  unsigned MaskedOpc = Info->MaskedPseudo;
-  assert(RISCVII::hasVecPolicyOp(TII->get(MaskedOpc).TSFlags) &&
-         "Expected instructions with mask have policy operand.");
-  assert(RISCVII::hasMergeOp(TII->get(MaskedOpc).TSFlags) &&
-         "Expected instructions with mask have merge operand.");
-
-  SmallVector<SDValue, 8> Ops;
-  Ops.push_back(False);
-  Ops.append(True->op_begin(), True->op_begin() + TrueVLIndex);
-  Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
-  Ops.push_back(CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT()));
-
-  // Result node should have chain operand of True.
-  if (HasChainOp)
-    Ops.push_back(True.getOperand(True.getNumOperands() - 1));
-
-  // Result node should take over glued node of N.
-  if (N->getGluedNode())
-    Ops.push_back(N->getOperand(N->getNumOperands() - 1));
-
-  SDNode *Result =
-      CurDAG->getMachineNode(MaskedOpc, DL, True->getVTList(), Ops);
-  Result->setFlags(True->getFlags());
-
-  // Replace vmerge.vvm node by Result.
-  ReplaceUses(SDValue(N, 0), SDValue(Result, 0));
-
-  // Replace another value of True. E.g. chain and VL.
-  for (unsigned Idx = 1; Idx < True->getNumValues(); ++Idx)
-    ReplaceUses(True.getValue(Idx), SDValue(Result, Idx));
-
-  // Try to transform Result to unmasked intrinsic.
-  doPeepholeMaskedRVV(Result);
-  return true;
-}
-
-// Transform (VMERGE_VVM_<LMUL>_TU false, false, true, allones, vl, sew) to
-// (VADD_VI_<LMUL>_TU false, true, 0, vl, sew). It may decrease uses of VMSET.
-bool RISCVDAGToDAGISel::performVMergeToVAdd(SDNode *N) {
-  unsigned NewOpc;
-  switch (N->getMachineOpcode()) {
-  default:
-    llvm_unreachable("Expected VMERGE_VVM_<LMUL>_TU instruction.");
-  case RISCV::PseudoVMERGE_VVM_MF8_TU:
-    NewOpc = RISCV::PseudoVADD_VI_MF8_TU;
-    break;
-  case RISCV::PseudoVMERGE_VVM_MF4_TU:
-    NewOpc = RISCV::PseudoVADD_VI_MF4_TU;
-    break;
-  case RISCV::PseudoVMERGE_VVM_MF2_TU:
-    NewOpc = RISCV::PseudoVADD_VI_MF2_TU;
-    break;
-  case RISCV::PseudoVMERGE_VVM_M1_TU:
-    NewOpc = RISCV::PseudoVADD_VI_M1_TU;
-    break;
-  case RISCV::PseudoVMERGE_VVM_M2_TU:
-    NewOpc = RISCV::PseudoVADD_VI_M2_TU;
-    break;
-  case RISCV::PseudoVMERGE_VVM_M4_TU:
-    NewOpc = RISCV::PseudoVADD_VI_M4_TU;
-    break;
-  case RISCV::PseudoVMERGE_VVM_M8_TU:
-    NewOpc = RISCV::PseudoVADD_VI_M8_TU;
-    break;
-  }
-
-  if (!usesAllOnesMask(N, /* MaskOpIdx */ 3))
-    return false;
-
-  SDLoc DL(N);
-  EVT VT = N->getValueType(0);
-  SDValue Ops[] = {N->getOperand(1), N->getOperand(2),
-                   CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT()),
-                   N->getOperand(4), N->getOperand(5)};
-  SDNode *Result = CurDAG->getMachineNode(NewOpc, DL, VT, Ops);
-  ReplaceUses(N, Result);
-  return true;
-}
-
-bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
-  bool MadeChange = false;
-  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
-
-  while (Position != CurDAG->allnodes_begin()) {
-    SDNode *N = &*--Position;
-    if (N->use_empty() || !N->isMachineOpcode())
-      continue;
-
-    auto IsVMergeTU = [](unsigned Opcode) {
-      return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
-    };
-
-    auto IsVMergeTA = [](unsigned Opcode) {
-      return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
-             Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
-             Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M1 ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M2 ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M4 ||
-             Opcode == RISCV::PseudoVMERGE_VVM_M8;
-    };
-
-    unsigned Opc = N->getMachineOpcode();
-    // The following optimizations require that the merge operand of N is same
-    // as the false operand of N.
-    if ((IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1)) ||
-        IsVMergeTA(Opc))
-      MadeChange |= performCombineVMergeAndVOps(N, IsVMergeTA(Opc));
-    if (IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1))
-      MadeChange |= performVMergeToVAdd(N);
-  }
-  return MadeChange;
-}
-
 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
 // for instruction scheduling.
 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
@@ -79,25 +79,6 @@ public:
   bool hasAllHUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 16); }
   bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); }
 
-  bool selectVLOp(SDValue N, SDValue &VL);
-
-  bool selectVSplat(SDValue N, SDValue &SplatVal);
-  bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
-  bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
-  bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
-  bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
-
-  bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm);
-  template <unsigned Width> bool selectRVVSimm5(SDValue N, SDValue &Imm) {
-    return selectRVVSimm5(N, Width, Imm);
-  }
-
-  void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm,
-                                  const SDLoc &DL, unsigned CurOp,
-                                  bool IsMasked, bool IsStridedOrIndexed,
-                                  SmallVectorImpl<SDValue> &Operands,
-                                  bool IsLoad = false, MVT *IndexVT = nullptr);
-
   // Return the RISC-V condition code that matches the given DAG integer
   // condition code. The CondCode must be one of those supported by the RISC-V
   // ISA (see translateSetCCForBranch).
File diff suppressed because it is too large
@@ -638,19 +638,13 @@ private:
   SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
-  SDValue lowerVectorFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
 
-  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
-  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;
-
   bool isEligibleForTailCallOptimization(
       CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
       const SmallVector<CCValAssign, 16> &ArgLocs) const;
@@ -661,18 +655,6 @@ private:
       const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
       MachineFunction &MF) const;
 
-  bool useRVVForFixedLengthVectorVT(MVT VT) const;
-
-  MVT getVPExplicitVectorLengthTy() const override;
-
-  /// RVV code generation for fixed length vectors does not lower all
-  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
-  /// merge. However, merging them creates a BUILD_VECTOR that is just as
-  /// illegal as the original, thus leading to an infinite legalisation loop.
-  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
-  /// this override can be removed.
-  bool mergeStoresAfterLegalization(EVT VT) const override;
-
   /// Disable normalizing
   /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
   /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y))
@@ -178,25 +178,6 @@ void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
 
   bool KillSrcReg = false;
 
-  if (Offset.getScalable()) {
-    unsigned ScalableAdjOpc = RISCV::ADD;
-    int64_t ScalableValue = Offset.getScalable();
-    if (ScalableValue < 0) {
-      ScalableValue = -ScalableValue;
-      ScalableAdjOpc = RISCV::SUB;
-    }
-    // Get vlenb and multiply vlen with the number of vector registers.
-    Register ScratchReg = DestReg;
-    if (DestReg == SrcReg)
-      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
-    TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue, Flag);
-    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
-        .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
-        .setMIFlag(Flag);
-    SrcReg = DestReg;
-    KillSrcReg = true;
-  }
-
   int64_t Val = Offset.getFixed();
   if (DestReg == SrcReg && Val == 0)
     return;
@@ -264,64 +245,35 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   Register FrameReg;
   StackOffset Offset =
       getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
-  bool IsRVVSpill = RISCV::isRVVSpill(MI);
-  if (!IsRVVSpill)
-    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
-
-  if (Offset.getScalable() &&
-      ST.getRealMinVLen() == ST.getRealMaxVLen()) {
-    // For an exact VLEN value, scalable offsets become constant and thus
-    // can be converted entirely into fixed offsets.
-    int64_t FixedValue = Offset.getFixed();
-    int64_t ScalableValue = Offset.getScalable();
-    assert(ScalableValue % 8 == 0 &&
-           "Scalable offset is not a multiple of a single vector size.");
-    int64_t NumOfVReg = ScalableValue / 8;
-    int64_t VLENB = ST.getRealMinVLen() / 8;
-    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
-  }
+  Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
 
   if (!isInt<32>(Offset.getFixed())) {
     report_fatal_error(
         "Frame offsets outside of the signed 32-bit range not supported");
   }
 
-  if (!IsRVVSpill) {
-    if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
-      // We chose to emit the canonical immediate sequence rather than folding
-      // the offset into the using add under the theory that doing so doesn't
-      // save dynamic instruction count and some target may fuse the canonical
-      // 32 bit immediate sequence. We still need to clear the portion of the
-      // offset encoded in the immediate.
-      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
-    } else {
-      // We can encode an add with 12 bit signed immediate in the immediate
-      // operand of our user instruction. As a result, the remaining
-      // offset can by construction, at worst, a LUI and a ADD.
-      int64_t Val = Offset.getFixed();
-      int64_t Lo12 = SignExtend64<12>(Val);
-      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
-      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
-                                Offset.getScalable());
-    }
+  if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
+    // We chose to emit the canonical immediate sequence rather than folding
+    // the offset into the using add under the theory that doing so doesn't
+    // save dynamic instruction count and some target may fuse the canonical
+    // 32 bit immediate sequence. We still need to clear the portion of the
+    // offset encoded in the immediate.
+    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+  } else {
+    // We can encode an add with 12 bit signed immediate in the immediate
+    // operand of our user instruction. As a result, the remaining
+    // offset can by construction, at worst, a LUI and a ADD.
+    int64_t Val = Offset.getFixed();
+    int64_t Lo12 = SignExtend64<12>(Val);
+    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
+    Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
+                              Offset.getScalable());
   }
 
-  if (Offset.getScalable() || Offset.getFixed()) {
-    Register DestReg;
-    if (MI.getOpcode() == RISCV::ADDI)
-      DestReg = MI.getOperand(0).getReg();
-    else
-      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
-    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
-              MachineInstr::NoFlags, std::nullopt);
-    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
-                                                 /*IsImp*/false,
-                                                 /*IsKill*/true);
-  } else {
-    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
-                                                 /*IsImp*/false,
-                                                 /*IsKill*/false);
-  }
+  MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
+                                               /*IsImp*/false,
+                                               /*IsKill*/false);
 
   // If after materializing the adjustment, we have a pointless ADDI, remove it
   if (MI.getOpcode() == RISCV::ADDI &&
@@ -331,21 +283,6 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
     return true;
   }
 
-  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(MI.getOpcode());
-  if (ZvlssegInfo) {
-    MachineBasicBlock &MBB = *MI.getParent();
-    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
-    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
-    uint32_t ShiftAmount = Log2_32(ZvlssegInfo->second);
-    if (ShiftAmount != 0)
-      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
-          .addReg(VL)
-          .addImm(ShiftAmount);
-    // The last argument of pseudo spilling opcode for zvlsseg is the length of
-    // one element of zvlsseg types. For example, for vint32m2x2_t, it will be
-    // the length of vint32m2_t.
-    MI.getOperand(FIOperandNum + 1).ChangeToRegister(VL, /*isDef=*/false);
-  }
   return false;
 }
@@ -244,323 +244,6 @@ RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
   return getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
 }
 
-// Currently, these represent both throughput and codesize costs
-// for the respective intrinsics. The costs in this table are simply
-// instruction counts with the following adjustments made:
-// * One vsetvli is considered free.
-static const CostTblEntry VectorIntrinsicCostTable[]{
-    {Intrinsic::floor, MVT::v2f32, 9},
-    {Intrinsic::floor, MVT::v4f32, 9},
-    {Intrinsic::floor, MVT::v8f32, 9},
-    {Intrinsic::floor, MVT::v16f32, 9},
-    {Intrinsic::floor, MVT::nxv1f32, 9},
-    {Intrinsic::floor, MVT::nxv2f32, 9},
-    {Intrinsic::floor, MVT::nxv4f32, 9},
-    {Intrinsic::floor, MVT::nxv8f32, 9},
-    {Intrinsic::floor, MVT::nxv16f32, 9},
-    {Intrinsic::floor, MVT::v2f64, 9},
-    {Intrinsic::floor, MVT::v4f64, 9},
-    {Intrinsic::floor, MVT::v8f64, 9},
-    {Intrinsic::floor, MVT::v16f64, 9},
-    {Intrinsic::floor, MVT::nxv1f64, 9},
-    {Intrinsic::floor, MVT::nxv2f64, 9},
-    {Intrinsic::floor, MVT::nxv4f64, 9},
-    {Intrinsic::floor, MVT::nxv8f64, 9},
-    {Intrinsic::ceil, MVT::v2f32, 9},
-    {Intrinsic::ceil, MVT::v4f32, 9},
-    {Intrinsic::ceil, MVT::v8f32, 9},
-    {Intrinsic::ceil, MVT::v16f32, 9},
-    {Intrinsic::ceil, MVT::nxv1f32, 9},
-    {Intrinsic::ceil, MVT::nxv2f32, 9},
-    {Intrinsic::ceil, MVT::nxv4f32, 9},
-    {Intrinsic::ceil, MVT::nxv8f32, 9},
-    {Intrinsic::ceil, MVT::nxv16f32, 9},
-    {Intrinsic::ceil, MVT::v2f64, 9},
-    {Intrinsic::ceil, MVT::v4f64, 9},
-    {Intrinsic::ceil, MVT::v8f64, 9},
-    {Intrinsic::ceil, MVT::v16f64, 9},
-    {Intrinsic::ceil, MVT::nxv1f64, 9},
-    {Intrinsic::ceil, MVT::nxv2f64, 9},
-    {Intrinsic::ceil, MVT::nxv4f64, 9},
-    {Intrinsic::ceil, MVT::nxv8f64, 9},
-    {Intrinsic::trunc, MVT::v2f32, 7},
-    {Intrinsic::trunc, MVT::v4f32, 7},
-    {Intrinsic::trunc, MVT::v8f32, 7},
-    {Intrinsic::trunc, MVT::v16f32, 7},
-    {Intrinsic::trunc, MVT::nxv1f32, 7},
-    {Intrinsic::trunc, MVT::nxv2f32, 7},
-    {Intrinsic::trunc, MVT::nxv4f32, 7},
-    {Intrinsic::trunc, MVT::nxv8f32, 7},
-    {Intrinsic::trunc, MVT::nxv16f32, 7},
-    {Intrinsic::trunc, MVT::v2f64, 7},
-    {Intrinsic::trunc, MVT::v4f64, 7},
-    {Intrinsic::trunc, MVT::v8f64, 7},
-    {Intrinsic::trunc, MVT::v16f64, 7},
-    {Intrinsic::trunc, MVT::nxv1f64, 7},
-    {Intrinsic::trunc, MVT::nxv2f64, 7},
-    {Intrinsic::trunc, MVT::nxv4f64, 7},
-    {Intrinsic::trunc, MVT::nxv8f64, 7},
-    {Intrinsic::round, MVT::v2f32, 9},
-    {Intrinsic::round, MVT::v4f32, 9},
-    {Intrinsic::round, MVT::v8f32, 9},
-    {Intrinsic::round, MVT::v16f32, 9},
-    {Intrinsic::round, MVT::nxv1f32, 9},
-    {Intrinsic::round, MVT::nxv2f32, 9},
-    {Intrinsic::round, MVT::nxv4f32, 9},
-    {Intrinsic::round, MVT::nxv8f32, 9},
-    {Intrinsic::round, MVT::nxv16f32, 9},
-    {Intrinsic::round, MVT::v2f64, 9},
-    {Intrinsic::round, MVT::v4f64, 9},
-    {Intrinsic::round, MVT::v8f64, 9},
-    {Intrinsic::round, MVT::v16f64, 9},
-    {Intrinsic::round, MVT::nxv1f64, 9},
-    {Intrinsic::round, MVT::nxv2f64, 9},
-    {Intrinsic::round, MVT::nxv4f64, 9},
-    {Intrinsic::round, MVT::nxv8f64, 9},
-    {Intrinsic::roundeven, MVT::v2f32, 9},
-    {Intrinsic::roundeven, MVT::v4f32, 9},
-    {Intrinsic::roundeven, MVT::v8f32, 9},
-    {Intrinsic::roundeven, MVT::v16f32, 9},
-    {Intrinsic::roundeven, MVT::nxv1f32, 9},
-    {Intrinsic::roundeven, MVT::nxv2f32, 9},
-    {Intrinsic::roundeven, MVT::nxv4f32, 9},
-    {Intrinsic::roundeven, MVT::nxv8f32, 9},
-    {Intrinsic::roundeven, MVT::nxv16f32, 9},
-    {Intrinsic::roundeven, MVT::v2f64, 9},
-    {Intrinsic::roundeven, MVT::v4f64, 9},
-    {Intrinsic::roundeven, MVT::v8f64, 9},
-    {Intrinsic::roundeven, MVT::v16f64, 9},
-    {Intrinsic::roundeven, MVT::nxv1f64, 9},
-    {Intrinsic::roundeven, MVT::nxv2f64, 9},
-    {Intrinsic::roundeven, MVT::nxv4f64, 9},
-    {Intrinsic::roundeven, MVT::nxv8f64, 9},
-    {Intrinsic::fabs, MVT::v2f32, 1},
-    {Intrinsic::fabs, MVT::v4f32, 1},
-    {Intrinsic::fabs, MVT::v8f32, 1},
-    {Intrinsic::fabs, MVT::v16f32, 1},
-    {Intrinsic::fabs, MVT::nxv1f32, 1},
-    {Intrinsic::fabs, MVT::nxv2f32, 1},
-    {Intrinsic::fabs, MVT::nxv4f32, 1},
-    {Intrinsic::fabs, MVT::nxv8f32, 1},
-    {Intrinsic::fabs, MVT::nxv16f32, 1},
-    {Intrinsic::fabs, MVT::v2f64, 1},
-    {Intrinsic::fabs, MVT::v4f64, 1},
-    {Intrinsic::fabs, MVT::v8f64, 1},
-    {Intrinsic::fabs, MVT::v16f64, 1},
-    {Intrinsic::fabs, MVT::nxv1f64, 1},
-    {Intrinsic::fabs, MVT::nxv2f64, 1},
-    {Intrinsic::fabs, MVT::nxv4f64, 1},
-    {Intrinsic::fabs, MVT::nxv8f64, 1},
-    {Intrinsic::sqrt, MVT::v2f32, 1},
-    {Intrinsic::sqrt, MVT::v4f32, 1},
-    {Intrinsic::sqrt, MVT::v8f32, 1},
-    {Intrinsic::sqrt, MVT::v16f32, 1},
-    {Intrinsic::sqrt, MVT::nxv1f32, 1},
-    {Intrinsic::sqrt, MVT::nxv2f32, 1},
-    {Intrinsic::sqrt, MVT::nxv4f32, 1},
-    {Intrinsic::sqrt, MVT::nxv8f32, 1},
-    {Intrinsic::sqrt, MVT::nxv16f32, 1},
-    {Intrinsic::sqrt, MVT::v2f64, 1},
-    {Intrinsic::sqrt, MVT::v4f64, 1},
-    {Intrinsic::sqrt, MVT::v8f64, 1},
-    {Intrinsic::sqrt, MVT::v16f64, 1},
-    {Intrinsic::sqrt, MVT::nxv1f64, 1},
-    {Intrinsic::sqrt, MVT::nxv2f64, 1},
-    {Intrinsic::sqrt, MVT::nxv4f64, 1},
-    {Intrinsic::sqrt, MVT::nxv8f64, 1},
-    {Intrinsic::bswap, MVT::v2i16, 3},
-    {Intrinsic::bswap, MVT::v4i16, 3},
-    {Intrinsic::bswap, MVT::v8i16, 3},
-    {Intrinsic::bswap, MVT::v16i16, 3},
-    {Intrinsic::bswap, MVT::nxv1i16, 3},
-    {Intrinsic::bswap, MVT::nxv2i16, 3},
-    {Intrinsic::bswap, MVT::nxv4i16, 3},
-    {Intrinsic::bswap, MVT::nxv8i16, 3},
-    {Intrinsic::bswap, MVT::nxv16i16, 3},
-    {Intrinsic::bswap, MVT::v2i32, 12},
-    {Intrinsic::bswap, MVT::v4i32, 12},
-    {Intrinsic::bswap, MVT::v8i32, 12},
-    {Intrinsic::bswap, MVT::v16i32, 12},
-    {Intrinsic::bswap, MVT::nxv1i32, 12},
-    {Intrinsic::bswap, MVT::nxv2i32, 12},
-    {Intrinsic::bswap, MVT::nxv4i32, 12},
-    {Intrinsic::bswap, MVT::nxv8i32, 12},
-    {Intrinsic::bswap, MVT::nxv16i32, 12},
-    {Intrinsic::bswap, MVT::v2i64, 31},
-    {Intrinsic::bswap, MVT::v4i64, 31},
-    {Intrinsic::bswap, MVT::v8i64, 31},
-    {Intrinsic::bswap, MVT::v16i64, 31},
-    {Intrinsic::bswap, MVT::nxv1i64, 31},
-    {Intrinsic::bswap, MVT::nxv2i64, 31},
-    {Intrinsic::bswap, MVT::nxv4i64, 31},
-    {Intrinsic::bswap, MVT::nxv8i64, 31},
-    {Intrinsic::vp_bswap, MVT::v2i16, 3},
-    {Intrinsic::vp_bswap, MVT::v4i16, 3},
-    {Intrinsic::vp_bswap, MVT::v8i16, 3},
-    {Intrinsic::vp_bswap, MVT::v16i16, 3},
-    {Intrinsic::vp_bswap, MVT::nxv1i16, 3},
-    {Intrinsic::vp_bswap, MVT::nxv2i16, 3},
-    {Intrinsic::vp_bswap, MVT::nxv4i16, 3},
-    {Intrinsic::vp_bswap, MVT::nxv8i16, 3},
-    {Intrinsic::vp_bswap, MVT::nxv16i16, 3},
-    {Intrinsic::vp_bswap, MVT::v2i32, 12},
-    {Intrinsic::vp_bswap, MVT::v4i32, 12},
-    {Intrinsic::vp_bswap, MVT::v8i32, 12},
-    {Intrinsic::vp_bswap, MVT::v16i32, 12},
-    {Intrinsic::vp_bswap, MVT::nxv1i32, 12},
-    {Intrinsic::vp_bswap, MVT::nxv2i32, 12},
-    {Intrinsic::vp_bswap, MVT::nxv4i32, 12},
-    {Intrinsic::vp_bswap, MVT::nxv8i32, 12},
-    {Intrinsic::vp_bswap, MVT::nxv16i32, 12},
-    {Intrinsic::vp_bswap, MVT::v2i64, 31},
-    {Intrinsic::vp_bswap, MVT::v4i64, 31},
-    {Intrinsic::vp_bswap, MVT::v8i64, 31},
-    {Intrinsic::vp_bswap, MVT::v16i64, 31},
-    {Intrinsic::vp_bswap, MVT::nxv1i64, 31},
-    {Intrinsic::vp_bswap, MVT::nxv2i64, 31},
-    {Intrinsic::vp_bswap, MVT::nxv4i64, 31},
-    {Intrinsic::vp_bswap, MVT::nxv8i64, 31},
-    {Intrinsic::bitreverse, MVT::v2i8, 17},
-    {Intrinsic::bitreverse, MVT::v4i8, 17},
-    {Intrinsic::bitreverse, MVT::v8i8, 17},
-    {Intrinsic::bitreverse, MVT::v16i8, 17},
{Intrinsic::bitreverse, MVT::nxv1i8, 17},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv2i8, 17},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv4i8, 17},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv8i8, 17},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv16i8, 17},
|
|
||||||
{Intrinsic::bitreverse, MVT::v2i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::v4i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::v8i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::v16i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv1i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv2i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv4i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv8i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv16i16, 24},
|
|
||||||
{Intrinsic::bitreverse, MVT::v2i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::v4i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::v8i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::v16i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv1i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv2i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv4i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv8i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv16i32, 33},
|
|
||||||
{Intrinsic::bitreverse, MVT::v2i64, 52},
|
|
||||||
{Intrinsic::bitreverse, MVT::v4i64, 52},
|
|
||||||
{Intrinsic::bitreverse, MVT::v8i64, 52},
|
|
||||||
{Intrinsic::bitreverse, MVT::v16i64, 52},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv1i64, 52},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv2i64, 52},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv4i64, 52},
|
|
||||||
{Intrinsic::bitreverse, MVT::nxv8i64, 52},
|
|
||||||
{Intrinsic::ctpop, MVT::v2i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::v4i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::v8i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::v16i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv1i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv2i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv4i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv8i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv16i8, 12},
|
|
||||||
{Intrinsic::ctpop, MVT::v2i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::v4i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::v8i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::v16i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv1i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv2i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv4i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv8i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv16i16, 19},
|
|
||||||
{Intrinsic::ctpop, MVT::v2i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::v4i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::v8i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::v16i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv1i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv2i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv4i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv8i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv16i32, 20},
|
|
||||||
{Intrinsic::ctpop, MVT::v2i64, 21},
|
|
||||||
{Intrinsic::ctpop, MVT::v4i64, 21},
|
|
||||||
{Intrinsic::ctpop, MVT::v8i64, 21},
|
|
||||||
{Intrinsic::ctpop, MVT::v16i64, 21},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv1i64, 21},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv2i64, 21},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv4i64, 21},
|
|
||||||
{Intrinsic::ctpop, MVT::nxv8i64, 21},
|
|
||||||
};
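The entries above follow LLVM's generic cost-table pattern and are queried further down in getIntrinsicInstrCost via CostTableLookup. As a rough standalone sketch of that pattern — the struct and function names here are illustrative stand-ins, not the real llvm::CostTblEntry / llvm::CostTableLookup declarations from llvm/Target/CostTable.h:

#include <cstddef>
#include <optional>

// Illustrative entry shape: (intrinsic id, value type, cost) — mirrors the
// table above without depending on LLVM headers.
struct IntrinsicCostEntry {
  unsigned IntrinsicID;
  unsigned ValueType;
  unsigned Cost;
};

// Linear scan over a small constant table, which is essentially what
// CostTableLookup does; returns nothing when no entry matches so the caller
// can fall back to the base cost model.
inline std::optional<unsigned>
lookupIntrinsicCost(const IntrinsicCostEntry *Table, std::size_t N,
                    unsigned ID, unsigned VT) {
  for (std::size_t I = 0; I != N; ++I)
    if (Table[I].IntrinsicID == ID && Table[I].ValueType == VT)
      return Table[I].Cost;
  return std::nullopt;
}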
InstructionCost
RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                    TTI::TargetCostKind CostKind) {
  auto *RetTy = ICA.getReturnType();
  switch (ICA.getID()) {
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::round:
  case Intrinsic::roundeven: {
    // These all use the same code.
    auto LT = getTypeLegalizationCost(RetTy);
    if (!LT.second.isVector() && TLI->isOperationCustom(ISD::FCEIL, LT.second))
      return LT.first * 8;
    break;
  }
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax: {
    auto LT = getTypeLegalizationCost(RetTy);
    if ((ST->hasVInstructions() && LT.second.isVector()) ||
        (LT.second.isScalarInteger() && ST->hasStdExtZbb()))
      return LT.first;
    break;
  }
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat: {
    auto LT = getTypeLegalizationCost(RetTy);
    if (ST->hasVInstructions() && LT.second.isVector())
      return LT.first;
    break;
  }
  // TODO: add more intrinsics
  case Intrinsic::experimental_stepvector: {
    unsigned Cost = 1; // vid
    auto LT = getTypeLegalizationCost(RetTy);
    return Cost + (LT.first - 1);
  }
  case Intrinsic::vp_rint: {
    // RISC-V target uses at least 5 instructions to lower rounding intrinsics.
    unsigned Cost = 5;
    auto LT = getTypeLegalizationCost(RetTy);
    if (TLI->isOperationCustom(ISD::VP_FRINT, LT.second))
      return Cost * LT.first;
    break;
  }
  case Intrinsic::vp_nearbyint: {
    // One more read and one more write of fflags than vp_rint.
    unsigned Cost = 7;
    auto LT = getTypeLegalizationCost(RetTy);
    if (TLI->isOperationCustom(ISD::VP_FRINT, LT.second))
      return Cost * LT.first;
    break;
  }
  }

  if (ST->hasVInstructions() && RetTy->isVectorTy()) {
    auto LT = getTypeLegalizationCost(RetTy);
    if (const auto *Entry = CostTableLookup(VectorIntrinsicCostTable,
                                            ICA.getID(), LT.second))
      return LT.first * Entry->Cost;
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}
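For reference, a minimal sketch of how a client reaches this hook through the public TargetTransformInfo interface; the helper name, the chosen vector type, and the cost kind are assumptions for illustration, not part of this diff:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Illustrative helper: throughput cost of llvm.ceil on <4 x double>.
// On a subtarget with vector instructions this dispatches to
// RISCVTTIImpl::getIntrinsicInstrCost and, for vector types, consults the
// VectorIntrinsicCostTable above.
static InstructionCost ceilV4F64Cost(const TargetTransformInfo &TTI,
                                     LLVMContext &Ctx) {
  Type *VecTy = FixedVectorType::get(Type::getDoubleTy(Ctx), 4);
  IntrinsicCostAttributes ICA(Intrinsic::ceil, VecTy, {VecTy});
  return TTI.getIntrinsicInstrCost(ICA,
                                   TargetTransformInfo::TCK_RecipThroughput);
}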
InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                               Type *Src,
@@ -71,10 +71,8 @@ static_library("LLVMRISCVCodeGen") {
"RISCVExpandAtomicPseudoInsts.cpp",
|
"RISCVExpandAtomicPseudoInsts.cpp",
|
||||||
"RISCVExpandPseudoInsts.cpp",
|
"RISCVExpandPseudoInsts.cpp",
|
||||||
"RISCVFrameLowering.cpp",
|
"RISCVFrameLowering.cpp",
|
||||||
"RISCVGatherScatterLowering.cpp",
|
|
||||||
"RISCVISelDAGToDAG.cpp",
|
"RISCVISelDAGToDAG.cpp",
|
||||||
"RISCVISelLowering.cpp",
|
"RISCVISelLowering.cpp",
|
||||||
"RISCVInsertVSETVLI.cpp",
|
|
||||||
"RISCVInstrInfo.cpp",
|
"RISCVInstrInfo.cpp",
|
||||||
"RISCVMCInstLower.cpp",
|
"RISCVMCInstLower.cpp",
|
||||||
"RISCVMachineFunctionInfo.cpp",
|
"RISCVMachineFunctionInfo.cpp",
|
||||||
|