[RISCV] Implement vssseg intrinsics.
Define vssseg intrinsics and pseudo instructions. Lower vssseg intrinsics to pseudo instructions in RISCVDAGToDAGISel.

Differential Revision: https://reviews.llvm.org/D94863
parent e5e329023b
commit a8b96eadfd
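For context on the operation these intrinsics describe: a strided segment store writes, for each element index i < vl, a group ("segment") of NF field values contiguously at address base + i * stride; in the masked form, segments whose mask bit is clear are not written. Below is a minimal reference sketch of that semantics for NF = 2 and 32-bit elements, in plain C++ with a hypothetical helper name — an illustration of the memory access pattern, not the lowering added by this patch.

#include <cstddef>
#include <cstdint>

// Reference model of the vssseg2 / vssseg2_mask behaviour for i32 elements.
// v0 and v1 hold field 0 and field 1 of each segment; mask may be null
// (unmasked form). `stride` is a byte stride, as in the RVV instruction.
void strided_seg2_store_ref(char *base, std::ptrdiff_t stride,
                            const int32_t *v0, const int32_t *v1,
                            const bool *mask, std::size_t vl) {
  for (std::size_t i = 0; i < vl; ++i) {
    if (mask && !mask[i])
      continue;                     // masked-off segment: no memory is touched
    int32_t *seg = reinterpret_cast<int32_t *>(base + i * stride);
    seg[0] = v0[i];                 // field 0 of segment i
    seg[1] = v1[i];                 // field 1 of segment i
  }
}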
@@ -540,6 +540,26 @@ let TargetPrefix = "riscv" in {
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+
+  // For stride segment store
+  // Input: (value, pointer, offset, vl)
+  class RISCVSSegStore<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyint_ty,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For stride segment store with mask
+  // Input: (value, pointer, offset, mask, vl)
+  class RISCVSSegStoreMask<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyint_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
 
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -654,6 +674,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
   }
+  multiclass RISCVSSegStore<int nf> {
+    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -949,6 +973,7 @@ let TargetPrefix = "riscv" in {
     defm vlseg # nf : RISCVUSSegLoad<nf>;
     defm vlsseg # nf : RISCVSSegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
+    defm vssseg # nf : RISCVSSegStore<nf>;
   }
 
 } // TargetPrefix = "riscv"
@@ -220,9 +220,12 @@ void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo,
   CurDAG->RemoveDeadNode(Node);
 }
 
-void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo,
+                                    bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 4;
+  if (IsStrided)
+    NF--;
   EVT VT = Node->getOperand(2)->getValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
   MVT XLenVT = Subtarget->getXLenVT();
@@ -230,10 +233,17 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {StoreVal,
-                        Node->getOperand(2 + NF), // Base pointer.
-                        Node->getOperand(3 + NF), // VL.
-                        SEW, Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 6> Operands;
+  Operands.push_back(StoreVal);
+  Operands.push_back(Node->getOperand(2 + NF)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(3 + NF)); // Stride.
+    Operands.push_back(Node->getOperand(4 + NF)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3 + NF)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Store =
@@ -241,9 +251,12 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
   ReplaceNode(Node, Store);
 }
 
-void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo,
+                                        bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 5;
+  if (IsStrided)
+    NF--;
  EVT VT = Node->getOperand(2)->getValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
   MVT XLenVT = Subtarget->getXLenVT();
@@ -251,12 +264,19 @@ void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo) {
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {StoreVal,
-                        Node->getOperand(2 + NF), // Base pointer.
-                        Node->getOperand(3 + NF), // Mask.
-                        Node->getOperand(4 + NF), // VL.
-                        SEW,
-                        Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 7> Operands;
+  Operands.push_back(StoreVal);
+  Operands.push_back(Node->getOperand(2 + NF)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(3 + NF)); // Stride.
+    Operands.push_back(Node->getOperand(4 + NF)); // Mask.
+    Operands.push_back(Node->getOperand(5 + NF)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3 + NF)); // Mask.
+    Operands.push_back(Node->getOperand(4 + NF)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Store =
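A note on the operand arithmetic above: the intrinsic node reaching these selectors carries (chain, intrinsic id, the NF stored values, base pointer, optional stride, optional mask, VL), so NF is recovered by subtracting the fixed operand count. A small standalone sketch of that calculation — illustrative only, not LLVM API, function name hypothetical:

#include <cassert>

// NF (number of segment fields) derived from the operand count of a
// segment-store intrinsic node laid out as:
//   chain, intrinsic id, v0 .. v{NF-1}, base, [stride], [mask], vl
unsigned segStoreFields(unsigned numOperands, bool isMasked, bool isStrided) {
  unsigned fixed = 4;               // chain + intrinsic id + base pointer + VL
  if (isMasked)
    ++fixed;                        // mask operand
  if (isStrided)
    ++fixed;                        // stride operand
  assert(numOperands > fixed && "expected at least one stored value");
  return numOperands - fixed;
}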
@@ -439,7 +459,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
   case Intrinsic::riscv_vsseg6:
   case Intrinsic::riscv_vsseg7:
   case Intrinsic::riscv_vsseg8: {
-    selectVSSEG(Node, IntNo);
+    selectVSSEG(Node, IntNo, /*IsStrided=*/false);
     return;
   }
   case Intrinsic::riscv_vsseg2_mask:
@@ -449,7 +469,27 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
   case Intrinsic::riscv_vsseg6_mask:
   case Intrinsic::riscv_vsseg7_mask:
   case Intrinsic::riscv_vsseg8_mask: {
-    selectVSSEGMask(Node, IntNo);
+    selectVSSEGMask(Node, IntNo, /*IsStrided=*/false);
     return;
   }
+  case Intrinsic::riscv_vssseg2:
+  case Intrinsic::riscv_vssseg3:
+  case Intrinsic::riscv_vssseg4:
+  case Intrinsic::riscv_vssseg5:
+  case Intrinsic::riscv_vssseg6:
+  case Intrinsic::riscv_vssseg7:
+  case Intrinsic::riscv_vssseg8: {
+    selectVSSEG(Node, IntNo, /*IsStrided=*/true);
+    return;
+  }
+  case Intrinsic::riscv_vssseg2_mask:
+  case Intrinsic::riscv_vssseg3_mask:
+  case Intrinsic::riscv_vssseg4_mask:
+  case Intrinsic::riscv_vssseg5_mask:
+  case Intrinsic::riscv_vssseg6_mask:
+  case Intrinsic::riscv_vssseg7_mask:
+  case Intrinsic::riscv_vssseg8_mask: {
+    selectVSSEGMask(Node, IntNo, /*IsStrided=*/true);
+    return;
+  }
   }
@@ -57,8 +57,8 @@ public:
 
   void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVLSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
-  void selectVSSEG(SDNode *Node, unsigned IntNo);
-  void selectVSSEGMask(SDNode *Node, unsigned IntNo);
+  void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
 
   // Include the pieces autogenerated from the target description.
 #include "RISCVGenDAGISel.inc"
@@ -440,7 +440,8 @@ class PseudoToVInst<string PseudoInst> {
 class ToLowerCase<string Upper> {
   string L = !subst("VLSEG", "vlseg",
              !subst("VLSSEG", "vlsseg",
-             !subst("VSSEG", "vsseg", Upper)));
+             !subst("VSSEG", "vsseg",
+             !subst("VSSSEG", "vssseg", Upper))));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1076,6 +1077,38 @@ class VPseudoUSSegStoreMask<VReg ValClass, bits<11> EEW>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSSegStoreNoMask<VReg ValClass, bits<11> EEW>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, GPR: $offset, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSSegStoreMask<VReg ValClass, bits<11> EEW>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, GPR: $offset,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1629,6 +1662,21 @@ multiclass VPseudoUSSegStore {
   }
 }
 
+multiclass VPseudoSSegStore {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew>;
+        }
+      }
+    }
+  }
+}
 
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -2830,6 +2878,7 @@ foreach eew = EEWList in {
 defm PseudoVLSEG : VPseudoUSSegLoad;
 defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
+defm PseudoVSSSEG : VPseudoSSegStore;
 
 //===----------------------------------------------------------------------===//
 // 8. Vector AMO Operations
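The VPseudoSSegStore multiclass together with `defm PseudoVSSSEG : VPseudoSSegStore;` above stamps out one pseudo per (NF, EEW, LMUL) combination; by the concatenation in the `def` lines the record names take the form PseudoVSSSEG<nf>E<eew>_V_<LMUL>, plus a _MASK suffix for the masked variant (e.g. PseudoVSSSEG2E32_V_M1 and PseudoVSSSEG2E32_V_M1_MASK). A hypothetical C++ helper mirroring that naming scheme, for illustration only:

#include <string>

// Mirrors the TableGen name concatenation used by VPseudoSSegStore above.
// E.g. vsssegPseudoName(2, 32, "M1", true) == "PseudoVSSSEG2E32_V_M1_MASK".
std::string vsssegPseudoName(unsigned nf, unsigned eew,
                             const std::string &lmul, bool masked) {
  std::string name = "PseudoVSSSEG" + std::to_string(nf) + "E" +
                     std::to_string(eew) + "_V_" + lmul;
  if (masked)
    name += "_MASK";
  return name;
}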
(Two file diffs suppressed because they are too large.)