[RISCV] Define vector vfwadd/vfwsub intrinsics.

Define vector vfwadd/vfwsub intrinsics and lower them to V
instructions.

We worked with @rogfer01 from BSC to develop this patch.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.wang@sifive.com>

Differential Revision: https://reviews.llvm.org/D93583
This commit is contained in:
Hsiangkai Wang 2020-12-19 23:12:18 +08:00
parent bd576ac8d4
commit f86e61d886
10 changed files with 4591 additions and 54 deletions

View File

@ -388,6 +388,11 @@ let TargetPrefix = "riscv" in {
defm vfsub : RISCVBinaryAAX;
defm vfrsub : RISCVBinaryAAX;
defm vfwadd : RISCVBinaryABX;
defm vfwsub : RISCVBinaryABX;
defm vfwadd_w : RISCVBinaryAAX;
defm vfwsub_w : RISCVBinaryAAX;
defm vsaddu : RISCVSaturatingBinaryAAX;
defm vsadd : RISCVSaturatingBinaryAAX;
defm vssubu : RISCVSaturatingBinaryAAX;

View File

@ -201,6 +201,19 @@ defset list<VTypeInfoToWide> AllWidenableIntVectors = {
def : VTypeInfoToWide<VI32M4, VI64M8>;
}
defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
def : VTypeInfoToWide<VF16MF4, VF32MF2>;
def : VTypeInfoToWide<VF16MF2, VF32M1>;
def : VTypeInfoToWide<VF16M1, VF32M2>;
def : VTypeInfoToWide<VF16M2, VF32M4>;
def : VTypeInfoToWide<VF16M4, VF32M8>;
def : VTypeInfoToWide<VF32MF2, VF64M1>;
def : VTypeInfoToWide<VF32M1, VF64M2>;
def : VTypeInfoToWide<VF32M2, VF64M4>;
def : VTypeInfoToWide<VF32M4, VF64M8>;
}
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
@ -662,9 +675,10 @@ multiclass VPseudoBinaryW_VV {
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_VX {
multiclass VPseudoBinaryW_VX<bit IsFloat> {
foreach m = MxList.m[0-5] in
defm _VX : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
defm !if(!eq(IsFloat, 0), "_VX", "_VF") : VPseudoBinary<m.wvrclass, m.vrclass,
!if(!eq(IsFloat, 0), GPR, FPR32), m,
"@earlyclobber $rd">;
}
@ -674,9 +688,10 @@ multiclass VPseudoBinaryW_WV {
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_WX {
multiclass VPseudoBinaryW_WX<bit IsFloat> {
foreach m = MxList.m[0-5] in
defm _WX : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m,
defm !if(!eq(IsFloat, 0), "_WX", "_WF") : VPseudoBinary<m.wvrclass, m.wvrclass,
!if(!eq(IsFloat, 0), GPR, FPR32), m,
"@earlyclobber $rd">;
}
@ -757,14 +772,14 @@ multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
defm "" : VPseudoBinaryV_VI<ImmType>;
}
multiclass VPseudoBinaryW_VV_VX {
multiclass VPseudoBinaryW_VV_VX<bit IsFloat = 0> {
defm "" : VPseudoBinaryW_VV;
defm "" : VPseudoBinaryW_VX;
defm "" : VPseudoBinaryW_VX<IsFloat>;
}
multiclass VPseudoBinaryW_WV_WX {
multiclass VPseudoBinaryW_WV_WX<bit IsFloat = 0> {
defm "" : VPseudoBinaryW_WV;
defm "" : VPseudoBinaryW_WX;
defm "" : VPseudoBinaryW_WX<IsFloat>;
}
multiclass VPseudoBinaryV_VM_XM_IM {
@ -1120,8 +1135,9 @@ multiclass VPatBinaryV_VI<string intrinsic, string instruction,
vti.RegClass, imm_type>;
}
multiclass VPatBinaryW_VV<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "VV",
@ -1131,19 +1147,22 @@ multiclass VPatBinaryW_VV<string intrinsic, string instruction> {
}
}
multiclass VPatBinaryW_VX<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
multiclass VPatBinaryW_VX<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "VX",
Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
defm : VPatBinary<intrinsic, instruction,
!if(!eq(Vti.Scalar, XLenVT), "VX", "VF"),
Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
Vti.SEW, Vti.LMul, Wti.RegClass,
Vti.RegClass, GPR>;
Vti.RegClass, Vti.ScalarRegClass>;
}
}
multiclass VPatBinaryW_WV<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
multiclass VPatBinaryW_WV<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "WV",
@ -1153,19 +1172,22 @@ multiclass VPatBinaryW_WV<string intrinsic, string instruction> {
}
}
multiclass VPatBinaryW_WX<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
multiclass VPatBinaryW_WX<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "WX",
Wti.Vector, Wti.Vector, XLenVT, Vti.Mask,
defm : VPatBinary<intrinsic, instruction,
!if(!eq(Vti.Scalar, XLenVT), "WX", "WF"),
Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
Vti.SEW, Vti.LMul, Wti.RegClass,
Wti.RegClass, GPR>;
Wti.RegClass, Vti.ScalarRegClass>;
}
}
multiclass VPatBinaryV_WV<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
multiclass VPatBinaryV_WV<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "WV",
@ -1175,19 +1197,22 @@ multiclass VPatBinaryV_WV<string intrinsic, string instruction> {
}
}
multiclass VPatBinaryV_WX<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
multiclass VPatBinaryV_WX<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "WX",
Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
defm : VPatBinary<intrinsic, instruction,
!if(!eq(Vti.Scalar, XLenVT), "WX", "WF"),
Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
Vti.SEW, Vti.LMul, Vti.RegClass,
Wti.RegClass, GPR>;
Wti.RegClass, Vti.ScalarRegClass>;
}
}
multiclass VPatBinaryV_WI<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
multiclass VPatBinaryV_WI<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "WI",
@ -1273,23 +1298,26 @@ multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
}
multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction>
multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist>
{
defm "" : VPatBinaryW_VV<intrinsic, instruction>;
defm "" : VPatBinaryW_VX<intrinsic, instruction>;
defm "" : VPatBinaryW_VV<intrinsic, instruction, vtilist>;
defm "" : VPatBinaryW_VX<intrinsic, instruction, vtilist>;
}
multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction>
multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist>
{
defm "" : VPatBinaryW_WV<intrinsic, instruction>;
defm "" : VPatBinaryW_WX<intrinsic, instruction>;
defm "" : VPatBinaryW_WV<intrinsic, instruction, vtilist>;
defm "" : VPatBinaryW_WX<intrinsic, instruction, vtilist>;
}
multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction>
multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist>
{
defm "" : VPatBinaryV_WV<intrinsic, instruction>;
defm "" : VPatBinaryV_WX<intrinsic, instruction>;
defm "" : VPatBinaryV_WI<intrinsic, instruction>;
defm "" : VPatBinaryV_WV<intrinsic, instruction, vtilist>;
defm "" : VPatBinaryV_WX<intrinsic, instruction, vtilist>;
defm "" : VPatBinaryV_WI<intrinsic, instruction, vtilist>;
}
multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
@ -1500,6 +1528,14 @@ defm PseudoVFADD : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
defm PseudoVFSUB : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
defm PseudoVFRSUB : VPseudoBinaryV_VX</*IsFloat=*/1>;
//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFWADD : VPseudoBinaryW_VV_VX</*IsFloat=*/1>;
defm PseudoVFWSUB : VPseudoBinaryW_VV_VX</*IsFloat=*/1>;
defm PseudoVFWADD : VPseudoBinaryW_WV_WX</*IsFloat=*/1>;
defm PseudoVFWSUB : VPseudoBinaryW_WV_WX</*IsFloat=*/1>;
//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
@ -1674,14 +1710,14 @@ defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>
//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;
//===----------------------------------------------------------------------===//
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
@ -1707,8 +1743,8 @@ defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors
//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL">;
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA">;
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
@ -1737,9 +1773,9 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;
//===----------------------------------------------------------------------===//
// 12.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
//===----------------------------------------------------------------------===//
// 12.17. Vector Integer Move Instructions
@ -1778,6 +1814,14 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>;
//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//

View File

@ -0,0 +1,401 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; Unmasked widening FP add intrinsic: two nxv1f16 sources produce an nxv1f32
; result. The trailing i32 is presumably the vector length (vl) operand —
; consistent with the vsetvli emitted below, but not stated in this file.
declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
i32);
; Checks that the intrinsic is selected as vfwadd.vv under an e16,mf4 vtype.
define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 %2)
ret <vscale x 1 x float> %a
}
; Masked variant. The leading nxv1f32 operand appears to be the merge/passthru
; value (same type as the result); the nxv1i1 operand is the mask — grounded by
; the v0.t suffix in the CHECK line below. Trailing i32 is presumably vl.
declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i32);
; Checks that the masked intrinsic selects vfwadd.vv with the v0.t mask suffix.
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
; Vector-scalar form: nxv1f16 vector plus an f16 scalar, widened to nxv1f32.
; Trailing i32 is presumably the vl operand.
declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
<vscale x 1 x half>,
half,
i32);
; Checks selection of vfwadd.vf with the scalar in an FP register (ft*).
define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i32 %2)
ret <vscale x 1 x float> %a
}
; Masked vector-scalar form. Leading nxv1f32 operand appears to be the
; merge/passthru value; the nxv1i1 operand is the mask (see v0.t in the CHECK
; line). Trailing i32 is presumably vl.
declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i32);
; Checks selection of the masked vfwadd.vf with the v0.t mask suffix.
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
<vscale x 2 x half>,
half,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
<vscale x 4 x half>,
half,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
<vscale x 8 x half>,
half,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
<vscale x 16 x half>,
half,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}

View File

@ -0,0 +1,721 @@
; RV64 tests for the llvm.riscv.vfwadd intrinsic. The file is laid out as:
;   1) vector-vector (.vv) widening adds, f16 sources -> f32 results
;      (nxv1..nxv16), then f32 sources -> f64 results (nxv1..nxv8);
;   2) vector-scalar (.vf) widening adds over the same type set.
; Every case appears twice: unmasked, and masked (extra merge operand and
; <vscale x N x i1> mask, checked via the trailing v0.t on the instruction).
; The vsetvli CHECK pins the SEW/LMUL pair selected for the narrow source
; type. XLEN is i64 on this target, hence the i64 vl argument everywhere.
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; --- vfwadd.vv, f16 -> f32 widenings ---
declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i64 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
; --- vfwadd.vv, f32 -> f64 widenings (requires +d) ---
declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}
; --- vfwadd.vf, f16 scalar operand -> f32 results ---
declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
<vscale x 1 x half>,
half,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i64 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
<vscale x 2 x half>,
half,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
<vscale x 4 x half>,
half,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
<vscale x 8 x half>,
half,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
<vscale x 16 x half>,
half,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
; --- vfwadd.vf, f32 scalar operand -> f64 results ---
declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32.f32(
<vscale x 1 x float>,
float,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32.f32(
<vscale x 1 x float> %0,
float %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32.f32(
<vscale x 2 x float>,
float,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32.f32(
<vscale x 2 x float> %0,
float %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32.f32(
<vscale x 4 x float>,
float,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32.f32(
<vscale x 4 x float> %0,
float %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32.f32(
<vscale x 8 x float>,
float,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32.f32(
<vscale x 8 x float> %0,
float %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}

View File

@ -0,0 +1,401 @@
; RV32 tests for the llvm.riscv.vfwadd.w intrinsic, whose first operand is
; already the *wide* type. Layout:
;   1) wide-vector + narrow-vector (.wv) cases, f32 result + f16 vector
;      operand, nxv1..nxv16;
;   2) wide-vector + f16 scalar (.wf) cases over the same widths.
; Each case has an unmasked and a masked variant; the vsetvli CHECK is
; keyed to the narrow (e16) element type and the masked CHECK requires the
; v0.t operand. XLEN is i32 on this target (i32 vl argument).
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; --- vfwadd.wv: wide f32 vector + narrow f16 vector ---
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
i32);
define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
; --- vfwadd.wf: wide f32 vector + f16 scalar ---
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
<vscale x 1 x float>,
half,
i32);
define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
<vscale x 1 x float> %0,
half %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
half,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
half %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
<vscale x 2 x float>,
half,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
<vscale x 2 x float> %0,
half %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
half,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
half %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
<vscale x 4 x float>,
half,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
<vscale x 4 x float> %0,
half %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
half,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
half %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
<vscale x 8 x float>,
half,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
<vscale x 8 x float> %0,
half %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
half,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
half %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
<vscale x 16 x float>,
half,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
<vscale x 16 x float> %0,
half %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
half,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
half %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}

View File

@ -0,0 +1,721 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; rv64 tests for the @llvm.riscv.vfwadd.w intrinsic (widening add where the
; destination/first operand is already the wide type). vl argument is i64.
; Note the declare suffix names the NARROW element type (e.g. .nxv1f16).
;
; --- vfwadd.wv, f16 narrow operand -> f32 result ---
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
i64 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
; --- vfwadd.wv, f32 narrow operand -> f64 result (requires +d) ---
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f32(
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f32(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x float> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f32(
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f32(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x float> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f32(
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f32(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x float> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f32(
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f32(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
<vscale x 8 x float> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}
; --- vfwadd.wf, f16 scalar operand -> f32 result ---
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
<vscale x 1 x float>,
half,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
<vscale x 1 x float> %0,
half %1,
i64 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
half,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
half %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
<vscale x 2 x float>,
half,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
<vscale x 2 x float> %0,
half %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
half,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
half %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
<vscale x 4 x float>,
half,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
<vscale x 4 x float> %0,
half %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
half,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
half %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
<vscale x 8 x float>,
half,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
<vscale x 8 x float> %0,
half %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
half,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
half %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
<vscale x 16 x float>,
half,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
<vscale x 16 x float> %0,
half %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
half,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
half %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
; --- vfwadd.wf, f32 scalar operand -> f64 result ---
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
<vscale x 1 x double>,
float,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
<vscale x 1 x double> %0,
float %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
<vscale x 1 x double>,
<vscale x 1 x double>,
float,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
float %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
<vscale x 2 x double>,
float,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
<vscale x 2 x double> %0,
float %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
<vscale x 2 x double>,
<vscale x 2 x double>,
float,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
float %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
<vscale x 4 x double>,
float,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
<vscale x 4 x double> %0,
float %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
<vscale x 4 x double>,
<vscale x 4 x double>,
float,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
float %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
<vscale x 8 x double>,
float,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
<vscale x 8 x double> %0,
float %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
<vscale x 8 x double>,
<vscale x 8 x double>,
float,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
float %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}

View File

@ -0,0 +1,401 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; rv32 tests for @llvm.riscv.vfwsub (widening subtract: both source operands
; are the narrow type, the result is the 2*SEW type). vl argument is i32.
;
; --- vfwsub.vv, f16 - f16 -> f32 ---
declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
; --- vfwsub.vf, f16 vector - f16 scalar -> f32 ---
declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
<vscale x 1 x half>,
half,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
<vscale x 2 x half>,
half,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
<vscale x 4 x half>,
half,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
<vscale x 8 x half>,
half,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
<vscale x 16 x half>,
half,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
; ============================================================================
; [diff-viewer artifact replaced] New file added by this commit (+721 lines):
; the RV64 vfwsub intrinsic test file begins below.
; ============================================================================
; RV64 vfwsub tests. The run line enables the V extension plus D and Zfh so
; that all FP element types used below (f16/f32/f64) are legal;
; --riscv-no-aliases keeps raw mnemonics for the CHECK lines. The VL operand
; of every intrinsic is i64 on this target (it is i32 in the RV32 variant).
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; Unmasked vfwsub.vv: widening FP subtract of two nxv1f16 vectors, producing
; an nxv1f32 result.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
entry:
; vsetvli is configured for the source type (e16,mf4); unmasked, no v0.t.
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i64 %2)
ret <vscale x 1 x float> %a
}
; Masked vfwsub.vv: same operation with an extra leading merge operand and a
; trailing mask operand before the VL.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32(
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32(
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32(
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32(
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}
; First vector-scalar (.vf) test of this file: widening FP subtract of an
; f16 scalar from an nxv1f16 vector, producing nxv1f32 (i64 VL on RV64).
declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
<vscale x 1 x half>,
half,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
entry:
; The scalar operand is expected in an FP register (ft[0-9]+ pattern).
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i64 %2)
ret <vscale x 1 x float> %a
}
; Masked variant of the vector-scalar form: merge, vector, scalar, mask, VL.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
<vscale x 2 x half>,
half,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
<vscale x 4 x half>,
half,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
<vscale x 8 x half>,
half,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
<vscale x 16 x half>,
half,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32.f32(
<vscale x 1 x float>,
float,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32.f32(
<vscale x 1 x float> %0,
float %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32.f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32.f32(
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32.f32(
<vscale x 2 x float>,
float,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32.f32(
<vscale x 2 x float> %0,
float %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32.f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32.f32(
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32.f32(
<vscale x 4 x float>,
float,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32.f32(
<vscale x 4 x float> %0,
float %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32.f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32.f32(
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32.f32(
<vscale x 8 x float>,
float,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32.f32(
<vscale x 8 x float> %0,
float %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32.f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32.f32(
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}
; ============================================================================
; [diff-viewer artifact replaced] New file added by this commit (+401 lines):
; the RV32 vfwsub.w intrinsic test file begins below.
; ============================================================================
; RV32 vfwsub.w tests. The run line enables V, F and Zfh (D is not needed:
; this file only widens f16 into f32); --riscv-no-aliases keeps raw
; mnemonics. The VL operand is i32 on this target.
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; vfwsub.wv: the FIRST operand is already the wide type (nxv1f32) and only
; the second operand (nxv1f16) is widened before subtracting.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
entry:
; vsetvli is still configured for the narrow source type (e16,mf4).
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
i32 %2)
ret <vscale x 1 x float> %a
}
; Masked vfwsub.wv: merge, wide operand, narrow operand, mask, VL.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
; First wide-vector/scalar (.wf) test: the vector operand is already wide
; (nxv1f32) and the f16 scalar is widened before subtracting (i32 VL).
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
<vscale x 1 x float>,
half,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
entry:
; Scalar operand is expected in an FP register; vsetvli uses the narrow
; element type (e16,mf4).
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
<vscale x 1 x float> %0,
half %1,
i32 %2)
ret <vscale x 1 x float> %a
}
; Masked vfwsub.wf: merge, wide vector, f16 scalar, mask, VL.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
half,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
half %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
<vscale x 2 x float>,
half,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
<vscale x 2 x float> %0,
half %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
half,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
half %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
<vscale x 4 x float>,
half,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
<vscale x 4 x float> %0,
half %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
half,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
half %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
<vscale x 8 x float>,
half,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
<vscale x 8 x float> %0,
half %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
half,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
half %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
<vscale x 16 x float>,
half,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
<vscale x 16 x float> %0,
half %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
half,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
half %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
; ============================================================================
; [diff-viewer artifact replaced] New file added by this commit (+721 lines):
; the RV64 vfwsub.w intrinsic test file begins below.
; ============================================================================
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
; vfwsub.wv (rv64), f16 source -> f32 result: element-wise subtract of a
; narrow <n x half> vector from an already-widened <n x float> accumulator.
; Coverage walks LMUL for the e16 source type: nxv1 (mf4), nxv2 (mf2),
; nxv4 (m1), nxv8 (m2), nxv16 (m4), each with an unmasked and a masked
; (merge operand + v0 mask, ", v0.t" in the CHECK) variant.  The trailing
; i64 operand is the explicit vector length (XLEN-sized on rv64).
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
i64 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
; vfwsub.wv (rv64), f32 source -> f64 result: same shape as the f16->f32
; tests above, one LMUL step per group for the e32 source type: nxv1 (mf2),
; nxv2 (m1), nxv4 (m2), nxv8 (m4), each unmasked and masked.
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f32(
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f32(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x float> %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f32(
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f32(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x float> %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f32(
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f32(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x float> %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f32(
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f32(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
<vscale x 8 x float> %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}
; vfwsub.wf (rv64), f16 scalar -> f32 result: subtract a scalar half,
; implicitly splat across lanes, from each f32 element of the widened
; operand.  Scalar arrives in an FPR ({{ft[0-9]+}} in the CHECK); groups
; walk LMUL for the e16 narrow type: nxv1 (mf4) .. nxv16 (m4), unmasked
; and masked.  Intrinsics here are mangled <result>.<scalar>
; (e.g. nxv1f32.f16), unlike the .wv tests above.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
<vscale x 1 x float>,
half,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
<vscale x 1 x float> %0,
half %1,
i64 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
<vscale x 1 x float>,
<vscale x 1 x float>,
half,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
half %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
<vscale x 2 x float>,
half,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
<vscale x 2 x float> %0,
half %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
<vscale x 2 x float>,
<vscale x 2 x float>,
half,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
half %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
<vscale x 4 x float>,
half,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
<vscale x 4 x float> %0,
half %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
<vscale x 4 x float>,
<vscale x 4 x float>,
half,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
half %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
<vscale x 8 x float>,
half,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
<vscale x 8 x float> %0,
half %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
<vscale x 8 x float>,
<vscale x 8 x float>,
half,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
half %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
<vscale x 16 x float>,
half,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
<vscale x 16 x float> %0,
half %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
<vscale x 16 x float>,
<vscale x 16 x float>,
half,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
half %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
; vfwsub.wf (rv64), f32 scalar -> f64 result: same shape as the f16-scalar
; tests above, walking LMUL for the e32 narrow type: nxv1 (mf2), nxv2 (m1),
; nxv4 (m2), nxv8 (m4), each unmasked and masked.
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
<vscale x 1 x double>,
float,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
<vscale x 1 x double> %0,
float %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
<vscale x 1 x double>,
<vscale x 1 x double>,
float,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
float %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
<vscale x 2 x double>,
float,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
<vscale x 2 x double> %0,
float %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
<vscale x 2 x double>,
<vscale x 2 x double>,
float,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
float %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
<vscale x 4 x double>,
float,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
<vscale x 4 x double> %0,
float %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
<vscale x 4 x double>,
<vscale x 4 x double>,
float,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
float %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
<vscale x 8 x double>,
float,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
<vscale x 8 x double> %0,
float %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
<vscale x 8 x double>,
<vscale x 8 x double>,
float,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
float %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}