[AArch64][SVE] Rename intrinsics for gather prefetch [NFC]

Summary:
In order to keep the names consistent with other SVE gather loads, the
intrinsics for gather prefetch are renamed as follows:
  * @llvm.aarch64.sve.gather.prfb -> @llvm.aarch64.sve.prfb.gather
The prfh, prfw and prfd intrinsics, and the scaled, scaled.uxtw and
scaled.sxtw variants of each, are renamed following the same pattern.
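For example, a gather prefetch of bytes from a vector of 32-bit base addresses
plus an immediate offset is now written as follows (a minimal IR sketch taken
from the updated tests below; the predicate, base vector and prfop operand
values are illustrative):

  declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64, i32)

  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %bases,
                                                  i64 7, i32 1)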

Reviewed by: fpetrogalli

Differential Revision: https://reviews.llvm.org/D76421
Andrzej Warzynski 2020-03-19 09:35:31 +00:00
parent 4a58996dd2
commit 0ea4fb5bb7
6 changed files with 267 additions and 267 deletions


@@ -1302,29 +1302,29 @@ def int_aarch64_sve_prf
// Scalar + 32-bit scaled offset vector, zero extend, packed and
// unpacked.
-def int_aarch64_sve_gather_prfb_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfh_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfw_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfd_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfh_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfw_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfd_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
// Scalar + 32-bit scaled offset vector, sign extend, packed and
// unpacked.
-def int_aarch64_sve_gather_prfb_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfw_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfh_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfd_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfw_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfh_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfd_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled;
// Scalar + 64-bit scaled offset vector.
-def int_aarch64_sve_gather_prfb_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfh_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfw_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
-def int_aarch64_sve_gather_prfd_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfb_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfh_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfw_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
+def int_aarch64_sve_prfd_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled;
// Vector + scalar.
-def int_aarch64_sve_gather_prfb : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_gather_prfh : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_gather_prfw : SVE_gather_prf_vector_base_scalar_offset;
-def int_aarch64_sve_gather_prfd : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfb_gather : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfh_gather : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfw_gather : SVE_gather_prf_vector_base_scalar_offset;
+def int_aarch64_sve_prfd_gather : SVE_gather_prf_vector_base_scalar_offset;
//
// Scalar to vector operations


@@ -12995,9 +12995,9 @@ static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
}
-/// Combines a node carrying the intrinsic `aarch64_sve_gather_prf<T>` into a
-/// node that uses `aarch64_sve_gather_prf<T>_scaled_uxtw` when the scalar
-/// offset passed to `aarch64_sve_gather_prf<T>` is not a valid immediate for
+/// Combines a node carrying the intrinsic `aarch64_sve_prf_gather<T>` into a
+/// node that uses `aarch64_sve_prf_gather<T>_scaled_uxtw` when the scalar
+/// offset passed to `aarch64_sve_prf_gather<T>` is not a valid immediate for
/// the sve gather prefetch instruction with vector plus immediate addressing
/// mode.
static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
@@ -13011,8 +13011,8 @@ static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
// ...otherwise swap the offset base with the offset...
SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
std::swap(Ops[ImmPos], Ops[OffsetPos]);
-// ...and remap the intrinsic `aarch64_sve_gather_prf<T>` to
-// `aarch64_sve_gather_prf<T>_scaled_uxtw`.
+// ...and remap the intrinsic `aarch64_sve_prf_gather<T>` to
+// `aarch64_sve_prf_gather<T>_scaled_uxtw`.
SDLoc DL(N);
Ops[1] = DAG.getConstant(NewIID, DL, MVT::i64);
@@ -13083,30 +13083,30 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
-case Intrinsic::aarch64_sve_gather_prfb:
+case Intrinsic::aarch64_sve_prfb_gather:
return combineSVEPrefetchVecBaseImmOff(
-N, DAG, Intrinsic::aarch64_sve_gather_prfb_scaled_uxtw,
+N, DAG, Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw,
1 /*=ScalarSizeInBytes*/);
-case Intrinsic::aarch64_sve_gather_prfh:
+case Intrinsic::aarch64_sve_prfh_gather:
return combineSVEPrefetchVecBaseImmOff(
-N, DAG, Intrinsic::aarch64_sve_gather_prfh_scaled_uxtw,
+N, DAG, Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw,
2 /*=ScalarSizeInBytes*/);
-case Intrinsic::aarch64_sve_gather_prfw:
+case Intrinsic::aarch64_sve_prfw_gather:
return combineSVEPrefetchVecBaseImmOff(
-N, DAG, Intrinsic::aarch64_sve_gather_prfw_scaled_uxtw,
+N, DAG, Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw,
4 /*=ScalarSizeInBytes*/);
-case Intrinsic::aarch64_sve_gather_prfd:
+case Intrinsic::aarch64_sve_prfd_gather:
return combineSVEPrefetchVecBaseImmOff(
-N, DAG, Intrinsic::aarch64_sve_gather_prfd_scaled_uxtw,
+N, DAG, Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw,
8 /*=ScalarSizeInBytes*/);
-case Intrinsic::aarch64_sve_gather_prfb_scaled_uxtw:
-case Intrinsic::aarch64_sve_gather_prfb_scaled_sxtw:
-case Intrinsic::aarch64_sve_gather_prfh_scaled_uxtw:
-case Intrinsic::aarch64_sve_gather_prfh_scaled_sxtw:
-case Intrinsic::aarch64_sve_gather_prfw_scaled_uxtw:
-case Intrinsic::aarch64_sve_gather_prfw_scaled_sxtw:
-case Intrinsic::aarch64_sve_gather_prfd_scaled_uxtw:
-case Intrinsic::aarch64_sve_gather_prfd_scaled_sxtw:
+case Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw:
+case Intrinsic::aarch64_sve_prfb_gather_scaled_sxtw:
+case Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw:
+case Intrinsic::aarch64_sve_prfh_gather_scaled_sxtw:
+case Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw:
+case Intrinsic::aarch64_sve_prfw_gather_scaled_sxtw:
+case Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw:
+case Intrinsic::aarch64_sve_prfd_gather_scaled_sxtw:
return legalizeSVEGatherPrefetchOffsVec(N, DAG);
case Intrinsic::aarch64_neon_ld2:
case Intrinsic::aarch64_neon_ld3:
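The effect of the combine above can be seen with the vector-plus-immediate
form of the renamed intrinsic: when the scalar offset is not a valid
immediate (e.g. 32, outside the 0-31 range accepted by prfb), the node is
remapped to the scaled, zero-extended (uxtw) form and the offset is
materialised in a register. A minimal IR/assembly sketch mirroring the tests
further below (the exact register numbers are illustrative):

  ; offset 32 cannot be encoded as [z0.s, #imm], so the intrinsic is
  ; remapped to @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32
  call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %bases,
                                                  i64 32, i32 1)
  ; generated code:
  ;   mov w8, #32
  ;   prfb pldl1strm, p0, [x8, z0.s, uxtw]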


@@ -880,37 +880,37 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
// Gather prefetch using scaled 32-bit offsets, e.g.
// prfh pldl1keep, p0, [x0, z0.s, uxtw #1]
-defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, int_aarch64_sve_gather_prfb_scaled_sxtw, int_aarch64_sve_gather_prfb_scaled_uxtw>;
-defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16, ZPR32ExtUXTW16, int_aarch64_sve_gather_prfh_scaled_sxtw, int_aarch64_sve_gather_prfh_scaled_uxtw>;
-defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32, ZPR32ExtUXTW32, int_aarch64_sve_gather_prfw_scaled_sxtw, int_aarch64_sve_gather_prfw_scaled_uxtw>;
-defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64, ZPR32ExtUXTW64, int_aarch64_sve_gather_prfd_scaled_sxtw, int_aarch64_sve_gather_prfd_scaled_uxtw>;
+defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>;
+defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16, ZPR32ExtUXTW16, int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>;
+defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32, ZPR32ExtUXTW32, int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>;
+defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64, ZPR32ExtUXTW64, int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>;
// Gather prefetch using unpacked, scaled 32-bit offsets, e.g.
// prfh pldl1keep, p0, [x0, z0.d, uxtw #1]
-defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_gather_prfb_scaled_sxtw, int_aarch64_sve_gather_prfb_scaled_uxtw>;
-defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16, ZPR64ExtUXTW16, int_aarch64_sve_gather_prfh_scaled_sxtw, int_aarch64_sve_gather_prfh_scaled_uxtw>;
-defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32, ZPR64ExtUXTW32, int_aarch64_sve_gather_prfw_scaled_sxtw, int_aarch64_sve_gather_prfw_scaled_uxtw>;
-defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64, ZPR64ExtUXTW64, int_aarch64_sve_gather_prfd_scaled_sxtw, int_aarch64_sve_gather_prfd_scaled_uxtw>;
+defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>;
+defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16, ZPR64ExtUXTW16, int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>;
+defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32, ZPR64ExtUXTW32, int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>;
+defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64, ZPR64ExtUXTW64, int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>;
// Gather prefetch using scaled 64-bit offsets, e.g.
// prfh pldl1keep, p0, [x0, z0.d, lsl #1]
-defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8, int_aarch64_sve_gather_prfb_scaled>;
-defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_gather_prfh_scaled>;
-defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_gather_prfw_scaled>;
-defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_gather_prfd_scaled>;
+defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8, int_aarch64_sve_prfb_gather_scaled>;
+defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_prfh_gather_scaled>;
+defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_prfw_gather_scaled>;
+defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_prfd_gather_scaled>;
// Gather prefetch using 32/64-bit pointers with offset, e.g.
// prfh pldl1keep, p0, [z0.s, #16]
// prfh pldl1keep, p0, [z0.d, #16]
-defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_gather_prfb>;
-defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_gather_prfh>;
-defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_gather_prfw>;
-defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_gather_prfd>;
-defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_gather_prfb>;
-defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_gather_prfh>;
-defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_gather_prfw>;
-defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_gather_prfd>;
+defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>;
+defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>;
+defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>;
+defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>;
+defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>;
+defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>;
+defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>;
+defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>;
defm ADR_SXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_sxtw<0b00, "adr">;
defm ADR_UXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_uxtw<0b01, "adr">;


@@ -1,200 +1,200 @@
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
define void @llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, sxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, sxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>] -> 32-bit unpacked scaled offset ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>] -> 32-bit unpacked scaled offset
define void @llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_uxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_sxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, sxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, sxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
define void @llvm_aarch64_sve_gather_prfb_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind { define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_scaled_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d] ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1) call void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
ret void ret void
} }
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
define void @llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1] ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, sxtw #1] ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, sxtw #1]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked scaled offset ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked scaled offset
define void @llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_uxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1] ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_sxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, sxtw #1] ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
define void @llvm_aarch64_sve_gather_prfh_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind { define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_scaled_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, lsl #1] ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, lsl #1]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfh.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1) call void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
ret void ret void
} }
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
define void @llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2] ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, sxtw #2] ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked scaled offset ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked scaled offset
define void @llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_uxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2] ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_sxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, sxtw #2] ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, sxtw #2]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
define void @llvm_aarch64_sve_gather_prfw_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind { define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_scaled_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, lsl #2] ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, lsl #2]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfw.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1) call void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
ret void ret void
} }
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
define void @llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3] ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, sxtw #3] ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, sxtw #3]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
ret void ret void
} }
; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked scaled offset ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked scaled offset
define void @llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_uxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3] ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind { define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_sxtw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, sxtw #3] ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1) call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
ret void ret void
} }
; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
define void @llvm_aarch64_sve_gather_prfd_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind { define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_scaled_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, lsl #3] ; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, lsl #3]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfd.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1) call void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
ret void ret void
} }
declare void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfb.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfb.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfb.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfh.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfh.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfh.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfw.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfw.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfw.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfd.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfd.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfd.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop) declare void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)


@@ -1,82 +1,82 @@
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element ; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_gather_prfb_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32:
; CHECK-NEXT: prfb pldl1strm, p0, [z0.s, #7] ; CHECK-NEXT: prfb pldl1strm, p0, [z0.s, #7]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
ret void ret void
} }
; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element ; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_gather_prfb_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [z0.d, #7] ; CHECK-NEXT: prfb pldl1strm, p0, [z0.d, #7]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
ret void ret void
} }
; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element ; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_gather_prfh_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfh_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32:
; CHECK-NEXT: prfh pldl1strm, p0, [z0.s, #6] ; CHECK-NEXT: prfh pldl1strm, p0, [z0.s, #6]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1) call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
ret void ret void
} }
; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element ; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_gather_prfh_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfh_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfh_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [z0.d, #6] ; CHECK-NEXT: prfh pldl1strm, p0, [z0.d, #6]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1) call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
ret void ret void
} }
; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element ; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_gather_prfw_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfw_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32:
; CHECK-NEXT: prfw pldl1strm, p0, [z0.s, #12] ; CHECK-NEXT: prfw pldl1strm, p0, [z0.s, #12]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1) call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
ret void ret void
} }
; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element ; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_gather_prfw_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfw_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfw_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [z0.d, #12] ; CHECK-NEXT: prfw pldl1strm, p0, [z0.d, #12]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1) call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
ret void ret void
} }
; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element ; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_gather_prfd_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfd_gather_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx4vi32: ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32:
; CHECK-NEXT: prfd pldl1strm, p0, [z0.s, #16] ; CHECK-NEXT: prfd pldl1strm, p0, [z0.s, #16]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1) call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
ret void ret void
} }
; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element ; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_gather_prfd_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfd_gather_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfd_nx2vi64: ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [z0.d, #16] ; CHECK-NEXT: prfd pldl1strm, p0, [z0.d, #16]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1) call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
ret void ret void
} }
declare void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfh.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfh.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfw.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfw.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfd.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.gather.prfd.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop) declare void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)


@@ -1,286 +1,286 @@
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s
; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 1, ..., 31 ; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 1, ..., 31
define void @llvm_aarch64_sve_gather_prfb_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32_runtime_offset: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_upper_bound: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #32 ; CHECK-NEXT: mov w[[N:[0-9]+]], #32
; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.s, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.s, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx4vi32_invalid_immediate_offset_lower_bound: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
ret void ret void
} }
; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 1, ..., 31 ; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 1, ..., 31
define void @llvm_aarch64_sve_gather_prfb_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64_runtime_offset: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_upper_bound: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #32 ; CHECK-NEXT: mov w[[N:[0-9]+]], #32
; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.d, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.d, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 32, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 32, i32 1)
ret void ret void
} }
define void @llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind { define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_gather_prfb_nx2vi64_invalid_immediate_offset_lower_bound: ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw] ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw]
; CHECK-NEXT: ret ; CHECK-NEXT: ret
call void @llvm.aarch64.sve.gather.prfb.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1) call void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
ret void ret void
} }
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 2, ..., 62 ; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 2, ..., 62
define void @llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #63
; CHECK-NEXT: prfh pldl1strm, p0, [x[[N]], z0.s, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 63, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
  ret void
}
; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 2, ..., 62
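; Illustrative sketch (not part of the original test): the 64-bit element form
; follows the same rule; a multiple of 2 in [0, 62] is expected to fold as,
; e.g., "prfh pldl1strm, p0, [z0.d, #62]". The function name is hypothetical.
define void @illustrative_prfh_gather_nx2vi64_valid_imm(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 62, i32 1)
  ret void
}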
define void @llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #63
; CHECK-NEXT: prfh pldl1strm, p0, [x[[N]], z0.d, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 63, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
  ret void
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 4, ..., 124
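; Illustrative sketch (not part of the original test): a legal PRFW immediate is
; a multiple of 4 in [0, 124] and is expected to fold as, e.g.,
; "prfw pldl1strm, p0, [z0.s, #124]". The function name is hypothetical.
define void @illustrative_prfw_gather_nx4vi32_valid_imm(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 124, i32 1)
  ret void
}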
define void @llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #125
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N]], z0.s, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
  ret void
}
; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 4, ..., 124
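; Illustrative sketch (not part of the original test): the 64-bit element form
; follows the same rule; a multiple of 4 in [0, 124] is expected to fold as,
; e.g., "prfw pldl1strm, p0, [z0.d, #124]". The function name is hypothetical.
define void @illustrative_prfw_gather_nx2vi64_valid_imm(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 124, i32 1)
  ret void
}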
define void @llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #125
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N]], z0.d, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
  ret void
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 8, ..., 248
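; Illustrative sketch (not part of the original test): a legal PRFD immediate is
; a multiple of 8 in [0, 248] and is expected to fold as, e.g.,
; "prfd pldl1strm, p0, [z0.s, #248]". The function name is hypothetical.
define void @illustrative_prfd_gather_nx4vi32_valid_imm(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 248, i32 1)
  ret void
}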
define void @llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #125
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N]], z0.s, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
  ret void
}
; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 8, ..., 248
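; Illustrative sketch (not part of the original test): the 64-bit element form
; follows the same rule; a multiple of 8 in [0, 248] is expected to fold as,
; e.g., "prfd pldl1strm, p0, [z0.d, #248]". The function name is hypothetical.
define void @illustrative_prfd_gather_nx2vi64_valid_imm(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 248, i32 1)
  ret void
}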
define void @llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #125
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N]], z0.d, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
  ret void
}
define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
  ret void
}
declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)