[AArch64][SVE] Drop "argmemonly" from gather/scatter with vector base.

The intrinsics don't have any pointer arguments, so "argmemonly" makes
optimizations think they don't access memory at all.
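
As an illustration (not part of the patch), here is a minimal IR sketch of the
hazard, using a hypothetical external function @gather_like in place of the
real intrinsic:

; "argmemonly" promises the callee only touches memory reachable through its
; pointer arguments. @gather_like takes a vector of addresses rather than a
; pointer, so it has no pointer arguments at all; alias analysis therefore
; concludes the call accesses no memory and reports NoModRef against the
; store below, letting passes hoist or reorder the call across it even though
; %addrs may contain the address held in %p.
declare <4 x i32> @gather_like(<4 x i64>) argmemonly readonly

define <4 x i32> @example(i32* %p, <4 x i64> %addrs) {
  store i32 0, i32* %p
  %v = call <4 x i32> @gather_like(<4 x i64> %addrs)
  ret <4 x i32> %v
}

The new tests below exercise the same pattern with the real intrinsics inside
a loop.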

Differential Revision: https://reviews.llvm.org/D88186
Eli Friedman 2020-09-23 15:09:47 -07:00
parent b65966cff6
commit 4600e21051
2 changed files with 51 additions and 2 deletions

@@ -1300,7 +1300,7 @@ class AdvSIMD_GatherLoad_VS_Intrinsic
llvm_anyvector_ty,
llvm_i64_ty
],
-[IntrReadMem, IntrArgMemOnly]>;
+[IntrReadMem]>;
class AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic
: Intrinsic<[],
@@ -1329,7 +1329,7 @@ class AdvSIMD_ScatterStore_VS_Intrinsic
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyvector_ty, llvm_i64_ty
],
-[IntrWriteMem, IntrArgMemOnly]>;
+[IntrWriteMem]>;
class SVE_gather_prf_SV
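
With IntrArgMemOnly gone, the intrinsics fall back to the conservative model
of possibly reading (gather) or writing (scatter) any memory. In effect (a
rough sketch, not the literal emitter output), the declarations now behave as
if written:

declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64) readonly
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64) writeonly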

@@ -26,5 +26,54 @@ for.end:
ret void
}
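
; The gather's addresses in %ptr_vec may alias the store to %out_ptr_gep
; inside the loop, so LICM must not hoist the loop-invariant gather into the
; entry block.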
define void @no_hoist_gather(<vscale x 2 x i32>* %out_ptr, <vscale x 2 x i32>* %in_ptr, <vscale x 2 x i64> %ptr_vec, i64 %n, <vscale x 2 x i1> %pred) {
; CHECK-LABEL: @no_hoist_gather(
; CHECK: entry:
; CHECK-NOT: llvm.aarch64.sve.ld1.gather.scalar.offset
; CHECK: for.body:
; CHECK: llvm.aarch64.sve.ld1.gather.scalar.offset
entry:
br label %for.body
for.body:
%i = phi i64 [0, %entry], [%inc, %for.body]
%gather = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pred, <vscale x 2 x i64> %ptr_vec, i64 0)
%in_ptr_gep = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %in_ptr, i64 %i
%in_ptr_load = load <vscale x 2 x i32>, <vscale x 2 x i32>* %in_ptr_gep, align 8
%sum = add <vscale x 2 x i32> %gather, %in_ptr_load
%out_ptr_gep = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %out_ptr, i64 %i
store <vscale x 2 x i32> %sum, <vscale x 2 x i32>* %out_ptr_gep, align 8
%inc = add nuw nsw i64 %i, 1
%cmp = icmp ult i64 %inc, %n
br i1 %cmp, label %for.body, label %for.end
for.end:
ret void
}
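
; The scatter may write through addresses in %ptr_vec that alias %in_ptr, so
; the load feeding it must stay inside the loop rather than being hoisted to
; the entry block.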
define void @no_hoist_scatter(<vscale x 2 x i32>* %out_ptr, <vscale x 2 x i32>* %in_ptr, <vscale x 2 x i64> %ptr_vec, i64 %n, <vscale x 2 x i1> %pred) {
; CHECK-LABEL: @no_hoist_scatter(
; CHECK: entry:
; CHECK-NOT: load
; CHECK: for.body:
; CHECK: load
entry:
br label %for.body
for.body:
%i = phi i64 [0, %entry], [%inc, %for.body]
%in_ptr_load = load <vscale x 2 x i32>, <vscale x 2 x i32>* %in_ptr, align 8
call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %in_ptr_load, <vscale x 2 x i1> %pred, <vscale x 2 x i64> %ptr_vec, i64 %i)
%inc = add nuw nsw i64 %i, 1
%cmp = icmp ult i64 %inc, %n
br i1 %cmp, label %for.body, label %for.end
for.end:
ret void
}
declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)