[AMDGPU] Fixed incorrect uniform branch condition

Summary:
I had a case where multiple nested uniform ifs resulted in code that
performed v_cmp comparisons, combined the results with s_and_b64, s_or_b64
and s_xor_b64, and used the resulting mask in s_cbranch_vccnz without first
ensuring that the bits for inactive lanes were clear.
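
As an illustration (a hand-written sketch with made-up registers and label,
not lifted from the failing shader), the bad code had roughly this shape;
the s_xor_b64 here acts as a logical NOT, which is what can set bits for
lanes that are inactive in exec:

  v_cmp_lt_f32_e64 s[0:1], s2, v0   ; inactive-lane bits of the result are 0
  v_cmp_lt_f32_e64 s[2:3], s3, v1
  s_xor_b64 s[0:1], s[0:1], -1      ; logical NOT: inactive-lane bits become 1
  s_or_b64 vcc, s[0:1], s[2:3]      ; the stale bits survive the OR
  s_cbranch_vccnz BB0_2             ; taken whenever any vcc bit is set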

There was already code for inserting an "s_and_b64 vcc, exec, vcc" to
clear bits for inactive lanes in the case that the branch is instruction
selected as s_cbranch_scc1 and is then changed to s_cbranch_vccnz in
SIFixSGPRCopies. I have added the same code into
AMDGPUDAGToDAGISel::SelectBRCOND for the case that the branch is
instruction selected directly as s_cbranch_vccnz.
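
With this change, instruction selection masks the condition with exec
immediately before the branch, so only active lanes can affect the outcome
(continuing the hypothetical sketch above):

  s_and_b64 vcc, exec, s[0:1]       ; clear bits for inactive lanes
  s_cbranch_vccnz BB0_2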

This de-optimizes the code in some cases where the s_and is not needed,
because vcc is the result of a v_cmp, or multiple v_cmp instructions
combined by s_and/s_or. We should add a pass to re-optimize those cases.
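
For instance, when vcc comes straight from a v_cmp, the compare already
writes 0 for inactive lanes, so the new s_and is redundant (again a
hypothetical sketch):

  v_cmp_eq_u32_e32 vcc, 1, v0       ; inactive-lane bits are already 0
  s_and_b64 vcc, exec, vcc          ; redundant; a later pass could remove it
  s_cbranch_vccnz BB0_1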

Reviewers: arsenm, kzhuravl

Subscribers: wdng, yaxunl, t-tye, llvm-commits, dstuttard, timcorringham, nhaehnle

Differential Revision: https://reviews.llvm.org/D41292

llvm-svn: 322119
Commit 6eaad1e539 by Tim Renouf, 2018-01-09 21:34:43 +00:00
(parent f0ef137bd0)
9 changed files with 91 additions and 13 deletions


@@ -1674,6 +1674,26 @@ void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
   unsigned CondReg = UseSCCBr ? AMDGPU::SCC : AMDGPU::VCC;
   SDLoc SL(N);

+  if (!UseSCCBr) {
+    // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
+    // analyzed what generates the vcc value, so we do not know whether vcc
+    // bits for disabled lanes are 0.  Thus we need to mask out bits for
+    // disabled lanes.
+    //
+    // For the case that we select S_CBRANCH_SCC1 and it gets
+    // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
+    // SIInstrInfo::moveToVALU which inserts the S_AND.
+    //
+    // We could add an analysis of what generates the vcc value here and omit
+    // the S_AND when it is unnecessary. But it would be better to add a
+    // separate pass after SIFixSGPRCopies to do the unnecessary S_AND
+    // removal, so it catches both cases.
+    Cond = SDValue(CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1,
+                       CurDAG->getRegister(AMDGPU::EXEC, MVT::i1),
+                       Cond),
+                   0);
+  }
+
   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
                        N->getOperand(2), // Basic Block


@@ -100,7 +100,8 @@ bb3:
 ; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_vcnd_branch:
 ; GCN: s_load_dword [[CND:s[0-9]+]]
 ; GCN-DAG: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
-; GCN-DAG: v_cmp_eq_f32_e64 vcc, [[CND]], 0
+; GCN-DAG: v_cmp_eq_f32_e64 [[UNMASKED:s\[[0-9]+:[0-9]+\]]], [[CND]], 0
+; GCN-DAG: s_and_b64 vcc, exec, [[UNMASKED]]
 ; GCN: s_cbranch_vccz [[LONGBB:BB[0-9]+_[0-9]+]]

 ; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
@@ -500,8 +501,7 @@ ret:
 ; GCN: s_setpc_b64

 ; GCN: [[LONG_BR_DEST0]]
-; GCN: v_cmp_ne_u32_e32
-; GCN-NEXT: s_cbranch_vccz
+; GCN: s_cbranch_vccz
 ; GCN: s_setpc_b64

 ; GCN: s_endpgm
@@ -520,6 +520,11 @@ bb9: ; preds = %bb
   br i1 %tmp12, label %bb19, label %bb14

 bb13: ; preds = %bb
+  call void asm sideeffect
+    "v_nop_e64
+    v_nop_e64
+    v_nop_e64
+    v_nop_e64", ""() #0
   br i1 %tmp6, label %bb19, label %bb14

 bb14: ; preds = %bb13, %bb9


@@ -95,7 +95,7 @@ for.body:

 ; GCN-LABEL: {{^}}loop_arg_0:
 ; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; GCN: v_cmp_eq_u32_e32 vcc, 1,
+; GCN: v_cmp_eq_u32{{[^,]*}}, 1,
 ; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]

 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80


@@ -63,8 +63,7 @@
 ; GCN-NEXT: s_cbranch_scc1

 ; FIXME: Should fold to unconditional branch?
 ; GCN: s_mov_b64 vcc, -1
-; GCN-NEXT: ; implicit-def
+; GCN: ; implicit-def
 ; GCN: s_cbranch_vccz
 ; GCN: ds_read_b32


@@ -0,0 +1,54 @@
+; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx800 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+; This checks for a bug where uniform control flow can result in multiple
+; v_cmp results being combined together with s_and_b64, s_or_b64 and s_xor_b64,
+; using the resulting mask in s_cbranch_vccnz, without ensuring that the
+; resulting mask has bits clear for inactive lanes. The problematic case is
+; s_xor_b64, as, unlike the other ops, it can actually set bits for inactive
+; lanes.
+;
+; The check for an s_xor_b64 is just to check that this test tests what it is
+; supposed to test. If the s_xor_b64 disappears due to some other change, it
+; does not necessarily mean that the bug has reappeared.
+;
+; The check for "s_and_b64 vcc, exec, something" checks that the bug is fixed.
+
+; CHECK: {{^}}main:
+; CHECK: s_xor_b64
+; CHECK: s_and_b64 vcc, exec,
+
+define amdgpu_cs void @main(i32 inreg %arg) {
+.entry:
+  %tmp44 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
+  %tmp16 = load volatile float, float addrspace(1)* undef
+  %tmp22 = load volatile float, float addrspace(1)* undef
+  %tmp25 = load volatile float, float addrspace(1)* undef
+  %tmp31 = fcmp olt float %tmp16, 0x3FA99999A0000000
+  br i1 %tmp31, label %bb, label %.exit.thread
+
+bb:                                               ; preds = %.entry
+  %tmp42 = fcmp olt float %tmp25, 0x3FA99999A0000000
+  br i1 %tmp42, label %bb43, label %.exit.thread
+
+bb43:
+  %tmp46 = fcmp olt <2 x float> %tmp44, <float 0x3FA99999A0000000, float 0x3FA99999A0000000>
+  %tmp47 = extractelement <2 x i1> %tmp46, i32 0
+  %tmp48 = extractelement <2 x i1> %tmp46, i32 1
+  %tmp49 = and i1 %tmp47, %tmp48
+  br i1 %tmp49, label %bb50, label %.exit.thread
+
+bb50:
+  %tmp53 = fcmp olt float %tmp22, 0x3FA99999A0000000
+  br i1 %tmp53, label %.exit3.i, label %.exit.thread
+
+.exit3.i:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %.exit.thread
+
+.exit.thread:
+  ret void
+}


@@ -134,8 +134,8 @@ define amdgpu_kernel void @opt_select_i64_or_cmp_f32(i64 addrspace(1)* %out, flo
 }

 ; GCN-LABEL: {{^}}regression:
-; GCN: v_cmp_neq_f32_e64 vcc
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0
+; GCN: v_cmp_neq_f32_e64
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0
 ; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
 define amdgpu_kernel void @regression(float addrspace(1)* %out, float %c0, float %c1) #0 {


@@ -267,7 +267,7 @@ exit:

 ; CHECK: [[PHIBB]]:
 ; CHECK: v_cmp_eq_f32_e32 vcc, 0, [[PHIREG]]
-; CHECK-NEXT: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]

 ; CHECK: ; %bb10
 ; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 9
@@ -302,14 +302,14 @@ end:

 ; CHECK-LABEL: {{^}}no_skip_no_successors:
 ; CHECK: v_cmp_nge_f32
-; CHECK-NEXT: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]

 ; CHECK: ; %bb6
 ; CHECK: s_mov_b64 exec, 0

 ; CHECK: [[SKIPKILL]]:
 ; CHECK: v_cmp_nge_f32_e32 vcc
-; CHECK-NEXT: %bb.3: ; %bb5
+; CHECK: %bb.3: ; %bb5
 ; CHECK-NEXT: .Lfunc_end{{[0-9]+}}
 define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
 bb:


@@ -4,7 +4,7 @@

 ; GCN-FUNC: {{^}}vccz_workaround:
 ; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0x0
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0{{$}}
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0{{$}}
 ; VCCZ-BUG: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VCCZ-BUG: s_mov_b64 vcc, vcc
 ; NOVCCZ-BUG-NOT: s_mov_b64 vcc, vcc


@@ -251,7 +251,7 @@ ENDIF: ; preds = %IF, %main_body

 ; GCN: s_load_dword [[COND:s[0-9]+]]
 ; GCN: s_cmp_lt_i32 [[COND]], 1
 ; GCN: s_cbranch_scc1 [[EXIT:[A-Za-z0-9_]+]]
-; GCN: v_cmp_gt_i32_e64 vcc, [[COND]], 0{{$}}
+; GCN: v_cmp_gt_i32_e64 {{[^,]*}}, [[COND]], 0{{$}}
 ; GCN: s_cbranch_vccz [[BODY:[A-Za-z0-9_]+]]
 ; GCN: {{^}}[[EXIT]]:
 ; GCN: s_endpgm