[X86][AVX] Add X86ISD::VALIGN target shuffle decode support
Allows us to combine VALIGN instructions with other shuffles; note that the shuffle combiner doesn't create VALIGN nodes itself yet.
commit 10439f9e32
parent b632bd88a6
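For context (not part of the diff): VALIGND/VALIGNQ concatenate their two source vectors and extract a window of NumElems contiguous elements starting at the immediate, so output lane i simply maps to lane i + imm of the concatenation. Below is a minimal standalone sketch of that decode, modeled on the DecodeVALIGNMask helper this patch calls; the in-tree helper fills a SmallVectorImpl, and std::vector is used here only to keep the example self-contained.

#include <cassert>
#include <cstdio>
#include <vector>

// Standalone model of the VALIGN decode: element i of the result comes from
// lane (i + Imm) of the 2*NumElts-wide concatenation of the two sources.
// Only the low log2(NumElts) bits of the immediate are meaningful.
std::vector<int> decodeVALIGNMask(unsigned NumElts, unsigned Imm) {
  assert((NumElts & (NumElts - 1)) == 0 && "NumElts should be a power of 2");
  Imm &= NumElts - 1; // unused immediate bits are ignored
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(int(i + Imm));
  return Mask;
}

int main() {
  // v8i32 VALIGND with imm=3 -> [3,4,5,6,7,8,9,10]:
  // lanes 3..7 of the low source, then lanes 0..2 of the high source.
  for (int M : decodeVALIGNMask(8, 3))
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}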
@@ -4752,6 +4752,7 @@ static bool isTargetShuffle(unsigned Opcode) {
   case X86ISD::INSERTPS:
   case X86ISD::EXTRQI:
   case X86ISD::INSERTQI:
+  case X86ISD::VALIGN:
   case X86ISD::PALIGNR:
   case X86ISD::VSHLDQ:
   case X86ISD::VSRLDQ:
@@ -6732,6 +6733,17 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
     DecodeMOVLHPSMask(NumElems, Mask);
     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
     break;
+  case X86ISD::VALIGN:
+    assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
+           "Only 32-bit and 64-bit elements are supported!");
+    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
+    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
+    ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
+    DecodeVALIGNMask(NumElems, ImmN, Mask);
+    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
+    Ops.push_back(N->getOperand(1));
+    Ops.push_back(N->getOperand(0));
+    break;
   case X86ISD::PALIGNR:
     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
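One subtlety in the hunk above: the operands are pushed in reverse order (operand 1 before operand 0), because decoded indices below NumElems select the low half of the concatenation, which for VALIGN is the second source. The small helper below (resolveMaskIndex is purely illustrative, not an LLVM API) sketches how a decoded index maps back to (operand, lane) under that convention.

#include <cassert>
#include <utility>

// Map a decoded shuffle-mask index back to (Ops entry, lane within it).
// Indices 0..NumElts-1 select Ops[0] and NumElts..2*NumElts-1 select Ops[1];
// since the VALIGN decode pushed operand(1) first, index 0 is the first lane
// of the instruction's *second* source, matching valignd/valignq semantics
// (src1:src2 concatenated, then shifted right by imm elements).
std::pair<unsigned, unsigned> resolveMaskIndex(unsigned NumElts, int Idx) {
  assert(Idx >= 0 && unsigned(Idx) < 2 * NumElts && "index out of range");
  unsigned OpNo = unsigned(Idx) / NumElts; // 0 -> Ops[0], 1 -> Ops[1]
  unsigned Lane = unsigned(Idx) % NumElts;
  return {OpNo, Lane};
}

int main() {
  // For v4i32 VALIGND imm=1 the decoded mask is [1,2,3,4]; index 4 resolves
  // to Ops[1] lane 0, i.e. element 0 of the instruction's first source.
  auto [Op, Lane] = resolveMaskIndex(4, 4);
  assert(Op == 1 && Lane == 0);
  (void)Op; (void)Lane;
  return 0;
}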
@@ -47251,6 +47263,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::INSERTPS:
   case X86ISD::EXTRQI:
   case X86ISD::INSERTQI:
+  case X86ISD::VALIGN:
   case X86ISD::PALIGNR:
   case X86ISD::VSHLDQ:
   case X86ISD::VSRLDQ:
@@ -1583,11 +1583,18 @@ define <4 x i32> @shuffle_v4i32_2456(<4 x i32> %a, <4 x i32> %b) {
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v4i32_2456:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: shuffle_v4i32_2456:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
+; AVX1OR2-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
+; AVX1OR2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4i32_2456:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [6,0,1,2]
+; AVX512VL-NEXT: vpermi2d %xmm0, %xmm1, %xmm2
+; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0
+; AVX512VL-NEXT: retq
   %s1 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
   %s2 = shufflevector <4 x i32> %s1, <4 x i32> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
   ret <4 x i32> %s2
@@ -2524,11 +2524,18 @@ define <8 x i32> @shuffle_v8i32_089abcde(<8 x i32> %a, <8 x i32> %b) {
 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
 ; AVX2-NEXT: retq
 ;
-; AVX512VL-LABEL: shuffle_v8i32_089abcde:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: valignd {{.*#+}} ymm1 = ymm1[7,0,1,2,3,4,5,6]
-; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; AVX512VL-NEXT: retq
+; AVX512VL-SLOW-LABEL: shuffle_v8i32_089abcde:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: valignd {{.*#+}} ymm1 = ymm1[7,0,1,2,3,4,5,6]
+; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i32_089abcde:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [8,0,1,2,3,4,5,6]
+; AVX512VL-FAST-NEXT: vpermi2d %ymm0, %ymm1, %ymm2
+; AVX512VL-FAST-NEXT: vmovdqa %ymm2, %ymm0
+; AVX512VL-FAST-NEXT: retq
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
   ret <8 x i32> %shuffle
 }
@@ -62,8 +62,9 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
 ; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1} {z}
 ; AVX512VL-NEXT: movq $-1, %rax
 ; AVX512VL-NEXT: vmovq %rax, %xmm2
-; AVX512VL-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX512VL-NEXT: vptestmq %xmm1, %xmm1, %k1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,2]
+; AVX512VL-NEXT: vpermi2q %xmm2, %xmm1, %xmm3
+; AVX512VL-NEXT: vptestmq %xmm3, %xmm3, %k1
 ; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512VL-NEXT: retq
 ;
@@ -74,8 +75,9 @@ define <2 x i1> @shuf2i1_1_2(<2 x i1> %a) {
 ; VL_BW_DQ-NEXT: movq $-1, %rax
 ; VL_BW_DQ-NEXT: vmovq %rax, %xmm0
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm1
-; VL_BW_DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; VL_BW_DQ-NEXT: vpmovq2m %xmm0, %k0
+; VL_BW_DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2]
+; VL_BW_DQ-NEXT: vpermi2q %xmm0, %xmm1, %xmm2
+; VL_BW_DQ-NEXT: vpmovq2m %xmm2, %k0
 ; VL_BW_DQ-NEXT: vpmovm2q %k0, %xmm0
 ; VL_BW_DQ-NEXT: retq
   %b = shufflevector <2 x i1> %a, <2 x i1> <i1 1, i1 0>, <2 x i32> <i32 1, i32 2>