[X86][SSE] Combine 128-bit target shuffles to PACKSS/PACKUS.

llvm-svn: 316845
Simon Pilgrim 2017-10-28 20:51:27 +00:00
parent bd3852aa5e
commit 294f88dfa0
4 changed files with 39 additions and 45 deletions
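
For context, PACKUSWB/PACKSSWB narrow word lanes to bytes with saturation; when the input words are already known to fit in 8 bits (e.g. after the vpsrld-by-1 of zero-extended data in the avg tests below), the pack produces the same result as the even-byte truncation shuffle it replaces. A rough scalar model, not LLVM code (names here are purely illustrative):

#include <algorithm>
#include <array>
#include <cstdint>

// Illustrative scalar model of PACKUSWB: two v8i16 inputs are narrowed to
// one v16i8 with unsigned saturation. If every word already fits in 8 bits,
// the result equals the "take bytes 0,2,4,..." truncation shuffle.
static std::array<uint8_t, 16> packuswb(const std::array<int16_t, 8> &Lo,
                                        const std::array<int16_t, 8> &Hi) {
  std::array<uint8_t, 16> Out;
  for (int i = 0; i != 8; ++i) {
    Out[i] = static_cast<uint8_t>(std::clamp<int16_t>(Lo[i], 0, 255));
    Out[i + 8] = static_cast<uint8_t>(std::clamp<int16_t>(Hi[i], 0, 255));
  }
  return Out;
}

// PACKSSWB is the signed-saturation variant.
static std::array<int8_t, 16> packsswb(const std::array<int16_t, 8> &Lo,
                                       const std::array<int16_t, 8> &Hi) {
  std::array<int8_t, 16> Out;
  for (int i = 0; i != 8; ++i) {
    Out[i] = static_cast<int8_t>(std::clamp<int16_t>(Lo[i], -128, 127));
    Out[i + 8] = static_cast<int8_t>(std::clamp<int16_t>(Hi[i], -128, 127));
  }
  return Out;
}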


@@ -27698,6 +27698,16 @@ static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
    }
  }

  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
  // TODO add support for 256/512-bit types.
  if ((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) {
    if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
                                   Subtarget)) {
      DstVT = MaskVT;
      return true;
    }
  }

  // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
  if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
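
The binary form of the shuffle mask being matched can be sketched as below; isTruncationPackMask is a hypothetical helper for illustration only, not the matchVectorShuffleWithPACK implementation, which additionally has to prove (via known zero/sign bits) that each source lane saturates to itself:

#include <cstddef>

// Hypothetical sketch: a v16i8 shuffle of two v8i16 sources (viewed as 32
// concatenated bytes) can become a PACKUS/PACKSS when it selects the low
// byte of every word lane in order, i.e. Mask[i] == 2*i for i = 0..15.
static bool isTruncationPackMask(const int *Mask, size_t NumElts) {
  if (NumElts != 16)
    return false;
  for (size_t i = 0; i != NumElts; ++i)
    if (Mask[i] != static_cast<int>(2 * i))
      return false;
  return true;
}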


@@ -2747,24 +2747,19 @@ define void @avg_v16i16_const(<16 x i16>* %a) nounwind {
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [5,6,7,8]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,2,3,4]
; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [5,6,7,8]
; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm2
; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -2867,9 +2862,9 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [5,6,7,8]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [1,2,3,4]
; AVX1-NEXT: vpaddd %xmm0, %xmm7, %xmm9
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [5,6,7,8]
; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpaddd %xmm0, %xmm5, %xmm5
; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
@@ -2877,31 +2872,22 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm8
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
; AVX1-NEXT: vpsrld $1, %xmm9, %xmm7
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm1[1],xmm7[2],xmm1[3],xmm7[4],xmm1[5],xmm7[6],xmm1[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm1[1],xmm6[2],xmm1[3],xmm6[4],xmm1[5],xmm6[6],xmm1[7]
; AVX1-NEXT: vpackusdw %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm1[1],xmm5[2],xmm1[3],xmm5[4],xmm1[5],xmm5[6],xmm1[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
; AVX1-NEXT: vpackusdw %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0],xmm1[1],xmm8[2],xmm1[3],xmm8[4],xmm1[5],xmm8[6],xmm1[7]
; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm2
; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm5, %xmm3
; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm6, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm9, %xmm4
; AVX1-NEXT: vpackusdw %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vmovups %ymm4, (%rax)
; AVX1-NEXT: vmovups %ymm2, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;


@@ -149,7 +149,7 @@ define void @load_8_i8(<8 x i8>* %A) {
; SSE41: # BB#0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT: packuswb %xmm0, %xmm0
; SSE41-NEXT: movq %xmm0, (%rdi)
; SSE41-NEXT: retq
%T = load <8 x i8>, <8 x i8>* %A


@@ -1156,9 +1156,8 @@ define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
@@ -1228,8 +1227,8 @@ define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
@@ -1958,11 +1957,10 @@ define void @PR34773(i16* %a0, i8* %a1) {
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm5, %xmm3, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm3, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rsi)