; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fast-hops | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
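
; These tests cover add reductions (llvm.vector.reduce.add) of inputs that are
; zero-extended from a narrower element type. Of particular interest is the
; lowering of i8 sources, where the extend + reduce pair can be matched to
; PSADBW against a zero vector, which sums each group of 8 source bytes into
; the low 16 bits of a 64-bit lane.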

;
; vXi64
;

define i64 @test_v2i64_v2i32(<2 x i32> %a0) {
; SSE2-LABEL: test_v2i64_v2i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorps %xmm1, %xmm1
; SSE2-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT:    psrlq $32, %xmm0
; SSE2-NEXT:    paddq %xmm1, %xmm0
; SSE2-NEXT:    movq %xmm0, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2i64_v2i32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    movq %xmm1, %rax
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2i64_v2i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, %rax
; AVX-NEXT:    retq
  %1 = zext <2 x i32> %a0 to <2 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %1)
  ret i64 %2
}

define i64 @test_v4i64_v4i16(<4 x i16> %a0) {
; SSE2-LABEL: test_v4i64_v4i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    paddq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE2-NEXT:    paddq %xmm0, %xmm1
; SSE2-NEXT:    movq %xmm1, %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4i64_v4i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE41-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT:    paddq %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    movq %xmm1, %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v4i64_v4i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovq %xmm0, %rax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i64_v4i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v4i64_v4i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovq %xmm0, %rax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = zext <4 x i16> %a0 to <4 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %1)
  ret i64 %2
}
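
; For i8 sources the zero-extend never needs to be materialized: PSADBW with a
; zero operand sums each group of 8 source bytes directly into a 64-bit lane,
; so a single SAD (plus a shuffle/add to combine lanes, where needed) replaces
; the whole widened reduction.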

define i64 @test_v8i64_v8i8(<8 x i8> %a0) {
; SSE-LABEL: test_v8i64_v8i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psadbw %xmm0, %xmm1
; SSE-NEXT:    movq %xmm1, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8i64_v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, %rax
; AVX-NEXT:    retq
  %1 = zext <8 x i8> %a0 to <8 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %1)
  ret i64 %2
}

define i64 @test_v16i64_v16i8(<16 x i8> %a0) {
; SSE-LABEL: test_v16i64_v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psadbw %xmm0, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE-NEXT:    paddq %xmm1, %xmm0
; SSE-NEXT:    movq %xmm0, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v16i64_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, %rax
; AVX-NEXT:    retq
  %1 = zext <16 x i8> %a0 to <16 x i64>
  %2 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %1)
  ret i64 %2
}

;
; vXi32
;

define i32 @test_v2i32_v2i16(<2 x i16> %a0) {
; SSE2-LABEL: test_v2i32_v2i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2i32_v2i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    psrld $16, %xmm0
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v2i32_v2i16:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v2i32_v2i16:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v2i32_v2i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v2i32_v2i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX512-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    retq
  %1 = zext <2 x i16> %a0 to <2 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %1)
  ret i32 %2
}
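
; Only four source bytes are live here, so they are isolated into the low
; 64-bit lane (unpacked or blended with zero) before the SAD; PSADBW then
; leaves their sum in that lane for MOVD to return.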

define i32 @test_v4i32(<4 x i8> %a0) {
; SSE2-LABEL: test_v4i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    psadbw %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4i32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pxor %xmm1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT:    psadbw %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test_v4i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX2-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v4i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    retq
  %1 = zext <4 x i8> %a0 to <4 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
  ret i32 %2
}

define i32 @test_v8i32_v8i8(<8 x i8> %a0) {
; SSE-LABEL: test_v8i32_v8i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psadbw %xmm0, %xmm1
; SSE-NEXT:    movd %xmm1, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8i32_v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    retq
  %1 = zext <8 x i8> %a0 to <8 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
  ret i32 %2
}

define i32 @test_v16i32_v16i8(<16 x i8> %a0) {
; SSE-LABEL: test_v16i32_v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psadbw %xmm0, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE-NEXT:    paddq %xmm1, %xmm0
; SSE-NEXT:    movd %xmm0, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v16i32_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    retq
  %1 = zext <16 x i8> %a0 to <16 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
  ret i32 %2
}
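
; 256-bit byte sources either get split into 128-bit halves that are SAD'd and
; added together (SSE/AVX1), or use a single 256-bit VPSADBW whose two 128-bit
; halves are then combined (AVX2/AVX512).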

define i32 @test_v32i32_v32i8(<32 x i8> %a0) {
; SSE-LABEL: test_v32i32_v32i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    psadbw %xmm2, %xmm1
; SSE-NEXT:    psadbw %xmm2, %xmm0
; SSE-NEXT:    paddq %xmm1, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT:    paddq %xmm0, %xmm1
; SSE-NEXT:    movd %xmm1, %eax
; SSE-NEXT:    retq
;
; AVX1-LABEL: test_v32i32_v32i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpsadbw %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpsadbw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v32i32_v32i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v32i32_v32i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = zext <32 x i8> %a0 to <32 x i32>
  %2 = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %1)
  ret i32 %2
}

;
; vXi16
;
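
; The PSADBW lowering also applies when the reduction result is i16: the sum
; of at most 64 bytes is bounded by 64 * 255 = 16320, so the 64-bit SAD result
; can simply be truncated to 16 bits.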

define i16 @test_v2i16_v2i8(<2 x i8> %a0) {
; SSE2-LABEL: test_v2i16_v2i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2i16_v2i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $16, %xmm1
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v2i16_v2i8:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v2i16_v2i8:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v2i16_v2i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v2i16_v2i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    retq
  %1 = zext <2 x i8> %a0 to <2 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %1)
  ret i16 %2
}

define i16 @test_v4i16_v4i8(<4 x i8> %a0) {
; SSE2-LABEL: test_v4i16_v4i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE2-NEXT:    paddw %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4i16_v4i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT:    paddw %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    psrld $16, %xmm0
; SSE41-NEXT:    paddw %xmm1, %xmm0
; SSE41-NEXT:    movd %xmm0, %eax
; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE41-NEXT:    retq
;
; AVX1-SLOW-LABEL: test_v4i16_v4i8:
; AVX1-SLOW:       # %bb.0:
; AVX1-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX1-SLOW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT:    vmovd %xmm0, %eax
; AVX1-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-SLOW-NEXT:    retq
;
; AVX1-FAST-LABEL: test_v4i16_v4i8:
; AVX1-FAST:       # %bb.0:
; AVX1-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-FAST-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT:    vmovd %xmm0, %eax
; AVX1-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-FAST-NEXT:    retq
;
; AVX2-LABEL: test_v4i16_v4i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v4i16_v4i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    retq
  %1 = zext <4 x i8> %a0 to <4 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %1)
  ret i16 %2
}

define i16 @test_v8i16_v8i8(<8 x i8> %a0) {
; SSE-LABEL: test_v8i16_v8i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psadbw %xmm0, %xmm1
; SSE-NEXT:    movd %xmm1, %eax
; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8i16_v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX-NEXT:    retq
  %1 = zext <8 x i8> %a0 to <8 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %1)
  ret i16 %2
}

define i16 @test_v16i16_v16i8(<16 x i8> %a0) {
; SSE-LABEL: test_v16i16_v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psadbw %xmm0, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE-NEXT:    paddq %xmm1, %xmm0
; SSE-NEXT:    movd %xmm0, %eax
; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v16i16_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX-NEXT:    retq
  %1 = zext <16 x i8> %a0 to <16 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %1)
  ret i16 %2
}

define i16 @test_v32i16_v32i8(<32 x i8> %a0) {
; SSE-LABEL: test_v32i16_v32i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    psadbw %xmm2, %xmm1
; SSE-NEXT:    psadbw %xmm2, %xmm0
; SSE-NEXT:    paddq %xmm1, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT:    paddq %xmm0, %xmm1
; SSE-NEXT:    movd %xmm1, %eax
; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE-NEXT:    retq
;
; AVX1-LABEL: test_v32i16_v32i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpsadbw %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpsadbw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v32i16_v32i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v32i16_v32i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = zext <32 x i8> %a0 to <32 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %1)
  ret i16 %2
}
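
; A 512-bit byte source takes four 128-bit SADs (SSE/AVX1), two 256-bit SADs
; (AVX2), or one 512-bit VPSADBW (AVX512BW), with the per-lane results then
; reduced by the usual extract/shuffle adds.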

define i16 @test_v64i16_v64i8(<64 x i8> %a0) {
; SSE-LABEL: test_v64i16_v64i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm4, %xmm4
; SSE-NEXT:    psadbw %xmm4, %xmm3
; SSE-NEXT:    psadbw %xmm4, %xmm1
; SSE-NEXT:    paddq %xmm3, %xmm1
; SSE-NEXT:    psadbw %xmm4, %xmm2
; SSE-NEXT:    paddq %xmm1, %xmm2
; SSE-NEXT:    psadbw %xmm4, %xmm0
; SSE-NEXT:    paddq %xmm2, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT:    paddq %xmm0, %xmm1
; SSE-NEXT:    movd %xmm1, %eax
; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
; SSE-NEXT:    retq
;
; AVX1-LABEL: test_v64i16_v64i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpsadbw %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpsadbw %xmm3, %xmm4, %xmm4
; AVX1-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpsadbw %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpsadbw %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v64i16_v64i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpsadbw %ymm2, %ymm1, %ymm1
; AVX2-NEXT:    vpsadbw %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_v64i16_v64i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpsadbw %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = zext <64 x i8> %a0 to <64 x i16>
  %2 = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %1)
  ret i16 %2
}

declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)

declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>)

declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>)
declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>)
declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>)

declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>)
declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>)
declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>)