; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2-SLOW %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2-FAST,AVX2-FAST-ALL %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2-FAST,AVX2-FAST-PERLANE %s

; These patterns are produced by LoopVectorizer for interleaved loads.
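;
; Conceptually, each @vfN below deinterleaves a single wide load with a
; stride of 5 into five narrow result vectors. A minimal C sketch of the
; kind of scalar loop the LoopVectorizer would turn into these patterns
; (illustrative only; the names deinterleave5/in/outK are hypothetical
; and not part of this test):
;
;   void deinterleave5(const short *in, short *out0, short *out1,
;                      short *out2, short *out3, short *out4, int n) {
;     for (int i = 0; i < n; ++i) {
;       // Consecutive groups of 5 elements are split across 5 outputs.
;       out0[i] = in[5 * i + 0];
;       out1[i] = in[5 * i + 1];
;       out2[i] = in[5 * i + 2];
;       out3[i] = in[5 * i + 3];
;       out4[i] = in[5 * i + 4];
;     }
;   }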
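; vf2: deinterleave a <10 x i16> load (stride 5) into five <2 x i16> outputs.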
define void @vf2(<10 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1, <2 x i16>* %out.vec2, <2 x i16>* %out.vec3, <2 x i16>* %out.vec4) nounwind {
; AVX2-SLOW-LABEL: vf2:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,3,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,3,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[3,1,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[2,1,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpsrlq $48, %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-SLOW-NEXT:    vpbroadcastw 8(%rdi), %xmm5
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vmovd %xmm2, (%rsi)
; AVX2-SLOW-NEXT:    vmovd %xmm3, (%rdx)
; AVX2-SLOW-NEXT:    vmovd %xmm4, (%rcx)
; AVX2-SLOW-NEXT:    vmovd %xmm0, (%r8)
; AVX2-SLOW-NEXT:    vmovd %xmm1, (%r9)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: vf2:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[0,1,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[2,3,12,13,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[4,5,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpsrlq $48, %xmm0, %xmm0
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-FAST-NEXT:    vpbroadcastw 8(%rdi), %xmm5
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1],xmm5[2,3,4,5,6,7]
; AVX2-FAST-NEXT:    vmovd %xmm2, (%rsi)
; AVX2-FAST-NEXT:    vmovd %xmm3, (%rdx)
; AVX2-FAST-NEXT:    vmovd %xmm4, (%rcx)
; AVX2-FAST-NEXT:    vmovd %xmm0, (%r8)
; AVX2-FAST-NEXT:    vmovd %xmm1, (%r9)
; AVX2-FAST-NEXT:    retq
  %wide.vec = load <10 x i16>, <10 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <10 x i16> %wide.vec, <10 x i16> poison, <2 x i32> <i32 0, i32 5>
  %strided.vec1 = shufflevector <10 x i16> %wide.vec, <10 x i16> poison, <2 x i32> <i32 1, i32 6>
  %strided.vec2 = shufflevector <10 x i16> %wide.vec, <10 x i16> poison, <2 x i32> <i32 2, i32 7>
  %strided.vec3 = shufflevector <10 x i16> %wide.vec, <10 x i16> poison, <2 x i32> <i32 3, i32 8>
  %strided.vec4 = shufflevector <10 x i16> %wide.vec, <10 x i16> poison, <2 x i32> <i32 4, i32 9>

  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32
  store <2 x i16> %strided.vec2, <2 x i16>* %out.vec2, align 32
  store <2 x i16> %strided.vec3, <2 x i16>* %out.vec3, align 32
  store <2 x i16> %strided.vec4, <2 x i16>* %out.vec4, align 32

  ret void
}
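; vf4: deinterleave a <20 x i16> load (stride 5) into five <4 x i16> outputs.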
define void @vf4(<20 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1, <4 x i16>* %out.vec2, <4 x i16>* %out.vec3, <4 x i16>* %out.vec4) nounwind {
; AVX2-SLOW-LABEL: vf4:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[3,1,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,3,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[2,3,12,13,6,7,0,1,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm2[0],xmm0[1,2,3]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm1[2],xmm5[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm2[2],xmm0[3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm1[0],xmm6[1],xmm1[2,3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX2-SLOW-NEXT:    vmovq %xmm3, (%rsi)
; AVX2-SLOW-NEXT:    vmovq %xmm4, (%rdx)
; AVX2-SLOW-NEXT:    vmovq %xmm5, (%rcx)
; AVX2-SLOW-NEXT:    vmovq %xmm6, (%r8)
; AVX2-SLOW-NEXT:    vmovq %xmm0, (%r9)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: vf4:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[4,5,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[0,1,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1,2,3,4,5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[2,3,12,13,6,7,0,1,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm2[0],xmm0[1,2,3]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm1[2],xmm5[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm2[2],xmm0[3,4,5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm1[0],xmm6[1],xmm1[2,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
; AVX2-FAST-NEXT:    vmovq %xmm3, (%rsi)
; AVX2-FAST-NEXT:    vmovq %xmm4, (%rdx)
; AVX2-FAST-NEXT:    vmovq %xmm5, (%rcx)
; AVX2-FAST-NEXT:    vmovq %xmm6, (%r8)
; AVX2-FAST-NEXT:    vmovq %xmm0, (%r9)
; AVX2-FAST-NEXT:    retq
  %wide.vec = load <20 x i16>, <20 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <20 x i16> %wide.vec, <20 x i16> poison, <4 x i32> <i32 0, i32 5, i32 10, i32 15>
  %strided.vec1 = shufflevector <20 x i16> %wide.vec, <20 x i16> poison, <4 x i32> <i32 1, i32 6, i32 11, i32 16>
  %strided.vec2 = shufflevector <20 x i16> %wide.vec, <20 x i16> poison, <4 x i32> <i32 2, i32 7, i32 12, i32 17>
  %strided.vec3 = shufflevector <20 x i16> %wide.vec, <20 x i16> poison, <4 x i32> <i32 3, i32 8, i32 13, i32 18>
  %strided.vec4 = shufflevector <20 x i16> %wide.vec, <20 x i16> poison, <4 x i32> <i32 4, i32 9, i32 14, i32 19>

  store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32
  store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32
  store <4 x i16> %strided.vec2, <4 x i16>* %out.vec2, align 32
  store <4 x i16> %strided.vec3, <4 x i16>* %out.vec3, align 32
  store <4 x i16> %strided.vec4, <4 x i16>* %out.vec4, align 32

  ret void
}
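; vf8: deinterleave a <40 x i16> load (stride 5) into five <8 x i16> outputs.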
define void @vf8(<40 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1, <8 x i16>* %out.vec2, <8 x i16>* %out.vec3, <8 x i16>* %out.vec4) nounwind {
; AVX2-SLOW-LABEL: vf8:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5],ymm2[6],ymm0[7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13],ymm2[14],ymm0[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4,5],xmm3[6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,10,11,4,5,14,15,8,9,2,3,12,13,u,u]
; AVX2-SLOW-NEXT:    vpbroadcastw 70(%rdi), %xmm3
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm3[7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7,8],ymm0[9],ymm2[10],ymm0[11],ymm2[12,13],ymm0[14],ymm2[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6],xmm4[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,12,13,6,7,0,1,10,11,4,5,14,15,u,u]
; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
; AVX2-SLOW-NEXT:    vpsllq $48, %xmm4, %xmm5
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm5[7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8,9],ymm0[10],ymm2[11],ymm0[12],ymm2[13,14],ymm0[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[0,1,2,0]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,6,5]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10],ymm0[11],ymm2[12],ymm0[13],ymm2[14,15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[0,1,0,3]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,5,6]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5],ymm0[6],ymm2[7,8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3,4],xmm2[5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[0,1,1,3]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3]
; AVX2-SLOW-NEXT:    vmovdqa %xmm1, (%rsi)
; AVX2-SLOW-NEXT:    vmovdqa %xmm3, (%rdx)
; AVX2-SLOW-NEXT:    vmovdqa %xmm5, (%rcx)
; AVX2-SLOW-NEXT:    vmovdqa %xmm6, (%r8)
; AVX2-SLOW-NEXT:    vmovdqa %xmm0, (%r9)
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: vf8:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm0
; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13],ymm1[14],ymm0[15]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4,5],xmm3[6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,10,11,4,5,14,15,8,9,2,3,12,13,u,u]
; AVX2-FAST-NEXT:    vpbroadcastw 70(%rdi), %xmm3
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6],xmm3[7]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6],xmm4[7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,12,13,6,7,0,1,10,11,4,5,14,15,u,u]
; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
; AVX2-FAST-NEXT:    vpsllq $48, %xmm4, %xmm5
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm5[7]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2-FAST-NEXT:    vmovdqa %xmm2, (%rsi)
; AVX2-FAST-NEXT:    vmovdqa %xmm3, (%rdx)
; AVX2-FAST-NEXT:    vmovdqa %xmm5, (%rcx)
; AVX2-FAST-NEXT:    vmovdqa %xmm6, (%r8)
; AVX2-FAST-NEXT:    vmovdqa %xmm0, (%r9)
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %wide.vec = load <40 x i16>, <40 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <40 x i16> %wide.vec, <40 x i16> poison, <8 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35>
  %strided.vec1 = shufflevector <40 x i16> %wide.vec, <40 x i16> poison, <8 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36>
  %strided.vec2 = shufflevector <40 x i16> %wide.vec, <40 x i16> poison, <8 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37>
  %strided.vec3 = shufflevector <40 x i16> %wide.vec, <40 x i16> poison, <8 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38>
  %strided.vec4 = shufflevector <40 x i16> %wide.vec, <40 x i16> poison, <8 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39>

  store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32
  store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32
  store <8 x i16> %strided.vec2, <8 x i16>* %out.vec2, align 32
  store <8 x i16> %strided.vec3, <8 x i16>* %out.vec3, align 32
  store <8 x i16> %strided.vec4, <8 x i16>* %out.vec4, align 32

  ret void
}
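; vf16: deinterleave an <80 x i16> load (stride 5) into five <16 x i16> outputs.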
define void @vf16(<80 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1, <16 x i16>* %out.vec2, <16 x i16>* %out.vec3, <16 x i16>* %out.vec4) nounwind {
; AVX2-SLOW-LABEL: vf16:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
; AVX2-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm1
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4,5],xmm5[6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,10,11,4,5,14,15,8,9,2,3,12,13,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5],ymm5[6],ymm6[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[0,1,10,11,4,5,14,15,8,9,10,11,4,5,6,7,16,17,26,27,20,21,30,31,24,25,26,27,20,21,22,23]
; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0]
; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm4, %ymm5, %ymm5
; AVX2-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm6
; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm4
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm4[0],xmm6[1],xmm4[2,3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,2,3,12,13,6,7]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm5[0,1,2,3,4],ymm7[5,6,7],ymm5[8,9,10,11,12],ymm7[13,14,15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10],ymm2[11],ymm3[12,13],ymm2[14],ymm3[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm5
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5,6],xmm5[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,12,13,6,7,0,1,10,11,4,5,14,15,10,11]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm7[2,3,0,1]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm10[5],ymm7[6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[2,3,12,13,6,7,0,1,10,11,6,7,8,9,8,9,18,19,28,29,22,23,16,17,26,27,22,23,24,25,24,25]
; AVX2-SLOW-NEXT:    vpblendvb %ymm8, %ymm5, %ymm7, %ymm5
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm4[0,1],xmm6[2],xmm4[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,4,5,14,15,8,9]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm5[0,1,2,3,4],ymm7[5,6,7],ymm5[8,9,10,11,12],ymm7[13,14,15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3],ymm2[4],ymm3[5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11],ymm2[12],ymm3[13,14],ymm2[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm7
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm7[3,4],xmm5[5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13],ymm1[14],ymm0[15]
; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6],ymm7[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11,20,21,30,31,24,25,18,19,28,29,26,27,16,17,26,27]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm6[0],xmm4[1],xmm6[2,3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm5[0,1,2,3,4],ymm7[5,6,7],ymm5[8,9,10,11,12],ymm7[13,14,15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm2[0],ymm3[1,2],ymm2[3],ymm3[4],ymm2[5],ymm3[6,7],ymm2[8],ymm3[9,10],ymm2[11],ymm3[12],ymm2[13],ymm3[14,15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm7
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2],xmm5[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm7[2,3,0,1]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm11[4],ymm7[5],ymm11[6],ymm7[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13,22,23,16,17,26,27,20,21,30,31,30,31,18,19,28,29]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm6[0,1],xmm4[2],xmm6[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,8,9,2,3,12,13]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm5[0,1,2,3,4],ymm7[5,6,7],ymm5[8,9,10,11,12],ymm7[13,14,15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13],ymm2[14],ymm3[15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4],xmm3[5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15,24,25,18,19,28,29,22,23,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[3,1,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,2,1,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT:    vmovdqa %ymm9, (%rsi)
; AVX2-SLOW-NEXT:    vmovdqa %ymm10, (%rdx)
; AVX2-SLOW-NEXT:    vmovdqa %ymm8, (%rcx)
; AVX2-SLOW-NEXT:    vmovdqa %ymm5, (%r8)
; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%r9)
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-ALL-LABEL: vf16:
; AVX2-FAST-ALL:       # %bb.0:
; AVX2-FAST-ALL-NEXT:    vmovdqa 128(%rdi), %ymm0
; AVX2-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm3
; AVX2-FAST-ALL-NEXT:    vmovdqa 32(%rdi), %ymm4
; AVX2-FAST-ALL-NEXT:    vmovdqa 64(%rdi), %ymm1
; AVX2-FAST-ALL-NEXT:    vmovdqa 96(%rdi), %ymm2
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13],ymm4[14],ymm3[15]
; AVX2-FAST-ALL-NEXT:    vextracti128 $1, %ymm5, %xmm6
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1,2,3],xmm5[4,5],xmm6[6,7]
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,10,11,4,5,14,15,8,9,2,3,12,13,6,7]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm1[0],ymm2[1,2],ymm1[3],ymm2[4],ymm1[5],ymm2[6,7],ymm1[8],ymm2[9,10],ymm1[11],ymm2[12],ymm1[13],ymm2[14,15]
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [1,3,0,2,4,6,1,3]
; AVX2-FAST-ALL-NEXT:    vpermd %ymm6, %ymm7, %ymm6
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[0,1,6,7,8,9,14,15,4,5,14,15,4,5,2,3,16,17,22,23,24,25,30,31,20,21,30,31,20,21,18,19]
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0]
; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm5, %ymm6, %ymm5
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,0,3,5,u>
; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm6, %ymm6
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27>
; AVX2-FAST-ALL-NEXT:    vpshufb %ymm11, %ymm6, %ymm6
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm5[0,1,2,3,4],ymm6[5,6,7],ymm5[8,9,10,11,12],ymm6[13,14,15]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm9 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7,8],ymm3[9],ymm4[10],ymm3[11],ymm4[12,13],ymm3[14],ymm4[15]
; AVX2-FAST-ALL-NEXT:    vextracti128 $1, %ymm6, %xmm5
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5,6],xmm5[7]
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,12,13,6,7,0,1,10,11,4,5,14,15,10,11]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm10 = <2,u,u,u,4,7,1,6>
; AVX2-FAST-ALL-NEXT:    vpermd %ymm6, %ymm10, %ymm6
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[2,3,4,5,10,11,0,1,14,15,2,3,12,13,0,1,18,19,20,21,26,27,16,17,30,31,18,19,28,29,16,17]
; AVX2-FAST-ALL-NEXT:    vpblendvb %ymm8, %ymm5, %ymm6, %ymm5
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,1,3,6,u>
; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm6, %ymm6
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25>
; AVX2-FAST-ALL-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm5[0,1,2,3,4],ymm6[5,6,7],ymm5[8,9,10,11,12],ymm6[13,14,15]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm12 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm5 = ymm4[0,1],ymm3[2],ymm4[3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8,9],ymm3[10],ymm4[11],ymm3[12],ymm4[13,14],ymm3[15]
; AVX2-FAST-ALL-NEXT:    vextracti128 $1, %ymm5, %xmm7
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm7[3,4],xmm5[5,6,7]
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm10 = <0,2,u,u,5,7,2,4>
; AVX2-FAST-ALL-NEXT:    vpermd %ymm7, %ymm10, %ymm7
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,0,1,6,7,16,17,22,23,24,25,30,31,20,21,22,23,16,17,22,23]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3,4,5,6,7]
; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [1,4,6,0,1,4,6,0]
; AVX2-FAST-ALL-NEXT:    # ymm7 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm7, %ymm7
; AVX2-FAST-ALL-NEXT:    vpshufb %ymm11, %ymm7, %ymm7
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm7 = ymm5[0,1,2,3,4],ymm7[5,6,7],ymm5[8,9,10,11,12],ymm7[13,14,15]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm7 = ymm5[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0],ymm4[1,2],ymm3[3],ymm4[4],ymm3[5],ymm4[6,7],ymm3[8],ymm4[9,10],ymm3[11],ymm4[12],ymm3[13],ymm4[14,15]
; AVX2-FAST-ALL-NEXT:    vextracti128 $1, %ymm5, %xmm6
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3]
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm10 = <0,3,u,u,5,0,2,7>
; AVX2-FAST-ALL-NEXT:    vpermd %ymm6, %ymm10, %ymm6
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,2,3,4,5,18,19,20,21,26,27,16,17,30,31,30,31,18,19,20,21]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3,4,5,6,7]
; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [2,4,7,0,2,4,7,0]
; AVX2-FAST-ALL-NEXT:    # ymm6 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm6, %ymm6
; AVX2-FAST-ALL-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm6 = ymm5[0,1,2,3,4],ymm6[5,6,7],ymm5[8,9,10,11,12],ymm6[13,14,15]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5],ymm3[6],ymm4[7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
; AVX2-FAST-ALL-NEXT:    vextracti128 $1, %ymm3, %xmm4
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
; AVX2-FAST-ALL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = <1,3,u,u,6,0,3,5>
; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,0,1,6,7,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5],ymm3[6,7]
; AVX2-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,0,2,5,7>
; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm2, %ymm0
; AVX2-FAST-ALL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31]
; AVX2-FAST-ALL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm9, (%rsi)
; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm12, (%rdx)
; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm7, (%rcx)
; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm5, (%r8)
; AVX2-FAST-ALL-NEXT:    vmovdqa %ymm0, (%r9)
; AVX2-FAST-ALL-NEXT:    vzeroupper
; AVX2-FAST-ALL-NEXT:    retq
;
; AVX2-FAST-PERLANE-LABEL: vf16:
; AVX2-FAST-PERLANE:       # %bb.0:
; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm2
; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX2-FAST-PERLANE-NEXT:    vmovdqa 64(%rdi), %ymm0
; AVX2-FAST-PERLANE-NEXT:    vmovdqa 96(%rdi), %ymm1
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm4, %xmm5
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4,5],xmm5[6,7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,10,11,4,5,14,15,8,9,2,3,12,13,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5],ymm5[6],ymm6[7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[0,1,10,11,4,5,14,15,8,9,10,11,4,5,6,7,16,17,26,27,20,21,30,31,24,25,26,27,20,21,22,23]
; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0]
; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm4, %ymm5, %ymm6
; AVX2-FAST-PERLANE-NEXT:    vmovdqa 144(%rdi), %xmm4
; AVX2-FAST-PERLANE-NEXT:    vmovdqa 128(%rdi), %xmm5
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm7 = xmm5[0],xmm4[1],xmm5[2,3]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,2,3,12,13,6,7]
; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2,3,4],ymm7[5,6,7],ymm6[8,9,10,11,12],ymm7[13,14,15]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm9 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10],ymm2[11],ymm3[12,13],ymm2[14],ymm3[15]
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm7, %xmm6
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6],xmm6[7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[2,3,12,13,6,7,0,1,10,11,4,5,14,15,10,11]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm10 = ymm7[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm10[5],ymm7[6,7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[2,3,12,13,6,7,0,1,10,11,6,7,8,9,8,9,18,19,28,29,22,23,16,17,26,27,22,23,24,25,24,25]
; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm7 = xmm5[0,1],xmm4[2],xmm5[3]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,4,5,14,15,8,9]
; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2,3,4],ymm7[5,6,7],ymm6[8,9,10,11,12],ymm7[13,14,15]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm10 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1],ymm2[2],ymm3[3],ymm2[4],ymm3[5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11],ymm2[12],ymm3[13,14],ymm2[15]
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm6, %xmm7
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3,4],xmm6[5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13],ymm1[14],ymm0[15]
; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6],ymm7[7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11,20,21,30,31,24,25,18,19,28,29,26,27,16,17,26,27]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm7 = xmm4[0],xmm5[1],xmm4[2,3]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11]
; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2,3,4],ymm7[5,6,7],ymm6[8,9,10,11,12],ymm7[13,14,15]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm6 = ymm2[0],ymm3[1,2],ymm2[3],ymm3[4],ymm2[5],ymm3[6,7],ymm2[8],ymm3[9,10],ymm2[11],ymm3[12],ymm2[13],ymm3[14,15]
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm6, %xmm7
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm11 = ymm7[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm11[4],ymm7[5],ymm11[6],ymm7[7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13,22,23,16,17,26,27,20,21,30,31,30,31,18,19,28,29]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm7 = xmm4[0,1],xmm5[2],xmm4[3]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,8,9,2,3,12,13]
; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2,3,4],ymm7[5,6,7],ymm6[8,9,10,11,12],ymm7[13,14,15]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13],ymm2[14],ymm3[15]
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4],xmm3[5,6,7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15,24,25,18,19,28,29,22,23,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[12,13,14,15,4,5,14,15,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm5[0,1,2,3,0,1,10,11,u,u,u,u,u,u,u,u]
; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, (%rsi)
; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm10, (%rdx)
; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, (%rcx)
; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, (%r8)
; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, (%r9)
; AVX2-FAST-PERLANE-NEXT:    vzeroupper
; AVX2-FAST-PERLANE-NEXT:    retq
  %wide.vec = load <80 x i16>, <80 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <80 x i16> %wide.vec, <80 x i16> poison, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75>
  %strided.vec1 = shufflevector <80 x i16> %wide.vec, <80 x i16> poison, <16 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76>
  %strided.vec2 = shufflevector <80 x i16> %wide.vec, <80 x i16> poison, <16 x i32> <i32 2, i32 7, i32 12, i32 17, i32 22, i32 27, i32 32, i32 37, i32 42, i32 47, i32 52, i32 57, i32 62, i32 67, i32 72, i32 77>
  %strided.vec3 = shufflevector <80 x i16> %wide.vec, <80 x i16> poison, <16 x i32> <i32 3, i32 8, i32 13, i32 18, i32 23, i32 28, i32 33, i32 38, i32 43, i32 48, i32 53, i32 58, i32 63, i32 68, i32 73, i32 78>
  %strided.vec4 = shufflevector <80 x i16> %wide.vec, <80 x i16> poison, <16 x i32> <i32 4, i32 9, i32 14, i32 19, i32 24, i32 29, i32 34, i32 39, i32 44, i32 49, i32 54, i32 59, i32 64, i32 69, i32 74, i32 79>

  store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32
  store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32
  store <16 x i16> %strided.vec2, <16 x i16>* %out.vec2, align 32
  store <16 x i16> %strided.vec3, <16 x i16>* %out.vec3, align 32
  store <16 x i16> %strided.vec4, <16 x i16>* %out.vec4, align 32

  ret void
}