[AVX512] add PRORVQ and PRORVD Intrinsic
Differential Revision: http://reviews.llvm.org/D15955

llvm-svn: 257283
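As background on what these intrinsics expose: VPRORVD and VPRORVQ rotate each 32-bit or 64-bit lane right by a per-lane variable count, with AVX-512 merge or zero masking applied to the result. Below is a minimal C sketch of the per-lane semantics of the doubleword form; the helper names (rotr32, prorvd_mask_ref) are illustrative only and are not part of this patch.

#include <stdint.h>

/* Illustrative reference model (not the patch's code): each 32-bit lane of
 * src is rotated right by the matching lane of amt (count taken mod 32);
 * lanes whose mask bit is 0 take the passthru value instead (merge masking).
 * The zero-masked form behaves the same with an all-zero passthru. */
static uint32_t rotr32(uint32_t x, uint32_t n) {
  n &= 31;
  return n ? (x >> n) | (x << (32 - n)) : x;
}

static void prorvd_mask_ref(uint32_t *dst, const uint32_t *src,
                            const uint32_t *amt, const uint32_t *passthru,
                            uint16_t mask, int lanes) {
  for (int i = 0; i < lanes; ++i)
    dst[i] = ((mask >> i) & 1) ? rotr32(src[i], amt[i]) : passthru[i];
}

The quadword form has the same shape with 64-bit lanes and counts taken mod 64; the 128- and 256-bit variants simply use fewer lanes, which is why their definitions below take an i8 mask while the 512-bit doubleword form takes an i16 mask.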
@@ -2960,6 +2960,26 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_avx512_mask_psrlv8_si : GCCBuiltin<"__builtin_ia32_psrlv8si_mask">,
               Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                          llvm_v8i32_ty, llvm_v8i32_ty,  llvm_i8_ty], [IntrNoMem]>;
+
+  def int_x86_avx512_mask_prorv_d_128 : GCCBuiltin<"__builtin_ia32_prorvd128_mask">,
+              Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+                         llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_d_256 : GCCBuiltin<"__builtin_ia32_prorvd256_mask">,
+              Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+                         llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_d_512 : GCCBuiltin<"__builtin_ia32_prorvd512_mask">,
+              Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+                         llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_q_128 : GCCBuiltin<"__builtin_ia32_prorvq128_mask">,
+              Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+                         llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_q_256 : GCCBuiltin<"__builtin_ia32_prorvq256_mask">,
+              Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+                         llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+  def int_x86_avx512_mask_prorv_q_512 : GCCBuiltin<"__builtin_ia32_prorvq512_mask">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+                         llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
 }

 // Gather ops
@@ -1201,6 +1201,12 @@ static const IntrinsicData  IntrinsicsWithoutChain[] = {
   X86_INTRINSIC_DATA(avx512_mask_por_q_128, INTR_TYPE_2OP_MASK, ISD::OR, 0),
   X86_INTRINSIC_DATA(avx512_mask_por_q_256, INTR_TYPE_2OP_MASK, ISD::OR, 0),
   X86_INTRINSIC_DATA(avx512_mask_por_q_512, INTR_TYPE_2OP_MASK, ISD::OR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_prorv_d_128, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_prorv_d_256, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_prorv_d_512, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_prorv_q_128, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_prorv_q_256, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
+  X86_INTRINSIC_DATA(avx512_mask_prorv_q_512, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
   X86_INTRINSIC_DATA(avx512_mask_pshuf_b_128, INTR_TYPE_2OP_MASK,
                      X86ISD::PSHUFB, 0),
   X86_INTRINSIC_DATA(avx512_mask_pshuf_b_256, INTR_TYPE_2OP_MASK,
@@ -6561,3 +6561,44 @@ define <16 x i32>@test_int_x86_avx512_mask_pshuf_d_512(<16 x i32> %x0, i16 %x1,
   %res4 = add <16 x i32> %res3, %res2
   ret <16 x i32> %res4
 }
+
+declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_512:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm3 {%k1} {z}
+; CHECK-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vpaddd %zmm3, %zmm2, %zmm1
+; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
+  %res1 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> zeroinitializer, i16 %x3)
+  %res2 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
+  %res3 = add <16 x i32> %res, %res1
+  %res4 = add <16 x i32> %res3, %res2
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_512:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %dil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm3 {%k1} {z}
+; CHECK-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vpaddq %zmm3, %zmm2, %zmm1
+; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+  %res1 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+  %res2 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+  %res3 = add <8 x i64> %res, %res1
+  %res4 = add <8 x i64> %res3, %res2
+  ret <8 x i64> %res4
+}
@@ -6555,3 +6555,87 @@ define <8 x i32>@test_int_x86_avx512_mask_psllv8_si(<8 x i32> %x0, <8 x i32> %x1
   %res4 = add <8 x i32> %res3, %res2
   ret <8 x i32> %res4
 }
+
+declare <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+
+define <4 x i32>@test_int_x86_avx512_mask_prorv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %dil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vprorvd %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vprorvd %xmm1, %xmm0, %xmm3 {%k1} {z}
+; CHECK-NEXT:    vprorvd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpaddd %xmm3, %xmm2, %xmm1
+; CHECK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
+  %res1 = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %x3)
+  %res2 = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
+  %res3 = add <4 x i32> %res, %res1
+  %res4 = add <4 x i32> %res3, %res2
+  ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+
+define <8 x i32>@test_int_x86_avx512_mask_prorv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %dil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vprorvd %ymm1, %ymm0, %ymm2 {%k1}
+; CHECK-NEXT:    vprorvd %ymm1, %ymm0, %ymm3 {%k1} {z}
+; CHECK-NEXT:    vprorvd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpaddd %ymm3, %ymm2, %ymm1
+; CHECK-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
+  %res1 = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
+  %res2 = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
+  %res3 = add <8 x i32> %res, %res1
+  %res4 = add <8 x i32> %res3, %res2
+  ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_prorv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %dil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vprorvq %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT:    vprorvq %xmm1, %xmm0, %xmm3 {%k1} {z}
+; CHECK-NEXT:    vprorvq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpaddq %xmm3, %xmm2, %xmm1
+; CHECK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+  %res1 = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+  %res2 = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+  %res3 = add <2 x i64> %res, %res1
+  %res4 = add <2 x i64> %res3, %res2
+  ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_prorv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %dil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vprorvq %ymm1, %ymm0, %ymm2 {%k1}
+; CHECK-NEXT:    vprorvq %ymm1, %ymm0, %ymm3 {%k1} {z}
+; CHECK-NEXT:    vprorvq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpaddq %ymm3, %ymm2, %ymm1
+; CHECK-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+  %res1 = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+  %res2 = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+  %res3 = add <4 x i64> %res, %res1
+  %res4 = add <4 x i64> %res3, %res2
+  ret <4 x i64> %res4
+}