[X86] Remove sse1 intrinsic tests from the avx intrinsics test file.

They are all covered by the SSE intrinsics test with SSE, AVX, and AVX512 command lines.
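For reference, that coverage comes from the SSE intrinsics test being run once per instruction set: a plain SSE command line plus AVX2 and SKX command lines sharing a common VCHECK prefix, the same scheme the new 64-bit file added below uses. A minimal sketch of the layout follows; the 32-bit triple and the min_ss function are chosen here for illustration and are not copied verbatim from sse-intrinsics-x86.ll.

; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=-avx,+sse -show-mc-encoding | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX

; One test body, checked under every prefix: the SSE run sees the legacy
; instruction, both VEX-capable runs see the v-prefixed form.
; (Illustrative sketch, not the exact contents of the checked-in test.)
define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse_min_ss:
; SSE:       ## BB#0:
; SSE-NEXT:    minss %xmm1, %xmm0
; SSE-NEXT:    retl
;
; VCHECK-LABEL: test_x86_sse_min_ss:
; VCHECK:       ## BB#0:
; VCHECK-NEXT:    vminss %xmm1, %xmm0, %xmm0
; VCHECK-NEXT:    retl
  %res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone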

Also remove an unneeded sfence intrinsic test since it was already covered.

llvm-svn: 295699
Craig Topper 2017-02-21 07:32:03 +00:00
parent 7fa88bb844
commit 0d47fdcf3f
5 changed files with 98 additions and 549 deletions


@@ -1915,501 +1915,6 @@ define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1
}
define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse_cmp_ps:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpordps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0xc2,0xc1,0x07]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse_cmp_ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpordss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0xc2,0xc1,0x07]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind readnone
define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_comieq_ss:
; AVX: ## BB#0:
; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
; AVX-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_comieq_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX512VL-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX512VL-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
; AVX512VL-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_comige_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_comige_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.comige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_comigt_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_comigt_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.comigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_comile_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_comile_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.comile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_comilt_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vcomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2f,0xc8]
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_comilt_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vcomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc8]
; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.comilt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_comineq_ss:
; AVX: ## BB#0:
; AVX-NEXT: vcomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
; AVX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_comineq_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vcomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2f,0xc1]
; AVX512VL-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX512VL-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
; AVX512VL-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.comineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.comineq.ss(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_cvtsi2ss(<4 x float> %a0) {
; AVX-LABEL: test_x86_sse_cvtsi2ss:
; AVX: ## BB#0:
; AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; AVX-NEXT: vcvtsi2ssl %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x2a,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_cvtsi2ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; AVX512VL-NEXT: vcvtsi2ssl %eax, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2a,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float>, i32) nounwind readnone
define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) {
; AVX-LABEL: test_x86_sse_cvtss2si:
; AVX: ## BB#0:
; AVX-NEXT: vcvtss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2d,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_cvtss2si:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vcvtss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2d,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) {
; AVX-LABEL: test_x86_sse_cvttss2si:
; AVX: ## BB#0:
; AVX-NEXT: vcvttss2si %xmm0, %eax ## encoding: [0xc5,0xfa,0x2c,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_cvttss2si:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vcvttss2si %xmm0, %eax ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x2c,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
define void @test_x86_sse_ldmxcsr(i8* %a0) {
; CHECK-LABEL: test_x86_sse_ldmxcsr:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; CHECK-NEXT: vldmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x10]
; CHECK-NEXT: retl ## encoding: [0xc3]
call void @llvm.x86.sse.ldmxcsr(i8* %a0)
ret void
}
declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind
define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_max_ps:
; AVX: ## BB#0:
; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5f,0xc1]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_max_ps:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5f,0xc1]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse_max_ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5f,0xc1]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_min_ps:
; AVX: ## BB#0:
; AVX-NEXT: vminps %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5d,0xc1]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_min_ps:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vminps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5d,0xc1]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_sse_min_ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vminss %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5d,0xc1]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_movmsk_ps:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovmskps %xmm0, %eax ## encoding: [0xc5,0xf8,0x50,0xc0]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rcp_ps(<4 x float> %a0) {
; AVX-LABEL: test_x86_sse_rcp_ps:
; AVX: ## BB#0:
; AVX-NEXT: vrcpps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x53,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_rcp_ps:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vrcp14ps %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x4c,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rcp_ss(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_rcp_ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vrcpss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x53,0xc0]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rsqrt_ps(<4 x float> %a0) {
; AVX-LABEL: test_x86_sse_rsqrt_ps:
; AVX: ## BB#0:
; AVX-NEXT: vrsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x52,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_rsqrt_ps:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vrsqrt14ps %xmm0, %xmm0 ## encoding: [0x62,0xf2,0x7d,0x08,0x4e,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rsqrt_ss(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_rsqrt_ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x52,0xc0]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_sqrt_ps(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_sqrt_ps:
; CHECK: ## BB#0:
; CHECK-NEXT: vsqrtps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x51,0xc0]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_sqrt_ss(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_sqrt_ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x51,0xc0]
; CHECK-NEXT: retl ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
define void @test_x86_sse_stmxcsr(i8* %a0) {
; CHECK-LABEL: test_x86_sse_stmxcsr:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; CHECK-NEXT: vstmxcsr (%eax) ## encoding: [0xc5,0xf8,0xae,0x18]
; CHECK-NEXT: retl ## encoding: [0xc3]
call void @llvm.x86.sse.stmxcsr(i8* %a0)
ret void
}
declare void @llvm.x86.sse.stmxcsr(i8*) nounwind
define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_ucomieq_ss:
; AVX: ## BB#0:
; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
; AVX-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_ucomieq_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX512VL-NEXT: setnp %al ## encoding: [0x0f,0x9b,0xc0]
; AVX512VL-NEXT: sete %cl ## encoding: [0x0f,0x94,0xc1]
; AVX512VL-NEXT: andb %al, %cl ## encoding: [0x20,0xc1]
; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_ucomige_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_ucomige_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.ucomige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_ucomigt_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_ucomigt_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.ucomigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_ucomile_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
; AVX-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_ucomile_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
; AVX512VL-NEXT: setae %al ## encoding: [0x0f,0x93,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.ucomile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_ucomilt_ss:
; AVX: ## BB#0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vucomiss %xmm0, %xmm1 ## encoding: [0xc5,0xf8,0x2e,0xc8]
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_ucomilt_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX512VL-NEXT: vucomiss %xmm0, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc8]
; AVX512VL-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.ucomilt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
; AVX-LABEL: test_x86_sse_ucomineq_ss:
; AVX: ## BB#0:
; AVX-NEXT: vucomiss %xmm1, %xmm0 ## encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
; AVX-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
; AVX-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX-NEXT: retl ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse_ucomineq_ss:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
; AVX512VL-NEXT: setp %al ## encoding: [0x0f,0x9a,0xc0]
; AVX512VL-NEXT: setne %cl ## encoding: [0x0f,0x95,0xc1]
; AVX512VL-NEXT: orb %al, %cl ## encoding: [0x08,0xc1]
; AVX512VL-NEXT: movzbl %cl, %eax ## encoding: [0x0f,0xb6,0xc1]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse.ucomineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomineq.ss(<4 x float>, <4 x float>) nounwind readnone
define <16 x i8> @test_x86_ssse3_pabs_b_128(<16 x i8> %a0) {
; AVX-LABEL: test_x86_ssse3_pabs_b_128:
; AVX: ## BB#0:
@@ -3660,16 +3165,6 @@ define void @mwait(i32 %E, i32 %H) nounwind {
}
declare void @llvm.x86.sse3.mwait(i32, i32) nounwind
define void @sfence() nounwind {
; CHECK-LABEL: sfence:
; CHECK: ## BB#0:
; CHECK-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
; CHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse.sfence()
ret void
}
declare void @llvm.x86.sse.sfence() nounwind
define void @lfence() nounwind {
; CHECK-LABEL: lfence:
; CHECK: ## BB#0:
@@ -3738,8 +3233,8 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX-LABEL: movnt_dq:
; AVX: ## BB#0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vpaddq LCPI247_0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
-; AVX-NEXT: ## fixup A - offset: 4, value: LCPI247_0, kind: FK_Data_4
+; AVX-NEXT: vpaddq LCPI216_0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
+; AVX-NEXT: ## fixup A - offset: 4, value: LCPI216_0, kind: FK_Data_4
; AVX-NEXT: vmovntdq %ymm0, (%eax) ## encoding: [0xc5,0xfd,0xe7,0x00]
; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX-NEXT: retl ## encoding: [0xc3]
@@ -3747,8 +3242,8 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX512VL-LABEL: movnt_dq:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpaddq LCPI247_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
-; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI247_0, kind: FK_Data_4
+; AVX512VL-NEXT: vpaddq LCPI216_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
+; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI216_0, kind: FK_Data_4
; AVX512VL-NEXT: vmovntdq %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
%a2 = add <2 x i64> %a1, <i64 1, i64 1>


@@ -35,38 +35,6 @@ define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) {
declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_cvtss2si64:
; CHECK: ## BB#0:
; CHECK-NEXT: vcvtss2si %xmm0, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
ret i64 %res
}
declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
; CHECK-LABEL: test_x86_sse_cvtsi642ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_cvttss2si64:
; CHECK: ## BB#0:
; CHECK-NEXT: vcvttss2si %xmm0, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]
ret i64 %res
}
declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
; AVX-LABEL: test_x86_avx_vzeroall:
; AVX: ## BB#0:


@@ -1,8 +0,0 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep sfence
declare void @llvm.x86.sse.sfence() nounwind
define void @test() {
call void @llvm.x86.sse.sfence()
ret void
}


@@ -682,3 +682,19 @@ define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) {
ret i32 %res
}
declare i32 @llvm.x86.sse.ucomineq.ss(<4 x float>, <4 x float>) nounwind readnone
define void @sfence() nounwind {
; SSE-LABEL: sfence:
; SSE: ## BB#0:
; SSE-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
; SSE-NEXT: retl ## encoding: [0xc3]
;
; VCHECK-LABEL: sfence:
; VCHECK: ## BB#0:
; VCHECK-NEXT: sfence ## encoding: [0x0f,0xae,0xf8]
; VCHECK-NEXT: retl ## encoding: [0xc3]
tail call void @llvm.x86.sse.sfence()
ret void
}
declare void @llvm.x86.sse.sfence() nounwind


@@ -0,0 +1,78 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse -show-mc-encoding | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_cvtss2si64:
; CHECK: ## BB#0:
; CHECK-NEXT: vcvtss2si %xmm0, %rax
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse_cvtss2si64:
; SSE: ## BB#0:
; SSE-NEXT: cvtss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2d,0xc0]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvtss2si64:
; AVX2: ## BB#0:
; AVX2-NEXT: vcvtss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvtss2si64:
; SKX: ## BB#0:
; SKX-NEXT: vcvtss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2d,0xc0]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
ret i64 %res
}
declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
; CHECK-LABEL: test_x86_sse_cvtsi642ss:
; CHECK: ## BB#0:
; CHECK-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse_cvtsi642ss:
; SSE: ## BB#0:
; SSE-NEXT: cvtsi2ssq %rdi, %xmm0 ## encoding: [0xf3,0x48,0x0f,0x2a,0xc7]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvtsi642ss:
; AVX2: ## BB#0:
; AVX2-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 ## encoding: [0xc4,0xe1,0xfa,0x2a,0xc7]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvtsi642ss:
; SKX: ## BB#0:
; SKX-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfe,0x08,0x2a,0xc7]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_sse_cvttss2si64:
; CHECK: ## BB#0:
; CHECK-NEXT: vcvttss2si %xmm0, %rax
; CHECK-NEXT: retq
; SSE-LABEL: test_x86_sse_cvttss2si64:
; SSE: ## BB#0:
; SSE-NEXT: cvttss2si %xmm0, %rax ## encoding: [0xf3,0x48,0x0f,0x2c,0xc0]
; SSE-NEXT: retq ## encoding: [0xc3]
;
; AVX2-LABEL: test_x86_sse_cvttss2si64:
; AVX2: ## BB#0:
; AVX2-NEXT: vcvttss2si %xmm0, %rax ## encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
; AVX2-NEXT: retq ## encoding: [0xc3]
;
; SKX-LABEL: test_x86_sse_cvttss2si64:
; SKX: ## BB#0:
; SKX-NEXT: vcvttss2si %xmm0, %rax ## EVEX TO VEX Compression encoding: [0xc4,0xe1,0xfa,0x2c,0xc0]
; SKX-NEXT: retq ## encoding: [0xc3]
%res = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0) ; <i64> [#uses=1]
ret i64 %res
}
declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone