; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512

declare {<1 x i32>, <1 x i1>} @llvm.ssub.with.overflow.v1i32(<1 x i32>, <1 x i32>)
declare {<2 x i32>, <2 x i1>} @llvm.ssub.with.overflow.v2i32(<2 x i32>, <2 x i32>)
declare {<3 x i32>, <3 x i1>} @llvm.ssub.with.overflow.v3i32(<3 x i32>, <3 x i32>)
declare {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32>, <4 x i32>)
declare {<6 x i32>, <6 x i1>} @llvm.ssub.with.overflow.v6i32(<6 x i32>, <6 x i32>)
declare {<8 x i32>, <8 x i1>} @llvm.ssub.with.overflow.v8i32(<8 x i32>, <8 x i32>)
declare {<16 x i32>, <16 x i1>} @llvm.ssub.with.overflow.v16i32(<16 x i32>, <16 x i32>)

declare {<16 x i8>, <16 x i1>} @llvm.ssub.with.overflow.v16i8(<16 x i8>, <16 x i8>)
declare {<8 x i16>, <8 x i1>} @llvm.ssub.with.overflow.v8i16(<8 x i16>, <8 x i16>)
declare {<2 x i64>, <2 x i1>} @llvm.ssub.with.overflow.v2i64(<2 x i64>, <2 x i64>)

declare {<4 x i24>, <4 x i1>} @llvm.ssub.with.overflow.v4i24(<4 x i24>, <4 x i24>)
declare {<4 x i1>, <4 x i1>} @llvm.ssub.with.overflow.v4i1(<4 x i1>, <4 x i1>)
declare {<2 x i128>, <2 x i1>} @llvm.ssub.with.overflow.v2i128(<2 x i128>, <2 x i128>)

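; v1i32 is scalarized: a plain subl sets OF, seto captures it, and negl
; sign-extends the i1 overflow bit to i32.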
define <1 x i32> @ssubo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
; SSE-LABEL: ssubo_v1i32:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    subl %esi, %edi
; SSE-NEXT:    seto %al
; SSE-NEXT:    negl %eax
; SSE-NEXT:    movl %edi, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: ssubo_v1i32:
; AVX:       # %bb.0:
; AVX-NEXT:    xorl %eax, %eax
; AVX-NEXT:    subl %esi, %edi
; AVX-NEXT:    seto %al
; AVX-NEXT:    negl %eax
; AVX-NEXT:    movl %edi, (%rdx)
; AVX-NEXT:    retq
  %t = call {<1 x i32>, <1 x i1>} @llvm.ssub.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
  %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
  %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
  %res = sext <1 x i1> %obit to <1 x i32>
  store <1 x i32> %val, <1 x i32>* %p2
  ret <1 x i32> %res
}

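; v2i32 is widened to a full XMM register. For r = a - b, signed overflow
; is (b > 0) ^ (a > r), so SSE/AVX need two pcmpgtd plus a pxor; AVX512
; performs the same compares into k-registers and combines them with kxorw.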
define <2 x i32> @ssubo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind {
; SSE-LABEL: ssubo_v2i32:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psubd %xmm1, %xmm3
; SSE-NEXT:    pcmpgtd %xmm2, %xmm1
; SSE-NEXT:    pcmpgtd %xmm3, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    movq %xmm3, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: ssubo_v2i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vmovq %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v2i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vmovq %xmm1, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v2i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtd %xmm2, %xmm1, %k0
; AVX512-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT:    vmovq %xmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<2 x i32>, <2 x i1>} @llvm.ssub.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1)
  %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0
  %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1
  %res = sext <2 x i1> %obit to <2 x i32>
  store <2 x i32> %val, <2 x i32>* %p2
  ret <2 x i32> %res
}

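; Odd-sized v3i32 widens to v4i32; the 12-byte value store is split into a
; movq of the low two lanes plus a 4-byte store of lane 2 (pextrd with
; SSE4.1+, pshufd+movd before that).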
define <3 x i32> @ssubo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
; SSE2-LABEL: ssubo_v3i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    psubd %xmm1, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    movq %xmm3, (%rdi)
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
; SSE2-NEXT:    movd %xmm1, 8(%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: ssubo_v3i32:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    pxor %xmm2, %xmm2
; SSSE3-NEXT:    movdqa %xmm0, %xmm3
; SSSE3-NEXT:    psubd %xmm1, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
; SSSE3-NEXT:    pxor %xmm1, %xmm0
; SSSE3-NEXT:    movq %xmm3, (%rdi)
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
; SSSE3-NEXT:    movd %xmm1, 8(%rdi)
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: ssubo_v3i32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pxor %xmm2, %xmm2
; SSE41-NEXT:    movdqa %xmm0, %xmm3
; SSE41-NEXT:    psubd %xmm1, %xmm3
; SSE41-NEXT:    pcmpgtd %xmm2, %xmm1
; SSE41-NEXT:    pcmpgtd %xmm3, %xmm0
; SSE41-NEXT:    pxor %xmm1, %xmm0
; SSE41-NEXT:    pextrd $2, %xmm3, 8(%rdi)
; SSE41-NEXT:    movq %xmm3, (%rdi)
; SSE41-NEXT:    retq
;
; AVX1-LABEL: ssubo_v3i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
; AVX1-NEXT:    vmovq %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v3i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
; AVX2-NEXT:    vmovq %xmm1, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v3i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtd %xmm2, %xmm1, %k0
; AVX512-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
; AVX512-NEXT:    vmovq %xmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<3 x i32>, <3 x i1>} @llvm.ssub.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1)
  %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0
  %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1
  %res = sext <3 x i1> %obit to <3 x i32>
  store <3 x i32> %val, <3 x i32>* %p2
  ret <3 x i32> %res
}

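; The natural 128-bit case: the same (b > 0) ^ (a > r) overflow test, with
; one full-width movdqa/vmovdqa for the value store.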
define <4 x i32> @ssubo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind {
; SSE-LABEL: ssubo_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psubd %xmm1, %xmm3
; SSE-NEXT:    pcmpgtd %xmm2, %xmm1
; SSE-NEXT:    pcmpgtd %xmm3, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    movdqa %xmm3, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: ssubo_v4i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v4i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v4i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtd %xmm2, %xmm1, %k0
; AVX512-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1)
  %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0
  %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1
  %res = sext <4 x i1> %obit to <4 x i32>
  store <4 x i32> %val, <4 x i32>* %p2
  ret <4 x i32> %res
}

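; v6i32 widens to v8i32. Pre-AVX the arguments arrive in GPRs and stack
; slots and must be reassembled into XMM registers first; the value store
; is split 16+8 bytes and the sext'd overflow vector is returned via sret.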
define <6 x i32> @ssubo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind {
; SSE2-LABEL: ssubo_v6i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq %rdi, %rax
; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    movd %r8d, %xmm1
; SSE2-NEXT:    movd %ecx, %xmm2
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    movd %edx, %xmm1
; SSE2-NEXT:    movd %esi, %xmm3
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    movd %r9d, %xmm1
; SSE2-NEXT:    movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    psubd %xmm0, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm3
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm0
; SSE2-NEXT:    pxor %xmm3, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm3
; SSE2-NEXT:    psubd %xmm2, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm1
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm2
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    movq %xmm3, 16(%rcx)
; SSE2-NEXT:    movdqa %xmm4, (%rcx)
; SSE2-NEXT:    movq %xmm2, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: ssubo_v6i32:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movq %rdi, %rax
; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT:    movd %r8d, %xmm1
; SSSE3-NEXT:    movd %ecx, %xmm2
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT:    movd %edx, %xmm1
; SSSE3-NEXT:    movd %esi, %xmm3
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSSE3-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT:    movd %r9d, %xmm1
; SSSE3-NEXT:    movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSSE3-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
; SSSE3-NEXT:    movdqa %xmm3, %xmm4
; SSSE3-NEXT:    psubd %xmm0, %xmm4
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm3
; SSSE3-NEXT:    pxor %xmm5, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm0
; SSSE3-NEXT:    pxor %xmm3, %xmm0
; SSSE3-NEXT:    movdqa %xmm1, %xmm3
; SSSE3-NEXT:    psubd %xmm2, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm1
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm2
; SSSE3-NEXT:    pxor %xmm1, %xmm2
; SSSE3-NEXT:    movq %xmm3, 16(%rcx)
; SSSE3-NEXT:    movdqa %xmm4, (%rcx)
; SSSE3-NEXT:    movq %xmm2, 16(%rdi)
; SSSE3-NEXT:    movdqa %xmm0, (%rdi)
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: ssubo_v6i32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movq %rdi, %rax
; SSE41-NEXT:    movd %esi, %xmm1
; SSE41-NEXT:    pinsrd $1, %edx, %xmm1
; SSE41-NEXT:    pinsrd $2, %ecx, %xmm1
; SSE41-NEXT:    pinsrd $3, %r8d, %xmm1
; SSE41-NEXT:    movd %r9d, %xmm0
; SSE41-NEXT:    pinsrd $1, {{[0-9]+}}(%rsp), %xmm0
; SSE41-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE41-NEXT:    pinsrd $1, {{[0-9]+}}(%rsp), %xmm2
; SSE41-NEXT:    movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE41-NEXT:    pinsrd $1, {{[0-9]+}}(%rsp), %xmm3
; SSE41-NEXT:    pinsrd $2, {{[0-9]+}}(%rsp), %xmm3
; SSE41-NEXT:    pinsrd $3, {{[0-9]+}}(%rsp), %xmm3
; SSE41-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
; SSE41-NEXT:    movdqa %xmm1, %xmm4
; SSE41-NEXT:    psubd %xmm3, %xmm4
; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
; SSE41-NEXT:    pxor %xmm5, %xmm5
; SSE41-NEXT:    pcmpgtd %xmm5, %xmm3
; SSE41-NEXT:    pxor %xmm1, %xmm3
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psubd %xmm2, %xmm1
; SSE41-NEXT:    pcmpgtd %xmm5, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    movq %xmm1, 16(%rcx)
; SSE41-NEXT:    movdqa %xmm4, (%rcx)
; SSE41-NEXT:    movq %xmm0, 16(%rdi)
; SSE41-NEXT:    movdqa %xmm3, (%rdi)
; SSE41-NEXT:    retq
;
; AVX1-LABEL: ssubo_v6i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm4
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm1, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm4, %xmm4
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    vxorps %ymm0, %ymm3, %ymm0
; AVX1-NEXT:    vmovq %xmm2, 16(%rdi)
; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v6i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm1
; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT:    vmovq %xmm2, 16(%rdi)
; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v6i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtd %ymm2, %ymm1, %k0
; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm1
; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT:    vmovq %xmm2, 16(%rdi)
; AVX512-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<6 x i32>, <6 x i1>} @llvm.ssub.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1)
  %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0
  %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1
  %res = sext <6 x i1> %obit to <6 x i32>
  store <6 x i32> %val, <6 x i32>* %p2
  ret <6 x i32> %res
}

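; 256-bit case: AVX1 splits the subtract/compare into 128-bit halves and
; reassembles with vinsertf128; AVX2 stays in YMM registers; AVX512 builds
; the sext through a k-register and a zero-masked vmovdqa32.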
define <8 x i32> @ssubo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind {
; SSE-LABEL: ssubo_v8i32:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm4, %xmm4
; SSE-NEXT:    movdqa %xmm0, %xmm5
; SSE-NEXT:    psubd %xmm2, %xmm5
; SSE-NEXT:    pcmpgtd %xmm4, %xmm2
; SSE-NEXT:    pcmpgtd %xmm5, %xmm0
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psubd %xmm3, %xmm2
; SSE-NEXT:    pcmpgtd %xmm4, %xmm3
; SSE-NEXT:    pcmpgtd %xmm2, %xmm1
; SSE-NEXT:    pxor %xmm3, %xmm1
; SSE-NEXT:    movdqa %xmm2, 16(%rdi)
; SSE-NEXT:    movdqa %xmm5, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: ssubo_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm4
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm1, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm4, %xmm4
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    vxorps %ymm0, %ymm3, %ymm0
; AVX1-NEXT:    vmovdqa %xmm2, 16(%rdi)
; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm1
; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vmovdqa %ymm1, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v8i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtd %ymm2, %ymm1, %k0
; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm1
; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT:    vmovdqa %ymm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<8 x i32>, <8 x i1>} @llvm.ssub.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1)
  %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0
  %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1
  %res = sext <8 x i1> %obit to <8 x i32>
  store <8 x i32> %val, <8 x i32>* %p2
  ret <8 x i32> %res
}

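; 512-bit case: SSE/AVX legalize in 128/256-bit pieces, then narrow the
; overflow masks with packssdw/packsswb and rewiden with pmovsxbd; AVX512
; needs one zmm psubd and a vpternlogd to splat all-ones under the k-mask.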
define <16 x i32> @ssubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2) nounwind {
; SSE-LABEL: ssubo_v16i32:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm9, %xmm9
; SSE-NEXT:    movdqa %xmm0, %xmm8
; SSE-NEXT:    psubd %xmm4, %xmm8
; SSE-NEXT:    pcmpgtd %xmm9, %xmm4
; SSE-NEXT:    pcmpgtd %xmm8, %xmm0
; SSE-NEXT:    pxor %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm4
; SSE-NEXT:    psubd %xmm5, %xmm4
; SSE-NEXT:    pcmpgtd %xmm9, %xmm5
; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
; SSE-NEXT:    pxor %xmm5, %xmm1
; SSE-NEXT:    movdqa %xmm2, %xmm5
; SSE-NEXT:    psubd %xmm6, %xmm5
; SSE-NEXT:    pcmpgtd %xmm9, %xmm6
; SSE-NEXT:    pcmpgtd %xmm5, %xmm2
; SSE-NEXT:    pxor %xmm6, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm6
; SSE-NEXT:    psubd %xmm7, %xmm6
; SSE-NEXT:    pcmpgtd %xmm9, %xmm7
; SSE-NEXT:    pcmpgtd %xmm6, %xmm3
; SSE-NEXT:    pxor %xmm7, %xmm3
; SSE-NEXT:    movdqa %xmm6, 48(%rdi)
; SSE-NEXT:    movdqa %xmm5, 32(%rdi)
; SSE-NEXT:    movdqa %xmm4, 16(%rdi)
; SSE-NEXT:    movdqa %xmm8, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: ssubo_v16i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm4, %xmm6
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
; AVX1-NEXT:    vpsubd %xmm4, %xmm7, %xmm8
; AVX1-NEXT:    vpcmpgtd %xmm8, %xmm7, %xmm7
; AVX1-NEXT:    vpxor %xmm7, %xmm6, %xmm6
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm3, %xmm7
; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm1, %xmm7, %xmm1
; AVX1-NEXT:    vpackssdw %xmm6, %xmm1, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm6, %xmm7
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpsubd %xmm6, %xmm4, %xmm6
; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm4, %xmm4
; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm4
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm2, %xmm5
; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm2
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm0, %xmm5, %xmm0
; AVX1-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm4
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
; AVX1-NEXT:    vpacksswb %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm4
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
; AVX1-NEXT:    vmovdqa %xmm8, 48(%rdi)
; AVX1-NEXT:    vmovdqa %xmm3, 32(%rdi)
; AVX1-NEXT:    vmovdqa %xmm6, 16(%rdi)
; AVX1-NEXT:    vmovdqa %xmm2, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v16i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX2-NEXT:    vpcmpgtd %ymm4, %ymm3, %ymm5
; AVX2-NEXT:    vpsubd %ymm3, %ymm1, %ymm3
; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpxor %ymm1, %ymm5, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm5
; AVX2-NEXT:    vpackssdw %xmm5, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtd %ymm4, %ymm2, %ymm4
; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm2
; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %ymm0, %ymm4, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
; AVX2-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT:    vpacksswb %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT:    vmovdqa %ymm3, 32(%rdi)
; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v16i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtd %zmm2, %zmm1, %k0
; AVX512-NEXT:    vpsubd %zmm1, %zmm0, %zmm1
; AVX512-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT:    vmovdqa64 %zmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<16 x i32>, <16 x i1>} @llvm.ssub.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1)
  %val = extractvalue {<16 x i32>, <16 x i1>} %t, 0
  %obit = extractvalue {<16 x i32>, <16 x i1>} %t, 1
  %res = sext <16 x i1> %obit to <16 x i32>
  store <16 x i32> %val, <16 x i32>* %p2
  ret <16 x i32> %res
}

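; For i8 the overflow check rides on saturation: psubsb and psubb agree
; exactly when no lane overflowed, so pcmpeqb plus a pxor yields the
; overflow mask. Most of the remaining code sext's <16 x i1> to <16 x i32>.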
define <16 x i32> @ssubo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind {
; SSE2-LABEL: ssubo_v16i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psubsb %xmm1, %xmm2
; SSE2-NEXT:    psubb %xmm1, %xmm0
; SSE2-NEXT:    pcmpeqb %xmm0, %xmm2
; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
; SSE2-NEXT:    pxor %xmm2, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    movdqa %xmm1, %xmm4
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    pslld $31, %xmm4
; SSE2-NEXT:    psrad $31, %xmm4
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pslld $31, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    movdqa %xmm3, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    pslld $31, %xmm2
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pslld $31, %xmm3
; SSE2-NEXT:    psrad $31, %xmm3
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    movdqa %xmm4, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: ssubo_v16i8:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa %xmm0, %xmm2
; SSSE3-NEXT:    psubsb %xmm1, %xmm2
; SSSE3-NEXT:    psubb %xmm1, %xmm0
; SSSE3-NEXT:    pcmpeqb %xmm0, %xmm2
; SSSE3-NEXT:    pcmpeqd %xmm3, %xmm3
; SSSE3-NEXT:    pxor %xmm2, %xmm3
; SSSE3-NEXT:    movdqa %xmm3, %xmm1
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    movdqa %xmm1, %xmm4
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    pslld $31, %xmm4
; SSSE3-NEXT:    psrad $31, %xmm4
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    pslld $31, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT:    movdqa %xmm3, %xmm2
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    pslld $31, %xmm2
; SSSE3-NEXT:    psrad $31, %xmm2
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    pslld $31, %xmm3
; SSSE3-NEXT:    psrad $31, %xmm3
; SSSE3-NEXT:    movdqa %xmm0, (%rdi)
; SSSE3-NEXT:    movdqa %xmm4, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: ssubo_v16i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psubsb %xmm1, %xmm2
; SSE41-NEXT:    psubb %xmm1, %xmm0
; SSE41-NEXT:    pcmpeqb %xmm0, %xmm2
; SSE41-NEXT:    pcmpeqd %xmm3, %xmm3
; SSE41-NEXT:    pxor %xmm2, %xmm3
; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; SSE41-NEXT:    pslld $31, %xmm4
; SSE41-NEXT:    psrad $31, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE41-NEXT:    pslld $31, %xmm1
; SSE41-NEXT:    psrad $31, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
; SSE41-NEXT:    pslld $31, %xmm2
; SSE41-NEXT:    psrad $31, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; SSE41-NEXT:    pslld $31, %xmm3
; SSE41-NEXT:    psrad $31, %xmm3
; SSE41-NEXT:    movdqa %xmm0, (%rdi)
; SSE41-NEXT:    movdqa %xmm4, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: ssubo_v16i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm3
; AVX1-NEXT:    vpcmpeqb %xmm2, %xmm3, %xmm0
; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT:    vmovdqa %xmm3, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v16i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpsubb %xmm1, %xmm0, %xmm3
; AVX2-NEXT:    vpcmpeqb %xmm2, %xmm3, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT:    vmovdqa %xmm3, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v16i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm2
; AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpneqb %xmm2, %xmm1, %k1
; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<16 x i8>, <16 x i1>} @llvm.ssub.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
  %val = extractvalue {<16 x i8>, <16 x i1>} %t, 0
  %obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1
  %res = sext <16 x i1> %obit to <16 x i32>
  store <16 x i8> %val, <16 x i8>* %p2
  ret <16 x i32> %res
}

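; The same saturation trick with psubsw/psubw for i16, followed by the
; <8 x i1> -> <8 x i32> sign extension.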
define <8 x i32> @ssubo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind {
; SSE2-LABEL: ssubo_v8i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psubsw %xmm1, %xmm2
; SSE2-NEXT:    psubw %xmm1, %xmm0
; SSE2-NEXT:    pcmpeqw %xmm0, %xmm2
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    pslld $31, %xmm2
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pslld $31, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: ssubo_v8i16:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa %xmm0, %xmm2
; SSSE3-NEXT:    psubsw %xmm1, %xmm2
; SSSE3-NEXT:    psubw %xmm1, %xmm0
; SSSE3-NEXT:    pcmpeqw %xmm0, %xmm2
; SSSE3-NEXT:    pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT:    pxor %xmm2, %xmm1
; SSSE3-NEXT:    movdqa %xmm1, %xmm2
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    pslld $31, %xmm2
; SSSE3-NEXT:    psrad $31, %xmm2
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    pslld $31, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    movdqa %xmm0, (%rdi)
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: ssubo_v8i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psubsw %xmm1, %xmm2
; SSE41-NEXT:    psubw %xmm1, %xmm0
; SSE41-NEXT:    pcmpeqw %xmm0, %xmm2
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE41-NEXT:    pxor %xmm2, %xmm1
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT:    pslld $31, %xmm2
; SSE41-NEXT:    psrad $31, %xmm2
; SSE41-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE41-NEXT:    pslld $31, %xmm1
; SSE41-NEXT:    psrad $31, %xmm1
; SSE41-NEXT:    movdqa %xmm0, (%rdi)
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: ssubo_v8i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm0
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v8i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpsubw %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v8i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm2
; AVX512-NEXT:    vpsubw %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpneqw %xmm2, %xmm1, %k1
; AVX512-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<8 x i16>, <8 x i1>} @llvm.ssub.with.overflow.v8i16(<8 x i16> %a0, <8 x i16> %a1)
  %val = extractvalue {<8 x i16>, <8 x i1>} %t, 0
  %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1
  %res = sext <8 x i1> %obit to <8 x i32>
  store <8 x i16> %val, <8 x i16>* %p2
  ret <8 x i32> %res
}

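; i64 lanes: AVX has vpcmpgtq, so the (b > 0) ^ (a > r) test carries over
; directly; SSE2 lacks a 64-bit signed compare and emulates it with
; sign-bit-flipped pcmpgtd/pcmpeqd shuffle sequences.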
define <2 x i32> @ssubo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind {
; SSE-LABEL: ssubo_v2i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    pxor %xmm2, %xmm3
; SSE-NEXT:    psubq %xmm1, %xmm0
; SSE-NEXT:    movdqa %xmm0, (%rdi)
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE-NEXT:    pcmpeqd %xmm3, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    pand %xmm5, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE-NEXT:    por %xmm0, %xmm3
; SSE-NEXT:    pxor %xmm2, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
; SSE-NEXT:    pcmpeqd %xmm2, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    pand %xmm4, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT:    retq
;
; AVX1-LABEL: ssubo_v2i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm2
; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v2i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v2i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtq %xmm2, %xmm1, %k0
; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<2 x i64>, <2 x i1>} @llvm.ssub.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1)
  %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0
  %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1
  %res = sext <2 x i1> %obit to <2 x i32>
  store <2 x i64> %val, <2 x i64>* %p2
  ret <2 x i32> %res
}

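; Illegal i24 lanes are promoted to i32: pslld $8/psrad $8 sign-extends,
; the subtract runs as i32, and overflow is flagged where re-truncating
; changes the value. Each 3-byte lane is stored as a movw plus a movb.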
define <4 x i32> @ssubo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind {
; SSE2-LABEL: ssubo_v4i24:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pslld $8, %xmm1
; SSE2-NEXT:    psrad $8, %xmm1
; SSE2-NEXT:    pslld $8, %xmm2
; SSE2-NEXT:    psrad $8, %xmm2
; SSE2-NEXT:    psubd %xmm1, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    pslld $8, %xmm0
; SSE2-NEXT:    psrad $8, %xmm0
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm2, %eax
; SSE2-NEXT:    movw %ax, (%rdi)
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[3,3,3,3]
; SSE2-NEXT:    movd %xmm1, %ecx
; SSE2-NEXT:    movw %cx, 9(%rdi)
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; SSE2-NEXT:    movd %xmm1, %edx
; SSE2-NEXT:    movw %dx, 6(%rdi)
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
; SSE2-NEXT:    movd %xmm1, %esi
; SSE2-NEXT:    movw %si, 3(%rdi)
; SSE2-NEXT:    shrl $16, %eax
; SSE2-NEXT:    movb %al, 2(%rdi)
; SSE2-NEXT:    shrl $16, %ecx
; SSE2-NEXT:    movb %cl, 11(%rdi)
; SSE2-NEXT:    shrl $16, %edx
; SSE2-NEXT:    movb %dl, 8(%rdi)
; SSE2-NEXT:    shrl $16, %esi
; SSE2-NEXT:    movb %sil, 5(%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: ssubo_v4i24:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa %xmm0, %xmm2
; SSSE3-NEXT:    pslld $8, %xmm1
; SSSE3-NEXT:    psrad $8, %xmm1
; SSSE3-NEXT:    pslld $8, %xmm2
; SSSE3-NEXT:    psrad $8, %xmm2
; SSSE3-NEXT:    psubd %xmm1, %xmm2
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    pslld $8, %xmm0
; SSSE3-NEXT:    psrad $8, %xmm0
; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm0
; SSSE3-NEXT:    pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT:    pxor %xmm1, %xmm0
; SSSE3-NEXT:    movd %xmm2, %eax
; SSSE3-NEXT:    movw %ax, (%rdi)
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[3,3,3,3]
; SSSE3-NEXT:    movd %xmm1, %ecx
; SSSE3-NEXT:    movw %cx, 9(%rdi)
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; SSSE3-NEXT:    movd %xmm1, %edx
; SSSE3-NEXT:    movw %dx, 6(%rdi)
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
; SSSE3-NEXT:    movd %xmm1, %esi
; SSSE3-NEXT:    movw %si, 3(%rdi)
; SSSE3-NEXT:    shrl $16, %eax
; SSSE3-NEXT:    movb %al, 2(%rdi)
; SSSE3-NEXT:    shrl $16, %ecx
; SSSE3-NEXT:    movb %cl, 11(%rdi)
; SSSE3-NEXT:    shrl $16, %edx
; SSSE3-NEXT:    movb %dl, 8(%rdi)
; SSSE3-NEXT:    shrl $16, %esi
; SSSE3-NEXT:    movb %sil, 5(%rdi)
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: ssubo_v4i24:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    pslld $8, %xmm1
; SSE41-NEXT:    psrad $8, %xmm1
; SSE41-NEXT:    pslld $8, %xmm2
; SSE41-NEXT:    psrad $8, %xmm2
; SSE41-NEXT:    psubd %xmm1, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    pslld $8, %xmm0
; SSE41-NEXT:    psrad $8, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE41-NEXT:    pxor %xmm1, %xmm0
; SSE41-NEXT:    pextrd $3, %xmm2, %eax
; SSE41-NEXT:    movw %ax, 9(%rdi)
; SSE41-NEXT:    pextrd $2, %xmm2, %ecx
; SSE41-NEXT:    movw %cx, 6(%rdi)
; SSE41-NEXT:    pextrd $1, %xmm2, %edx
; SSE41-NEXT:    movw %dx, 3(%rdi)
; SSE41-NEXT:    movd %xmm2, %esi
; SSE41-NEXT:    movw %si, (%rdi)
; SSE41-NEXT:    shrl $16, %eax
; SSE41-NEXT:    movb %al, 11(%rdi)
; SSE41-NEXT:    shrl $16, %ecx
; SSE41-NEXT:    movb %cl, 8(%rdi)
; SSE41-NEXT:    shrl $16, %edx
; SSE41-NEXT:    movb %dl, 5(%rdi)
; SSE41-NEXT:    shrl $16, %esi
; SSE41-NEXT:    movb %sil, 2(%rdi)
; SSE41-NEXT:    retq
;
; AVX1-LABEL: ssubo_v4i24:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslld $8, %xmm1, %xmm1
; AVX1-NEXT:    vpsrad $8, %xmm1, %xmm1
; AVX1-NEXT:    vpslld $8, %xmm0, %xmm0
; AVX1-NEXT:    vpsrad $8, %xmm0, %xmm0
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpslld $8, %xmm1, %xmm0
; AVX1-NEXT:    vpsrad $8, %xmm0, %xmm0
; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
; AVX1-NEXT:    movw %ax, 9(%rdi)
; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
; AVX1-NEXT:    movw %cx, 6(%rdi)
; AVX1-NEXT:    vpextrd $1, %xmm1, %edx
; AVX1-NEXT:    movw %dx, 3(%rdi)
; AVX1-NEXT:    vmovd %xmm1, %esi
; AVX1-NEXT:    movw %si, (%rdi)
; AVX1-NEXT:    shrl $16, %eax
; AVX1-NEXT:    movb %al, 11(%rdi)
; AVX1-NEXT:    shrl $16, %ecx
; AVX1-NEXT:    movb %cl, 8(%rdi)
; AVX1-NEXT:    shrl $16, %edx
; AVX1-NEXT:    movb %dl, 5(%rdi)
; AVX1-NEXT:    shrl $16, %esi
; AVX1-NEXT:    movb %sil, 2(%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v4i24:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpslld $8, %xmm1, %xmm1
; AVX2-NEXT:    vpsrad $8, %xmm1, %xmm1
; AVX2-NEXT:    vpslld $8, %xmm0, %xmm0
; AVX2-NEXT:    vpsrad $8, %xmm0, %xmm0
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpslld $8, %xmm1, %xmm0
; AVX2-NEXT:    vpsrad $8, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpextrd $3, %xmm1, %eax
; AVX2-NEXT:    movw %ax, 9(%rdi)
; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
; AVX2-NEXT:    movw %cx, 6(%rdi)
; AVX2-NEXT:    vpextrd $1, %xmm1, %edx
; AVX2-NEXT:    movw %dx, 3(%rdi)
; AVX2-NEXT:    vmovd %xmm1, %esi
; AVX2-NEXT:    movw %si, (%rdi)
; AVX2-NEXT:    shrl $16, %eax
; AVX2-NEXT:    movb %al, 11(%rdi)
; AVX2-NEXT:    shrl $16, %ecx
; AVX2-NEXT:    movb %cl, 8(%rdi)
; AVX2-NEXT:    shrl $16, %edx
; AVX2-NEXT:    movb %dl, 5(%rdi)
; AVX2-NEXT:    shrl $16, %esi
; AVX2-NEXT:    movb %sil, 2(%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v4i24:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpslld $8, %xmm1, %xmm1
; AVX512-NEXT:    vpsrad $8, %xmm1, %xmm1
; AVX512-NEXT:    vpslld $8, %xmm0, %xmm0
; AVX512-NEXT:    vpsrad $8, %xmm0, %xmm0
; AVX512-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpslld $8, %xmm1, %xmm0
; AVX512-NEXT:    vpsrad $8, %xmm0, %xmm0
; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vpextrd $3, %xmm1, %eax
; AVX512-NEXT:    movw %ax, 9(%rdi)
; AVX512-NEXT:    vpextrd $2, %xmm1, %ecx
; AVX512-NEXT:    movw %cx, 6(%rdi)
; AVX512-NEXT:    vpextrd $1, %xmm1, %edx
; AVX512-NEXT:    movw %dx, 3(%rdi)
; AVX512-NEXT:    vmovd %xmm1, %esi
; AVX512-NEXT:    movw %si, (%rdi)
; AVX512-NEXT:    shrl $16, %eax
; AVX512-NEXT:    movb %al, 11(%rdi)
; AVX512-NEXT:    shrl $16, %ecx
; AVX512-NEXT:    movb %cl, 8(%rdi)
; AVX512-NEXT:    shrl $16, %edx
; AVX512-NEXT:    movb %dl, 5(%rdi)
; AVX512-NEXT:    shrl $16, %esi
; AVX512-NEXT:    movb %sil, 2(%rdi)
; AVX512-NEXT:    retq
  %t = call {<4 x i24>, <4 x i1>} @llvm.ssub.with.overflow.v4i24(<4 x i24> %a0, <4 x i24> %a1)
  %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0
  %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1
  %res = sext <4 x i1> %obit to <4 x i32>
  store <4 x i24> %val, <4 x i24>* %p2
  ret <4 x i32> %res
}

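; v4i1 lives in the low bit of each i32 lane: pslld $31/psrad $31
; sign-fills before the subtract, overflow is flagged where truncation
; back to i1 changes the result, and movmskps packs the four value bits
; for the 1-byte store.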
define <4 x i32> @ssubo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind {
; SSE-LABEL: ssubo_v4i1:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    pslld $31, %xmm0
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    pslld $31, %xmm1
; SSE-NEXT:    movmskps %xmm1, %eax
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm0, %xmm1
; SSE-NEXT:    movb %al, (%rdi)
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: ssubo_v4i1:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpslld $31, %xmm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vmovmskps %xmm1, %eax
; AVX1-NEXT:    movb %al, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v4i1:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpslld $31, %xmm1, %xmm1
; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX2-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpslld $31, %xmm0, %xmm1
; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vmovmskps %xmm1, %eax
; AVX2-NEXT:    movb %al, (%rdi)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v4i1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpslld $31, %xmm1, %xmm1
; AVX512-NEXT:    vptestmd %xmm1, %xmm1, %k0
; AVX512-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX512-NEXT:    vptestmd %xmm0, %xmm0, %k1
; AVX512-NEXT:    vptestnmd %xmm1, %xmm1, %k2 {%k1}
; AVX512-NEXT:    kxorw %k0, %k1, %k0
; AVX512-NEXT:    kxorw %k2, %k0, %k1
; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT:    kshiftlw $12, %k0, %k0
; AVX512-NEXT:    kshiftrw $12, %k0, %k0
; AVX512-NEXT:    kmovd %k0, %eax
; AVX512-NEXT:    movb %al, (%rdi)
; AVX512-NEXT:    retq
  %t = call {<4 x i1>, <4 x i1>} @llvm.ssub.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1)
  %val = extractvalue {<4 x i1>, <4 x i1>} %t, 0
  %obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1
  %res = sext <4 x i1> %obit to <4 x i32>
  store <4 x i1> %val, <4 x i1>* %p2
  ret <4 x i32> %res
}

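; i128 lanes are scalarized through GPR pairs: subq/sbbq with seto per
; lane. The SSE/AVX paths negate the seto bytes into vector lanes; AVX512
; routes the two overflow bits through a k-register instead.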
define <2 x i32> @ssubo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind {
; SSE2-LABEL: ssubo_v2i128:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq {{[0-9]+}}(%rsp), %r10
; SSE2-NEXT:    subq %r8, %rdi
; SSE2-NEXT:    sbbq %r9, %rsi
; SSE2-NEXT:    seto %r8b
; SSE2-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
; SSE2-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT:    seto %al
; SSE2-NEXT:    movzbl %al, %eax
; SSE2-NEXT:    negl %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    movzbl %r8b, %eax
; SSE2-NEXT:    negl %eax
; SSE2-NEXT:    movd %eax, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    movq %rdx, 16(%r10)
; SSE2-NEXT:    movq %rdi, (%r10)
; SSE2-NEXT:    movq %rcx, 24(%r10)
; SSE2-NEXT:    movq %rsi, 8(%r10)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: ssubo_v2i128:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movq {{[0-9]+}}(%rsp), %r10
; SSSE3-NEXT:    subq %r8, %rdi
; SSSE3-NEXT:    sbbq %r9, %rsi
; SSSE3-NEXT:    seto %r8b
; SSSE3-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
; SSSE3-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
; SSSE3-NEXT:    seto %al
; SSSE3-NEXT:    movzbl %al, %eax
; SSSE3-NEXT:    negl %eax
; SSSE3-NEXT:    movd %eax, %xmm1
; SSSE3-NEXT:    movzbl %r8b, %eax
; SSSE3-NEXT:    negl %eax
; SSSE3-NEXT:    movd %eax, %xmm0
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT:    movq %rdx, 16(%r10)
; SSSE3-NEXT:    movq %rdi, (%r10)
; SSSE3-NEXT:    movq %rcx, 24(%r10)
; SSSE3-NEXT:    movq %rsi, 8(%r10)
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: ssubo_v2i128:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movq {{[0-9]+}}(%rsp), %r10
; SSE41-NEXT:    subq %r8, %rdi
; SSE41-NEXT:    sbbq %r9, %rsi
; SSE41-NEXT:    seto %r8b
; SSE41-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
; SSE41-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
; SSE41-NEXT:    seto %al
; SSE41-NEXT:    movzbl %al, %r9d
; SSE41-NEXT:    negl %r9d
; SSE41-NEXT:    movzbl %r8b, %eax
; SSE41-NEXT:    negl %eax
; SSE41-NEXT:    movd %eax, %xmm0
; SSE41-NEXT:    pinsrd $1, %r9d, %xmm0
; SSE41-NEXT:    movq %rdx, 16(%r10)
; SSE41-NEXT:    movq %rdi, (%r10)
; SSE41-NEXT:    movq %rcx, 24(%r10)
; SSE41-NEXT:    movq %rsi, 8(%r10)
; SSE41-NEXT:    retq
;
; AVX1-LABEL: ssubo_v2i128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r10
; AVX1-NEXT:    subq %r8, %rdi
; AVX1-NEXT:    sbbq %r9, %rsi
; AVX1-NEXT:    seto %r8b
; AVX1-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
; AVX1-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
; AVX1-NEXT:    seto %al
; AVX1-NEXT:    movzbl %al, %r9d
; AVX1-NEXT:    negl %r9d
; AVX1-NEXT:    movzbl %r8b, %eax
; AVX1-NEXT:    negl %eax
; AVX1-NEXT:    vmovd %eax, %xmm0
; AVX1-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
; AVX1-NEXT:    movq %rdx, 16(%r10)
; AVX1-NEXT:    movq %rdi, (%r10)
; AVX1-NEXT:    movq %rcx, 24(%r10)
; AVX1-NEXT:    movq %rsi, 8(%r10)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: ssubo_v2i128:
; AVX2:       # %bb.0:
; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r10
; AVX2-NEXT:    subq %r8, %rdi
; AVX2-NEXT:    sbbq %r9, %rsi
; AVX2-NEXT:    seto %r8b
; AVX2-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
; AVX2-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
; AVX2-NEXT:    seto %al
; AVX2-NEXT:    movzbl %al, %r9d
; AVX2-NEXT:    negl %r9d
; AVX2-NEXT:    movzbl %r8b, %eax
; AVX2-NEXT:    negl %eax
; AVX2-NEXT:    vmovd %eax, %xmm0
; AVX2-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
; AVX2-NEXT:    movq %rdx, 16(%r10)
; AVX2-NEXT:    movq %rdi, (%r10)
; AVX2-NEXT:    movq %rcx, 24(%r10)
; AVX2-NEXT:    movq %rsi, 8(%r10)
; AVX2-NEXT:    retq
;
; AVX512-LABEL: ssubo_v2i128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
; AVX512-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
; AVX512-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
; AVX512-NEXT:    seto %al
; AVX512-NEXT:    kmovd %eax, %k0
; AVX512-NEXT:    subq %r8, %rdi
; AVX512-NEXT:    sbbq %r9, %rsi
; AVX512-NEXT:    seto %al
; AVX512-NEXT:    andl $1, %eax
; AVX512-NEXT:    kmovw %eax, %k1
; AVX512-NEXT:    kshiftlw $1, %k0, %k0
; AVX512-NEXT:    korw %k0, %k1, %k1
; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT:    movq %rdx, 16(%r10)
; AVX512-NEXT:    movq %rdi, (%r10)
; AVX512-NEXT:    movq %rcx, 24(%r10)
; AVX512-NEXT:    movq %rsi, 8(%r10)
; AVX512-NEXT:    retq
  %t = call {<2 x i128>, <2 x i1>} @llvm.ssub.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1)
  %val = extractvalue {<2 x i128>, <2 x i1>} %t, 0
  %obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1
  %res = sext <2 x i1> %obit to <2 x i32>
  store <2 x i128> %val, <2 x i128>* %p2
  ret <2 x i32> %res
}