; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
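
; The llvm.ssub.sat.* intrinsics perform signed saturating subtraction: on
; overflow the result clamps to the signed minimum or maximum of the element
; type (e.g. [-128, 127] for i8 lanes) instead of wrapping.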

declare <1 x i8> @llvm.ssub.sat.v1i8(<1 x i8>, <1 x i8>)
declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>)
declare <12 x i8> @llvm.ssub.sat.v12i8(<12 x i8>, <12 x i8>)
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
declare <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8>, <64 x i8>)

declare <1 x i16> @llvm.ssub.sat.v1i16(<1 x i16>, <1 x i16>)
declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <12 x i16> @llvm.ssub.sat.v12i16(<12 x i16>, <12 x i16>)
declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16>, <32 x i16>)

declare <16 x i1> @llvm.ssub.sat.v16i1(<16 x i1>, <16 x i1>)
declare <16 x i4> @llvm.ssub.sat.v16i4(<16 x i4>, <16 x i4>)

declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>)
declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>)

declare <4 x i24> @llvm.ssub.sat.v4i24(<4 x i24>, <4 x i24>)
declare <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128>, <2 x i128>)

; Legal types, depending on architecture.
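; As the checks below show, these lower to a single psubsb/psubsw (SSE) or
; vpsubsb/vpsubsw (AVX); vectors wider than the largest legal register are
; split into multiple such instructions.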

define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; SSE-LABEL: v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %z = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
  ret <16 x i8> %z
}

define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) nounwind {
; SSE-LABEL: v32i8:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsb %xmm2, %xmm0
; SSE-NEXT:    psubsb %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: v32i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpsubsb %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v32i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v32i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    retq
  %z = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %x, <32 x i8> %y)
  ret <32 x i8> %z
}

define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) nounwind {
; SSE-LABEL: v64i8:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsb %xmm4, %xmm0
; SSE-NEXT:    psubsb %xmm5, %xmm1
; SSE-NEXT:    psubsb %xmm6, %xmm2
; SSE-NEXT:    psubsb %xmm7, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: v64i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vpsubsb %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vpsubsb %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT:    vpsubsb %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpsubsb %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v64i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsubsb %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpsubsb %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v64i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsb %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    retq
  %z = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %x, <64 x i8> %y)
  ret <64 x i8> %z
}

define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; SSE-LABEL: v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %z = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
  ret <8 x i16> %z
}

define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) nounwind {
; SSE-LABEL: v16i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsw %xmm2, %xmm0
; SSE-NEXT:    psubsw %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: v16i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpsubsw %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v16i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v16i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    retq
  %z = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %x, <16 x i16> %y)
  ret <16 x i16> %z
}

define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
; SSE-LABEL: v32i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsw %xmm4, %xmm0
; SSE-NEXT:    psubsw %xmm5, %xmm1
; SSE-NEXT:    psubsw %xmm6, %xmm2
; SSE-NEXT:    psubsw %xmm7, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: v32i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vpsubsw %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vpsubsw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT:    vpsubsw %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpsubsw %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v32i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsubsw %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpsubsw %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v32i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsw %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    retq
  %z = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %x, <32 x i16> %y)
  ret <32 x i16> %z
}

; Too narrow vectors, legalized by widening.
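; The operands are loaded into the low lanes of an xmm register, the full
; 128-bit saturating instruction runs, and only the original vector width is
; stored back.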

define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
; SSE-LABEL: v8i8:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    psubsb %xmm1, %xmm0
; SSE-NEXT:    movq %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, (%rdx)
; AVX-NEXT:    retq
  %x = load <8 x i8>, <8 x i8>* %px
  %y = load <8 x i8>, <8 x i8>* %py
  %z = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
  store <8 x i8> %z, <8 x i8>* %pz
  ret void
}

define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
; SSE-LABEL: v4i8:
; SSE:       # %bb.0:
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT:    psubsb %xmm1, %xmm0
; SSE-NEXT:    movd %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: v4i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, (%rdx)
; AVX-NEXT:    retq
  %x = load <4 x i8>, <4 x i8>* %px
  %y = load <4 x i8>, <4 x i8>* %py
  %z = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
  store <4 x i8> %z, <4 x i8>* %pz
  ret void
}

define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
; SSE2-LABEL: v2i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movzwl (%rdi), %eax
; SSE2-NEXT:    movd %eax, %xmm0
; SSE2-NEXT:    movzwl (%rsi), %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    psubsb %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    movw %ax, (%rdx)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: v2i8:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movzwl (%rdi), %eax
; SSSE3-NEXT:    movd %eax, %xmm0
; SSSE3-NEXT:    movzwl (%rsi), %eax
; SSSE3-NEXT:    movd %eax, %xmm1
; SSSE3-NEXT:    psubsb %xmm1, %xmm0
; SSSE3-NEXT:    movd %xmm0, %eax
; SSSE3-NEXT:    movw %ax, (%rdx)
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: v2i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movzwl (%rdi), %eax
; SSE41-NEXT:    movd %eax, %xmm0
; SSE41-NEXT:    movzwl (%rsi), %eax
; SSE41-NEXT:    movd %eax, %xmm1
; SSE41-NEXT:    psubsb %xmm1, %xmm0
; SSE41-NEXT:    pextrw $0, %xmm0, (%rdx)
; SSE41-NEXT:    retq
;
; AVX-LABEL: v2i8:
; AVX:       # %bb.0:
; AVX-NEXT:    movzwl (%rdi), %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    movzwl (%rsi), %eax
; AVX-NEXT:    vmovd %eax, %xmm1
; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpextrw $0, %xmm0, (%rdx)
; AVX-NEXT:    retq
  %x = load <2 x i8>, <2 x i8>* %px
  %y = load <2 x i8>, <2 x i8>* %py
  %z = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
  store <2 x i8> %z, <2 x i8>* %pz
  ret void
}

define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
; SSE-LABEL: v4i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    psubsw %xmm1, %xmm0
; SSE-NEXT:    movq %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: v4i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovq %xmm0, (%rdx)
; AVX-NEXT:    retq
  %x = load <4 x i16>, <4 x i16>* %px
  %y = load <4 x i16>, <4 x i16>* %py
  %z = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
  store <4 x i16> %z, <4 x i16>* %pz
  ret void
}

define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
; SSE-LABEL: v2i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT:    psubsw %xmm1, %xmm0
; SSE-NEXT:    movd %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: v2i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, (%rdx)
; AVX-NEXT:    retq
  %x = load <2 x i16>, <2 x i16>* %px
  %y = load <2 x i16>, <2 x i16>* %py
  %z = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
  store <2 x i16> %z, <2 x i16>* %pz
  ret void
}

define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
; SSE-LABEL: v12i8:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v12i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %z = call <12 x i8> @llvm.ssub.sat.v12i8(<12 x i8> %x, <12 x i8> %y)
  ret <12 x i8> %z
}

define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
; SSE-LABEL: v12i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    movdqa 16(%rdi), %xmm1
; SSE-NEXT:    psubsw (%rsi), %xmm0
; SSE-NEXT:    psubsw 16(%rsi), %xmm1
; SSE-NEXT:    movq %xmm1, 16(%rdx)
; SSE-NEXT:    movdqa %xmm0, (%rdx)
; SSE-NEXT:    retq
;
; AVX1-LABEL: v12i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX1-NEXT:    vpsubsw 16(%rsi), %xmm1, %xmm1
; AVX1-NEXT:    vpsubsw (%rsi), %xmm0, %xmm0
; AVX1-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX1-NEXT:    vmovq %xmm1, 16(%rdx)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v12i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
; AVX2-NEXT:    vpsubsw (%rsi), %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vmovq %xmm1, 16(%rdx)
; AVX2-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v12i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512-NEXT:    vpsubsw (%rsi), %ymm0, %ymm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vmovq %xmm1, 16(%rdx)
; AVX512-NEXT:    vmovdqa %xmm0, (%rdx)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %x = load <12 x i16>, <12 x i16>* %px
  %y = load <12 x i16>, <12 x i16>* %py
  %z = call <12 x i16> @llvm.ssub.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
  store <12 x i16> %z, <12 x i16>* %pz
  ret void
}

; Scalarization
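; Single-element vectors lower to scalar code: a plain sub plus a cmov that
; substitutes the precomputed saturation constant when the overflow flag is
; set.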

define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
; SSE-LABEL: v1i8:
; SSE:       # %bb.0:
; SSE-NEXT:    movb (%rdi), %al
; SSE-NEXT:    movb (%rsi), %cl
; SSE-NEXT:    xorl %esi, %esi
; SSE-NEXT:    cmpb %cl, %al
; SSE-NEXT:    setns %sil
; SSE-NEXT:    addl $127, %esi
; SSE-NEXT:    subb %cl, %al
; SSE-NEXT:    movzbl %al, %eax
; SSE-NEXT:    cmovol %esi, %eax
; SSE-NEXT:    movb %al, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: v1i8:
; AVX:       # %bb.0:
; AVX-NEXT:    movb (%rdi), %al
; AVX-NEXT:    movb (%rsi), %cl
; AVX-NEXT:    xorl %esi, %esi
; AVX-NEXT:    cmpb %cl, %al
; AVX-NEXT:    setns %sil
; AVX-NEXT:    addl $127, %esi
; AVX-NEXT:    subb %cl, %al
; AVX-NEXT:    movzbl %al, %eax
; AVX-NEXT:    cmovol %esi, %eax
; AVX-NEXT:    movb %al, (%rdx)
; AVX-NEXT:    retq
  %x = load <1 x i8>, <1 x i8>* %px
  %y = load <1 x i8>, <1 x i8>* %py
  %z = call <1 x i8> @llvm.ssub.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
  store <1 x i8> %z, <1 x i8>* %pz
  ret void
}

define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
; SSE-LABEL: v1i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movzwl (%rdi), %eax
; SSE-NEXT:    movzwl (%rsi), %ecx
; SSE-NEXT:    xorl %esi, %esi
; SSE-NEXT:    cmpw %cx, %ax
; SSE-NEXT:    setns %sil
; SSE-NEXT:    addl $32767, %esi # imm = 0x7FFF
; SSE-NEXT:    subw %cx, %ax
; SSE-NEXT:    cmovol %esi, %eax
; SSE-NEXT:    movw %ax, (%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: v1i16:
; AVX:       # %bb.0:
; AVX-NEXT:    movzwl (%rdi), %eax
; AVX-NEXT:    movzwl (%rsi), %ecx
; AVX-NEXT:    xorl %esi, %esi
; AVX-NEXT:    cmpw %cx, %ax
; AVX-NEXT:    setns %sil
; AVX-NEXT:    addl $32767, %esi # imm = 0x7FFF
; AVX-NEXT:    subw %cx, %ax
; AVX-NEXT:    cmovol %esi, %eax
; AVX-NEXT:    movw %ax, (%rdx)
; AVX-NEXT:    retq
  %x = load <1 x i16>, <1 x i16>* %px
  %y = load <1 x i16>, <1 x i16>* %py
  %z = call <1 x i16> @llvm.ssub.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
  store <1 x i16> %z, <1 x i16>* %pz
  ret void
}

; Promotion
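; i1/i4 elements are promoted to i8: shifted into the high bits (psllw),
; saturated there with psubsb, then moved back into the low bits. AVX512
; handles the i1 case entirely in mask registers instead.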

define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) nounwind {
; SSE-LABEL: v16i4:
; SSE:       # %bb.0:
; SSE-NEXT:    psllw $4, %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    psllw $4, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    psubsb %xmm1, %xmm0
; SSE-NEXT:    psrlw $4, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    psubb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v16i4:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllw $4, %xmm1, %xmm1
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %z = call <16 x i4> @llvm.ssub.sat.v16i4(<16 x i4> %x, <16 x i4> %y)
  ret <16 x i4> %z
}

define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) nounwind {
; SSE-LABEL: v16i1:
; SSE:       # %bb.0:
; SSE-NEXT:    psllw $7, %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    psllw $7, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    psubsb %xmm1, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpgtb %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: v16i1:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v16i1:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsllw $7, %xmm1, %xmm1
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllw $7, %xmm0, %xmm0
; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v16i1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllw $7, %xmm0, %xmm0
; AVX512-NEXT:    vpmovb2m %xmm0, %k0
; AVX512-NEXT:    vpsllw $7, %xmm1, %xmm0
; AVX512-NEXT:    vpmovb2m %xmm0, %k1
; AVX512-NEXT:    kandnw %k0, %k1, %k0
; AVX512-NEXT:    vpmovm2b %k0, %xmm0
; AVX512-NEXT:    retq
  %z = call <16 x i1> @llvm.ssub.sat.v16i1(<16 x i1> %x, <16 x i1> %y)
  ret <16 x i1> %z
}

; Expanded
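; x86 has no saturating subtract for 32/64-bit elements, so these expand to a
; plain psubd/psubq plus sign-comparison overflow checks that blend in the
; saturation constant (INT_MIN/INT_MAX) when the subtraction overflows.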
 | 
						|
 | 
						|
define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
 | 
						|
; SSE2-LABEL: v2i32:
 | 
						|
; SSE2:       # %bb.0:
 | 
						|
; SSE2-NEXT:    pxor %xmm2, %xmm2
 | 
						|
; SSE2-NEXT:    movdqa %xmm0, %xmm3
 | 
						|
; SSE2-NEXT:    psubd %xmm1, %xmm3
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm0
 | 
						|
; SSE2-NEXT:    pxor %xmm1, %xmm0
 | 
						|
; SSE2-NEXT:    movdqa %xmm3, %xmm1
 | 
						|
; SSE2-NEXT:    pandn {{.*}}(%rip), %xmm1
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm2
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm2
 | 
						|
; SSE2-NEXT:    por %xmm2, %xmm1
 | 
						|
; SSE2-NEXT:    pand %xmm0, %xmm1
 | 
						|
; SSE2-NEXT:    pandn %xmm3, %xmm0
 | 
						|
; SSE2-NEXT:    por %xmm1, %xmm0
 | 
						|
; SSE2-NEXT:    retq
 | 
						|
;
 | 
						|
; SSSE3-LABEL: v2i32:
 | 
						|
; SSSE3:       # %bb.0:
 | 
						|
; SSSE3-NEXT:    pxor %xmm2, %xmm2
 | 
						|
; SSSE3-NEXT:    movdqa %xmm0, %xmm3
 | 
						|
; SSSE3-NEXT:    psubd %xmm1, %xmm3
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
 | 
						|
; SSSE3-NEXT:    pxor %xmm1, %xmm0
 | 
						|
; SSSE3-NEXT:    movdqa %xmm3, %xmm1
 | 
						|
; SSSE3-NEXT:    pandn {{.*}}(%rip), %xmm1
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm2
 | 
						|
; SSSE3-NEXT:    por %xmm2, %xmm1
 | 
						|
; SSSE3-NEXT:    pand %xmm0, %xmm1
 | 
						|
; SSSE3-NEXT:    pandn %xmm3, %xmm0
 | 
						|
; SSSE3-NEXT:    por %xmm1, %xmm0
 | 
						|
; SSSE3-NEXT:    retq
 | 
						|
;
 | 
						|
; SSE41-LABEL: v2i32:
 | 
						|
; SSE41:       # %bb.0:
 | 
						|
; SSE41-NEXT:    movdqa %xmm0, %xmm2
 | 
						|
; SSE41-NEXT:    pxor %xmm0, %xmm0
 | 
						|
; SSE41-NEXT:    movdqa %xmm2, %xmm3
 | 
						|
; SSE41-NEXT:    psubd %xmm1, %xmm3
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm3, %xmm2
 | 
						|
; SSE41-NEXT:    pxor %xmm1, %xmm2
 | 
						|
; SSE41-NEXT:    movaps {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSE41-NEXT:    movdqa %xmm3, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, {{.*}}(%rip), %xmm1
 | 
						|
; SSE41-NEXT:    movdqa %xmm2, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm1, %xmm3
 | 
						|
; SSE41-NEXT:    movaps %xmm3, %xmm0
 | 
						|
; SSE41-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX1-LABEL: v2i32:
 | 
						|
; AVX1:       # %bb.0:
 | 
						|
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
 | 
						|
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
 | 
						|
; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
 | 
						|
; AVX1-NEXT:    vmovaps {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX1-NEXT:    vblendvps %xmm1, {{.*}}(%rip), %xmm2, %xmm2
 | 
						|
; AVX1-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 | 
						|
; AVX1-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX2-LABEL: v2i32:
 | 
						|
; AVX2:       # %bb.0:
 | 
						|
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
 | 
						|
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
 | 
						|
; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
 | 
						|
; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX2-NEXT:    vblendvps %xmm1, %xmm2, %xmm3, %xmm2
 | 
						|
; AVX2-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 | 
						|
; AVX2-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX512-LABEL: v2i32:
 | 
						|
; AVX512:       # %bb.0:
 | 
						|
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX512-NEXT:    vpcmpgtd %xmm2, %xmm1, %k0
 | 
						|
; AVX512-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
 | 
						|
; AVX512-NEXT:    kxorw %k1, %k0, %k1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm2, %k2
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k2}
 | 
						|
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
 | 
						|
; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
 | 
						|
; AVX512-NEXT:    retq
 | 
						|
  %z = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
 | 
						|
  ret <2 x i32> %z
 | 
						|
}
 | 
						|
 | 
						|
define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
 | 
						|
; SSE2-LABEL: v4i32:
 | 
						|
; SSE2:       # %bb.0:
 | 
						|
; SSE2-NEXT:    pxor %xmm2, %xmm2
 | 
						|
; SSE2-NEXT:    movdqa %xmm0, %xmm3
 | 
						|
; SSE2-NEXT:    psubd %xmm1, %xmm3
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm1
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm0
 | 
						|
; SSE2-NEXT:    pxor %xmm1, %xmm0
 | 
						|
; SSE2-NEXT:    movdqa %xmm3, %xmm1
 | 
						|
; SSE2-NEXT:    pandn {{.*}}(%rip), %xmm1
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm2
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm2
 | 
						|
; SSE2-NEXT:    por %xmm2, %xmm1
 | 
						|
; SSE2-NEXT:    pand %xmm0, %xmm1
 | 
						|
; SSE2-NEXT:    pandn %xmm3, %xmm0
 | 
						|
; SSE2-NEXT:    por %xmm1, %xmm0
 | 
						|
; SSE2-NEXT:    retq
 | 
						|
;
 | 
						|
; SSSE3-LABEL: v4i32:
 | 
						|
; SSSE3:       # %bb.0:
 | 
						|
; SSSE3-NEXT:    pxor %xmm2, %xmm2
 | 
						|
; SSSE3-NEXT:    movdqa %xmm0, %xmm3
 | 
						|
; SSSE3-NEXT:    psubd %xmm1, %xmm3
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm1
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
 | 
						|
; SSSE3-NEXT:    pxor %xmm1, %xmm0
 | 
						|
; SSSE3-NEXT:    movdqa %xmm3, %xmm1
 | 
						|
; SSSE3-NEXT:    pandn {{.*}}(%rip), %xmm1
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm2
 | 
						|
; SSSE3-NEXT:    por %xmm2, %xmm1
 | 
						|
; SSSE3-NEXT:    pand %xmm0, %xmm1
 | 
						|
; SSSE3-NEXT:    pandn %xmm3, %xmm0
 | 
						|
; SSSE3-NEXT:    por %xmm1, %xmm0
 | 
						|
; SSSE3-NEXT:    retq
 | 
						|
;
 | 
						|
; SSE41-LABEL: v4i32:
 | 
						|
; SSE41:       # %bb.0:
 | 
						|
; SSE41-NEXT:    movdqa %xmm0, %xmm2
 | 
						|
; SSE41-NEXT:    pxor %xmm0, %xmm0
 | 
						|
; SSE41-NEXT:    movdqa %xmm2, %xmm3
 | 
						|
; SSE41-NEXT:    psubd %xmm1, %xmm3
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm3, %xmm2
 | 
						|
; SSE41-NEXT:    pxor %xmm1, %xmm2
 | 
						|
; SSE41-NEXT:    movaps {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSE41-NEXT:    movdqa %xmm3, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, {{.*}}(%rip), %xmm1
 | 
						|
; SSE41-NEXT:    movdqa %xmm2, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm1, %xmm3
 | 
						|
; SSE41-NEXT:    movaps %xmm3, %xmm0
 | 
						|
; SSE41-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX1-LABEL: v4i32:
 | 
						|
; AVX1:       # %bb.0:
 | 
						|
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
 | 
						|
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
 | 
						|
; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
 | 
						|
; AVX1-NEXT:    vmovaps {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX1-NEXT:    vblendvps %xmm1, {{.*}}(%rip), %xmm2, %xmm2
 | 
						|
; AVX1-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 | 
						|
; AVX1-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX2-LABEL: v4i32:
 | 
						|
; AVX2:       # %bb.0:
 | 
						|
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
 | 
						|
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
 | 
						|
; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
 | 
						|
; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2147483647,2147483647,2147483647,2147483647]
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX2-NEXT:    vblendvps %xmm1, %xmm2, %xmm3, %xmm2
 | 
						|
; AVX2-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 | 
						|
; AVX2-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX512-LABEL: v4i32:
 | 
						|
; AVX512:       # %bb.0:
 | 
						|
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX512-NEXT:    vpcmpgtd %xmm2, %xmm1, %k0
 | 
						|
; AVX512-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
 | 
						|
; AVX512-NEXT:    kxorw %k1, %k0, %k1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm2, %k2
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k2}
 | 
						|
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
 | 
						|
; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
 | 
						|
; AVX512-NEXT:    retq
 | 
						|
  %z = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
 | 
						|
  ret <4 x i32> %z
 | 
						|
}
 | 
						|
 | 
						|
define <8 x i32> @v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
 | 
						|
; SSE2-LABEL: v8i32:
 | 
						|
; SSE2:       # %bb.0:
 | 
						|
; SSE2-NEXT:    pxor %xmm4, %xmm4
 | 
						|
; SSE2-NEXT:    movdqa %xmm0, %xmm5
 | 
						|
; SSE2-NEXT:    psubd %xmm2, %xmm5
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm2
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm0
 | 
						|
; SSE2-NEXT:    pxor %xmm2, %xmm0
 | 
						|
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSE2-NEXT:    movdqa %xmm5, %xmm6
 | 
						|
; SSE2-NEXT:    pandn %xmm2, %xmm6
 | 
						|
; SSE2-NEXT:    pxor %xmm7, %xmm7
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm7
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm7
 | 
						|
; SSE2-NEXT:    por %xmm7, %xmm6
 | 
						|
; SSE2-NEXT:    pand %xmm0, %xmm6
 | 
						|
; SSE2-NEXT:    pandn %xmm5, %xmm0
 | 
						|
; SSE2-NEXT:    por %xmm6, %xmm0
 | 
						|
; SSE2-NEXT:    movdqa %xmm1, %xmm5
 | 
						|
; SSE2-NEXT:    psubd %xmm3, %xmm5
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm3
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm1
 | 
						|
; SSE2-NEXT:    pxor %xmm3, %xmm1
 | 
						|
; SSE2-NEXT:    movdqa %xmm5, %xmm3
 | 
						|
; SSE2-NEXT:    pandn %xmm2, %xmm3
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm4
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm4
 | 
						|
; SSE2-NEXT:    por %xmm4, %xmm3
 | 
						|
; SSE2-NEXT:    pand %xmm1, %xmm3
 | 
						|
; SSE2-NEXT:    pandn %xmm5, %xmm1
 | 
						|
; SSE2-NEXT:    por %xmm3, %xmm1
 | 
						|
; SSE2-NEXT:    retq
 | 
						|
;
 | 
						|
; SSSE3-LABEL: v8i32:
 | 
						|
; SSSE3:       # %bb.0:
 | 
						|
; SSSE3-NEXT:    pxor %xmm4, %xmm4
 | 
						|
; SSSE3-NEXT:    movdqa %xmm0, %xmm5
 | 
						|
; SSSE3-NEXT:    psubd %xmm2, %xmm5
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm2
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm0
 | 
						|
; SSSE3-NEXT:    pxor %xmm2, %xmm0
 | 
						|
; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSSE3-NEXT:    movdqa %xmm5, %xmm6
 | 
						|
; SSSE3-NEXT:    pandn %xmm2, %xmm6
 | 
						|
; SSSE3-NEXT:    pxor %xmm7, %xmm7
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm7
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm7
 | 
						|
; SSSE3-NEXT:    por %xmm7, %xmm6
 | 
						|
; SSSE3-NEXT:    pand %xmm0, %xmm6
 | 
						|
; SSSE3-NEXT:    pandn %xmm5, %xmm0
 | 
						|
; SSSE3-NEXT:    por %xmm6, %xmm0
 | 
						|
; SSSE3-NEXT:    movdqa %xmm1, %xmm5
 | 
						|
; SSSE3-NEXT:    psubd %xmm3, %xmm5
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm3
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm1
 | 
						|
; SSSE3-NEXT:    pxor %xmm3, %xmm1
 | 
						|
; SSSE3-NEXT:    movdqa %xmm5, %xmm3
 | 
						|
; SSSE3-NEXT:    pandn %xmm2, %xmm3
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm4
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm4
 | 
						|
; SSSE3-NEXT:    por %xmm4, %xmm3
 | 
						|
; SSSE3-NEXT:    pand %xmm1, %xmm3
 | 
						|
; SSSE3-NEXT:    pandn %xmm5, %xmm1
 | 
						|
; SSSE3-NEXT:    por %xmm3, %xmm1
 | 
						|
; SSSE3-NEXT:    retq
 | 
						|
;
 | 
						|
; SSE41-LABEL: v8i32:
 | 
						|
; SSE41:       # %bb.0:
 | 
						|
; SSE41-NEXT:    movdqa %xmm0, %xmm4
 | 
						|
; SSE41-NEXT:    pxor %xmm8, %xmm8
 | 
						|
; SSE41-NEXT:    movdqa %xmm0, %xmm5
 | 
						|
; SSE41-NEXT:    psubd %xmm2, %xmm5
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm8, %xmm2
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm5, %xmm4
 | 
						|
; SSE41-NEXT:    pxor %xmm2, %xmm4
 | 
						|
; SSE41-NEXT:    movaps {{.*#+}} xmm7 = [2147483647,2147483647,2147483647,2147483647]
 | 
						|
; SSE41-NEXT:    movaps {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSE41-NEXT:    movaps %xmm6, %xmm2
 | 
						|
; SSE41-NEXT:    movdqa %xmm5, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm7, %xmm2
 | 
						|
; SSE41-NEXT:    movdqa %xmm4, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm2, %xmm5
 | 
						|
; SSE41-NEXT:    movdqa %xmm1, %xmm2
 | 
						|
; SSE41-NEXT:    psubd %xmm3, %xmm2
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm8, %xmm3
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm2, %xmm1
 | 
						|
; SSE41-NEXT:    pxor %xmm3, %xmm1
 | 
						|
; SSE41-NEXT:    movdqa %xmm2, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm7, %xmm6
 | 
						|
; SSE41-NEXT:    movdqa %xmm1, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm6, %xmm2
 | 
						|
; SSE41-NEXT:    movaps %xmm5, %xmm0
 | 
						|
; SSE41-NEXT:    movaps %xmm2, %xmm1
 | 
						|
; SSE41-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX1-LABEL: v8i32:
 | 
						|
; AVX1:       # %bb.0:
 | 
						|
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
 | 
						|
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm4
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm1, %xmm3
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
 | 
						|
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
 | 
						|
; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm2
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm4, %xmm4
 | 
						|
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 | 
						|
; AVX1-NEXT:    vxorps %ymm0, %ymm3, %ymm0
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 | 
						|
; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX1-NEXT:    vblendvps %ymm1, {{.*}}(%rip), %ymm2, %ymm2
 | 
						|
; AVX1-NEXT:    vblendvps %ymm0, %ymm2, %ymm1, %ymm0
 | 
						|
; AVX1-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX2-LABEL: v8i32:
 | 
						|
; AVX2:       # %bb.0:
 | 
						|
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm1, %ymm2
 | 
						|
; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm1
 | 
						|
; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
 | 
						|
; AVX2-NEXT:    vpxor %ymm0, %ymm2, %ymm0
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647]
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} ymm3 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX2-NEXT:    vblendvps %ymm1, %ymm2, %ymm3, %ymm2
 | 
						|
; AVX2-NEXT:    vblendvps %ymm0, %ymm2, %ymm1, %ymm0
 | 
						|
; AVX2-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX512-LABEL: v8i32:
 | 
						|
; AVX512:       # %bb.0:
 | 
						|
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX512-NEXT:    vpcmpgtd %ymm2, %ymm1, %k0
 | 
						|
; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm0, %k1
 | 
						|
; AVX512-NEXT:    kxorw %k1, %k0, %k1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm2, %k2
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k2}
 | 
						|
; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k1}
 | 
						|
; AVX512-NEXT:    vmovdqa %ymm1, %ymm0
 | 
						|
; AVX512-NEXT:    retq
 | 
						|
  %z = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %x, <8 x i32> %y)
 | 
						|
  ret <8 x i32> %z
 | 
						|
}
 | 
						|
 | 
						|
define <16 x i32> @v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
 | 
						|
; SSE2-LABEL: v16i32:
 | 
						|
; SSE2:       # %bb.0:
 | 
						|
; SSE2-NEXT:    pxor %xmm8, %xmm8
 | 
						|
; SSE2-NEXT:    movdqa %xmm0, %xmm9
 | 
						|
; SSE2-NEXT:    psubd %xmm4, %xmm9
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm4
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm9, %xmm0
 | 
						|
; SSE2-NEXT:    pxor %xmm4, %xmm0
 | 
						|
; SSE2-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSE2-NEXT:    movdqa %xmm9, %xmm11
 | 
						|
; SSE2-NEXT:    pandn %xmm10, %xmm11
 | 
						|
; SSE2-NEXT:    pxor %xmm4, %xmm4
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm9, %xmm4
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm4
 | 
						|
; SSE2-NEXT:    por %xmm4, %xmm11
 | 
						|
; SSE2-NEXT:    pand %xmm0, %xmm11
 | 
						|
; SSE2-NEXT:    pandn %xmm9, %xmm0
 | 
						|
; SSE2-NEXT:    por %xmm11, %xmm0
 | 
						|
; SSE2-NEXT:    movdqa %xmm1, %xmm9
 | 
						|
; SSE2-NEXT:    psubd %xmm5, %xmm9
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm5
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm9, %xmm1
 | 
						|
; SSE2-NEXT:    pxor %xmm5, %xmm1
 | 
						|
; SSE2-NEXT:    movdqa %xmm9, %xmm5
 | 
						|
; SSE2-NEXT:    pandn %xmm10, %xmm5
 | 
						|
; SSE2-NEXT:    pxor %xmm4, %xmm4
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm9, %xmm4
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm4
 | 
						|
; SSE2-NEXT:    por %xmm4, %xmm5
 | 
						|
; SSE2-NEXT:    pand %xmm1, %xmm5
 | 
						|
; SSE2-NEXT:    pandn %xmm9, %xmm1
 | 
						|
; SSE2-NEXT:    por %xmm5, %xmm1
 | 
						|
; SSE2-NEXT:    movdqa %xmm2, %xmm4
 | 
						|
; SSE2-NEXT:    psubd %xmm6, %xmm4
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm6
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm2
 | 
						|
; SSE2-NEXT:    pxor %xmm6, %xmm2
 | 
						|
; SSE2-NEXT:    movdqa %xmm4, %xmm5
 | 
						|
; SSE2-NEXT:    pandn %xmm10, %xmm5
 | 
						|
; SSE2-NEXT:    pxor %xmm6, %xmm6
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm6
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm6
 | 
						|
; SSE2-NEXT:    por %xmm6, %xmm5
 | 
						|
; SSE2-NEXT:    pand %xmm2, %xmm5
 | 
						|
; SSE2-NEXT:    pandn %xmm4, %xmm2
 | 
						|
; SSE2-NEXT:    por %xmm5, %xmm2
 | 
						|
; SSE2-NEXT:    movdqa %xmm3, %xmm4
 | 
						|
; SSE2-NEXT:    psubd %xmm7, %xmm4
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm7
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm3
 | 
						|
; SSE2-NEXT:    pxor %xmm7, %xmm3
 | 
						|
; SSE2-NEXT:    movdqa %xmm4, %xmm5
 | 
						|
; SSE2-NEXT:    pandn %xmm10, %xmm5
 | 
						|
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm8
 | 
						|
; SSE2-NEXT:    psrld $1, %xmm8
 | 
						|
; SSE2-NEXT:    por %xmm8, %xmm5
 | 
						|
; SSE2-NEXT:    pand %xmm3, %xmm5
 | 
						|
; SSE2-NEXT:    pandn %xmm4, %xmm3
 | 
						|
; SSE2-NEXT:    por %xmm5, %xmm3
 | 
						|
; SSE2-NEXT:    retq
 | 
						|
;
 | 
						|
; SSSE3-LABEL: v16i32:
 | 
						|
; SSSE3:       # %bb.0:
 | 
						|
; SSSE3-NEXT:    pxor %xmm8, %xmm8
 | 
						|
; SSSE3-NEXT:    movdqa %xmm0, %xmm9
 | 
						|
; SSSE3-NEXT:    psubd %xmm4, %xmm9
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm4
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm9, %xmm0
 | 
						|
; SSSE3-NEXT:    pxor %xmm4, %xmm0
 | 
						|
; SSSE3-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSSE3-NEXT:    movdqa %xmm9, %xmm11
 | 
						|
; SSSE3-NEXT:    pandn %xmm10, %xmm11
 | 
						|
; SSSE3-NEXT:    pxor %xmm4, %xmm4
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm9, %xmm4
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm4
 | 
						|
; SSSE3-NEXT:    por %xmm4, %xmm11
 | 
						|
; SSSE3-NEXT:    pand %xmm0, %xmm11
 | 
						|
; SSSE3-NEXT:    pandn %xmm9, %xmm0
 | 
						|
; SSSE3-NEXT:    por %xmm11, %xmm0
 | 
						|
; SSSE3-NEXT:    movdqa %xmm1, %xmm9
 | 
						|
; SSSE3-NEXT:    psubd %xmm5, %xmm9
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm5
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm9, %xmm1
 | 
						|
; SSSE3-NEXT:    pxor %xmm5, %xmm1
 | 
						|
; SSSE3-NEXT:    movdqa %xmm9, %xmm5
 | 
						|
; SSSE3-NEXT:    pandn %xmm10, %xmm5
 | 
						|
; SSSE3-NEXT:    pxor %xmm4, %xmm4
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm9, %xmm4
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm4
 | 
						|
; SSSE3-NEXT:    por %xmm4, %xmm5
 | 
						|
; SSSE3-NEXT:    pand %xmm1, %xmm5
 | 
						|
; SSSE3-NEXT:    pandn %xmm9, %xmm1
 | 
						|
; SSSE3-NEXT:    por %xmm5, %xmm1
 | 
						|
; SSSE3-NEXT:    movdqa %xmm2, %xmm4
 | 
						|
; SSSE3-NEXT:    psubd %xmm6, %xmm4
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm6
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm2
 | 
						|
; SSSE3-NEXT:    pxor %xmm6, %xmm2
 | 
						|
; SSSE3-NEXT:    movdqa %xmm4, %xmm5
 | 
						|
; SSSE3-NEXT:    pandn %xmm10, %xmm5
 | 
						|
; SSSE3-NEXT:    pxor %xmm6, %xmm6
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm6
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm6
 | 
						|
; SSSE3-NEXT:    por %xmm6, %xmm5
 | 
						|
; SSSE3-NEXT:    pand %xmm2, %xmm5
 | 
						|
; SSSE3-NEXT:    pandn %xmm4, %xmm2
 | 
						|
; SSSE3-NEXT:    por %xmm5, %xmm2
 | 
						|
; SSSE3-NEXT:    movdqa %xmm3, %xmm4
 | 
						|
; SSSE3-NEXT:    psubd %xmm7, %xmm4
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm7
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm3
 | 
						|
; SSSE3-NEXT:    pxor %xmm7, %xmm3
 | 
						|
; SSSE3-NEXT:    movdqa %xmm4, %xmm5
 | 
						|
; SSSE3-NEXT:    pandn %xmm10, %xmm5
 | 
						|
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm8
 | 
						|
; SSSE3-NEXT:    psrld $1, %xmm8
 | 
						|
; SSSE3-NEXT:    por %xmm8, %xmm5
 | 
						|
; SSSE3-NEXT:    pand %xmm3, %xmm5
 | 
						|
; SSSE3-NEXT:    pandn %xmm4, %xmm3
 | 
						|
; SSSE3-NEXT:    por %xmm5, %xmm3
 | 
						|
; SSSE3-NEXT:    retq
 | 
						|
;
 | 
						|
; SSE41-LABEL: v16i32:
 | 
						|
; SSE41:       # %bb.0:
 | 
						|
; SSE41-NEXT:    movdqa %xmm3, %xmm8
 | 
						|
; SSE41-NEXT:    movdqa %xmm0, %xmm3
 | 
						|
; SSE41-NEXT:    pxor %xmm10, %xmm10
 | 
						|
; SSE41-NEXT:    movdqa %xmm0, %xmm9
 | 
						|
; SSE41-NEXT:    psubd %xmm4, %xmm9
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
 | 
						|
; SSE41-NEXT:    pxor %xmm4, %xmm3
 | 
						|
; SSE41-NEXT:    movaps {{.*#+}} xmm12 = [2147483647,2147483647,2147483647,2147483647]
 | 
						|
; SSE41-NEXT:    movaps {{.*#+}} xmm11 = [2147483648,2147483648,2147483648,2147483648]
 | 
						|
; SSE41-NEXT:    movaps %xmm11, %xmm4
 | 
						|
; SSE41-NEXT:    movdqa %xmm9, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm12, %xmm4
 | 
						|
; SSE41-NEXT:    movdqa %xmm3, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm4, %xmm9
 | 
						|
; SSE41-NEXT:    movdqa %xmm1, %xmm4
 | 
						|
; SSE41-NEXT:    psubd %xmm5, %xmm4
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm5
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm4, %xmm1
 | 
						|
; SSE41-NEXT:    pxor %xmm5, %xmm1
 | 
						|
; SSE41-NEXT:    movaps %xmm11, %xmm3
 | 
						|
; SSE41-NEXT:    movdqa %xmm4, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm12, %xmm3
 | 
						|
; SSE41-NEXT:    movdqa %xmm1, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm3, %xmm4
 | 
						|
; SSE41-NEXT:    movdqa %xmm2, %xmm3
 | 
						|
; SSE41-NEXT:    psubd %xmm6, %xmm3
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm6
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm3, %xmm2
 | 
						|
; SSE41-NEXT:    pxor %xmm6, %xmm2
 | 
						|
; SSE41-NEXT:    movaps %xmm11, %xmm1
 | 
						|
; SSE41-NEXT:    movdqa %xmm3, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm12, %xmm1
 | 
						|
; SSE41-NEXT:    movdqa %xmm2, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm1, %xmm3
 | 
						|
; SSE41-NEXT:    movdqa %xmm8, %xmm5
 | 
						|
; SSE41-NEXT:    psubd %xmm7, %xmm5
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm7
 | 
						|
; SSE41-NEXT:    pcmpgtd %xmm5, %xmm8
 | 
						|
; SSE41-NEXT:    pxor %xmm7, %xmm8
 | 
						|
; SSE41-NEXT:    movdqa %xmm5, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm12, %xmm11
 | 
						|
; SSE41-NEXT:    movdqa %xmm8, %xmm0
 | 
						|
; SSE41-NEXT:    blendvps %xmm0, %xmm11, %xmm5
 | 
						|
; SSE41-NEXT:    movaps %xmm9, %xmm0
 | 
						|
; SSE41-NEXT:    movaps %xmm4, %xmm1
 | 
						|
; SSE41-NEXT:    movaps %xmm3, %xmm2
 | 
						|
; SSE41-NEXT:    movaps %xmm5, %xmm3
 | 
						|
; SSE41-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX1-LABEL: v16i32:
 | 
						|
; AVX1:       # %bb.0:
 | 
						|
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 | 
						|
; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm4, %xmm6
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm2, %xmm7
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
 | 
						|
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
 | 
						|
; AVX1-NEXT:    vpsubd %xmm4, %xmm7, %xmm4
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm4, %xmm7, %xmm7
 | 
						|
; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm2
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
 | 
						|
; AVX1-NEXT:    vxorps %ymm0, %ymm6, %ymm0
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
 | 
						|
; AVX1-NEXT:    vmovaps {{.*#+}} ymm4 = [2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647]
 | 
						|
; AVX1-NEXT:    vmovaps {{.*#+}} ymm6 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX1-NEXT:    vblendvps %ymm2, %ymm4, %ymm6, %ymm7
 | 
						|
; AVX1-NEXT:    vblendvps %ymm0, %ymm7, %ymm2, %ymm0
 | 
						|
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm2, %xmm7
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm3, %xmm5
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
 | 
						|
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
 | 
						|
; AVX1-NEXT:    vpsubd %xmm2, %xmm7, %xmm2
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm7, %xmm7
 | 
						|
; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm3
 | 
						|
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm1, %xmm1
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm1, %ymm1
 | 
						|
; AVX1-NEXT:    vxorps %ymm1, %ymm5, %ymm1
 | 
						|
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
 | 
						|
; AVX1-NEXT:    vblendvps %ymm2, %ymm4, %ymm6, %ymm3
 | 
						|
; AVX1-NEXT:    vblendvps %ymm1, %ymm3, %ymm2, %ymm1
 | 
						|
; AVX1-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX2-LABEL: v16i32:
 | 
						|
; AVX2:       # %bb.0:
 | 
						|
; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 | 
						|
; AVX2-NEXT:    vpcmpgtd %ymm4, %ymm2, %ymm5
 | 
						|
; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm2
 | 
						|
; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
 | 
						|
; AVX2-NEXT:    vpxor %ymm0, %ymm5, %ymm0
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} ymm5 = [2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647,2147483647]
 | 
						|
; AVX2-NEXT:    vbroadcastss {{.*#+}} ymm6 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX2-NEXT:    vblendvps %ymm2, %ymm5, %ymm6, %ymm7
 | 
						|
; AVX2-NEXT:    vblendvps %ymm0, %ymm7, %ymm2, %ymm0
 | 
						|
; AVX2-NEXT:    vpcmpgtd %ymm4, %ymm3, %ymm2
 | 
						|
; AVX2-NEXT:    vpsubd %ymm3, %ymm1, %ymm3
 | 
						|
; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm1, %ymm1
 | 
						|
; AVX2-NEXT:    vpxor %ymm1, %ymm2, %ymm1
 | 
						|
; AVX2-NEXT:    vblendvps %ymm3, %ymm5, %ymm6, %ymm2
 | 
						|
; AVX2-NEXT:    vblendvps %ymm1, %ymm2, %ymm3, %ymm1
 | 
						|
; AVX2-NEXT:    retq
 | 
						|
;
 | 
						|
; AVX512-LABEL: v16i32:
 | 
						|
; AVX512:       # %bb.0:
 | 
						|
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 | 
						|
; AVX512-NEXT:    vpcmpgtd %zmm2, %zmm1, %k0
 | 
						|
; AVX512-NEXT:    vpsubd %zmm1, %zmm0, %zmm1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
 | 
						|
; AVX512-NEXT:    kxorw %k1, %k0, %k1
 | 
						|
; AVX512-NEXT:    vpcmpgtd %zmm1, %zmm2, %k2
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*#+}} zmm0 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
 | 
						|
; AVX512-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k2}
 | 
						|
; AVX512-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 | 
						|
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm0
 | 
						|
; AVX512-NEXT:    retq
 | 
						|
  %z = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %x, <16 x i32> %y)
 | 
						|
  ret <16 x i32> %z
 | 
						|
}
 | 
						|
 | 
						|
define <2 x i64> @v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
; SSE2-LABEL: v2i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pxor %xmm2, %xmm3
; SSE2-NEXT:    psubq %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm4
; SSE2-NEXT:    pxor %xmm2, %xmm4
; SSE2-NEXT:    movdqa %xmm3, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm3, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT:    por %xmm3, %xmm4
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    pandn {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE2-NEXT:    por %xmm3, %xmm1
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    pandn %xmm0, %xmm2
; SSE2-NEXT:    por %xmm2, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: v2i64:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSSE3-NEXT:    movdqa %xmm0, %xmm3
; SSSE3-NEXT:    pxor %xmm2, %xmm3
; SSSE3-NEXT:    psubq %xmm1, %xmm0
; SSSE3-NEXT:    movdqa %xmm0, %xmm4
; SSSE3-NEXT:    pxor %xmm2, %xmm4
; SSSE3-NEXT:    movdqa %xmm3, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm3, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSSE3-NEXT:    pand %xmm6, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSSE3-NEXT:    por %xmm3, %xmm4
; SSSE3-NEXT:    pxor %xmm2, %xmm1
; SSSE3-NEXT:    movdqa %xmm1, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm1
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSSE3-NEXT:    pand %xmm5, %xmm1
; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSSE3-NEXT:    por %xmm1, %xmm2
; SSSE3-NEXT:    pxor %xmm4, %xmm2
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    pandn {{.*}}(%rip), %xmm1
; SSSE3-NEXT:    pxor %xmm3, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm0, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSSE3-NEXT:    pand {{.*}}(%rip), %xmm3
; SSSE3-NEXT:    por %xmm3, %xmm1
; SSSE3-NEXT:    pand %xmm2, %xmm1
; SSSE3-NEXT:    pandn %xmm0, %xmm2
; SSSE3-NEXT:    por %xmm2, %xmm1
; SSSE3-NEXT:    movdqa %xmm1, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: v2i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT:    movdqa %xmm2, %xmm3
; SSE41-NEXT:    pxor %xmm0, %xmm3
; SSE41-NEXT:    psubq %xmm1, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm4
; SSE41-NEXT:    pxor %xmm0, %xmm4
; SSE41-NEXT:    movdqa %xmm3, %xmm5
; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
; SSE41-NEXT:    pcmpgtd %xmm4, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE41-NEXT:    pand %xmm5, %xmm4
; SSE41-NEXT:    por %xmm3, %xmm4
; SSE41-NEXT:    pxor %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm5
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
; SSE41-NEXT:    pand %xmm5, %xmm3
; SSE41-NEXT:    por %xmm1, %xmm3
; SSE41-NEXT:    pxor %xmm4, %xmm3
; SSE41-NEXT:    movapd {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, {{.*}}(%rip), %xmm1
; SSE41-NEXT:    movdqa %xmm3, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT:    movapd %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: v2i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm2
; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vmovapd {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT:    vblendvpd %xmm1, {{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT:    vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v2i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vmovapd {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vblendvpd %xmm1, {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT:    vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v2i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtq %xmm2, %xmm1, %k0
; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm2, %k2
; AVX512-NEXT:    vmovdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k2}
; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
; AVX512-NEXT:    retq
  %z = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
  ret <2 x i64> %z
}

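; The 256-bit case: AVX1 has no 256-bit integer arithmetic, so the subtract
; and compares run per 128-bit half via vextractf128/vinsertf128 and only
; the final blends operate on whole ymm values; AVX2 applies the v2i64
; pattern directly on ymm registers; AVX512VL combines the overflow
; predicates with kxorw and builds the saturation constant with a masked
; vpbroadcastq.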
define <4 x i64> @v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
; SSE2-LABEL: v4i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    pxor %xmm8, %xmm5
; SSE2-NEXT:    psubq %xmm2, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm6
; SSE2-NEXT:    pxor %xmm8, %xmm6
; SSE2-NEXT:    movdqa %xmm5, %xmm7
; SSE2-NEXT:    pcmpgtd %xmm6, %xmm7
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm5, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSE2-NEXT:    pand %xmm4, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSE2-NEXT:    por %xmm5, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm8, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
; SSE2-NEXT:    por %xmm2, %xmm7
; SSE2-NEXT:    pxor %xmm4, %xmm7
; SSE2-NEXT:    movdqa %xmm7, %xmm4
; SSE2-NEXT:    pandn %xmm0, %xmm4
; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [9223372036854775808,9223372036854775808]
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pxor %xmm6, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm6
; SSE2-NEXT:    pandn %xmm9, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm10 = [9223372036854775807,9223372036854775807]
; SSE2-NEXT:    pand %xmm10, %xmm2
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    pand %xmm7, %xmm0
; SSE2-NEXT:    por %xmm4, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pxor %xmm8, %xmm2
; SSE2-NEXT:    psubq %xmm3, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    movdqa %xmm2, %xmm7
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm7
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSE2-NEXT:    por %xmm2, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm8, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    por %xmm3, %xmm2
; SSE2-NEXT:    pxor %xmm4, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm3
; SSE2-NEXT:    pandn %xmm1, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm5
; SSE2-NEXT:    pandn %xmm9, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT:    pand %xmm10, %xmm4
; SSE2-NEXT:    por %xmm4, %xmm1
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    por %xmm3, %xmm1
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: v4i64:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
; SSSE3-NEXT:    movdqa %xmm0, %xmm5
; SSSE3-NEXT:    pxor %xmm8, %xmm5
; SSSE3-NEXT:    psubq %xmm2, %xmm0
; SSSE3-NEXT:    movdqa %xmm0, %xmm6
; SSSE3-NEXT:    pxor %xmm8, %xmm6
; SSSE3-NEXT:    movdqa %xmm5, %xmm7
; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm7
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm5, %xmm6
; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSSE3-NEXT:    pand %xmm4, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSSE3-NEXT:    por %xmm5, %xmm4
; SSSE3-NEXT:    pxor %xmm8, %xmm2
; SSSE3-NEXT:    movdqa %xmm2, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm8, %xmm2
; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSSE3-NEXT:    pand %xmm6, %xmm2
; SSSE3-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
; SSSE3-NEXT:    por %xmm2, %xmm7
; SSSE3-NEXT:    pxor %xmm4, %xmm7
; SSSE3-NEXT:    movdqa %xmm7, %xmm4
; SSSE3-NEXT:    pandn %xmm0, %xmm4
; SSSE3-NEXT:    movdqa {{.*#+}} xmm9 = [9223372036854775808,9223372036854775808]
; SSSE3-NEXT:    pxor %xmm5, %xmm5
; SSSE3-NEXT:    pxor %xmm6, %xmm6
; SSSE3-NEXT:    pcmpgtd %xmm0, %xmm6
; SSSE3-NEXT:    pandn %xmm9, %xmm0
; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
; SSSE3-NEXT:    movdqa {{.*#+}} xmm10 = [9223372036854775807,9223372036854775807]
; SSSE3-NEXT:    pand %xmm10, %xmm2
; SSSE3-NEXT:    por %xmm2, %xmm0
; SSSE3-NEXT:    pand %xmm7, %xmm0
; SSSE3-NEXT:    por %xmm4, %xmm0
; SSSE3-NEXT:    movdqa %xmm1, %xmm2
; SSSE3-NEXT:    pxor %xmm8, %xmm2
; SSSE3-NEXT:    psubq %xmm3, %xmm1
; SSSE3-NEXT:    movdqa %xmm1, %xmm4
; SSSE3-NEXT:    pxor %xmm8, %xmm4
; SSSE3-NEXT:    movdqa %xmm2, %xmm7
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm7
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSSE3-NEXT:    pand %xmm6, %xmm2
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSSE3-NEXT:    por %xmm2, %xmm4
; SSSE3-NEXT:    pxor %xmm8, %xmm3
; SSSE3-NEXT:    movdqa %xmm3, %xmm2
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm2
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm8, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSSE3-NEXT:    pand %xmm6, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSSE3-NEXT:    por %xmm3, %xmm2
; SSSE3-NEXT:    pxor %xmm4, %xmm2
; SSSE3-NEXT:    movdqa %xmm2, %xmm3
; SSSE3-NEXT:    pandn %xmm1, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
; SSSE3-NEXT:    pandn %xmm9, %xmm1
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSSE3-NEXT:    pand %xmm10, %xmm4
; SSSE3-NEXT:    por %xmm4, %xmm1
; SSSE3-NEXT:    pand %xmm2, %xmm1
; SSSE3-NEXT:    por %xmm3, %xmm1
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: v4i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm4
; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [2147483648,2147483648]
; SSE41-NEXT:    pxor %xmm9, %xmm0
; SSE41-NEXT:    psubq %xmm2, %xmm4
; SSE41-NEXT:    movdqa %xmm4, %xmm5
; SSE41-NEXT:    pxor %xmm9, %xmm5
; SSE41-NEXT:    movdqa %xmm0, %xmm7
; SSE41-NEXT:    pcmpeqd %xmm5, %xmm7
; SSE41-NEXT:    pcmpgtd %xmm5, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
; SSE41-NEXT:    pand %xmm7, %xmm6
; SSE41-NEXT:    por %xmm0, %xmm6
; SSE41-NEXT:    pxor %xmm9, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
; SSE41-NEXT:    pcmpgtd %xmm9, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
; SSE41-NEXT:    pand %xmm0, %xmm5
; SSE41-NEXT:    por %xmm2, %xmm5
; SSE41-NEXT:    pxor %xmm6, %xmm5
; SSE41-NEXT:    movapd {{.*#+}} xmm8 = [9223372036854775807,9223372036854775807]
; SSE41-NEXT:    movapd {{.*#+}} xmm7 = [9223372036854775808,9223372036854775808]
; SSE41-NEXT:    movapd %xmm7, %xmm2
; SSE41-NEXT:    movdqa %xmm4, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
; SSE41-NEXT:    movdqa %xmm5, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm4
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    pxor %xmm9, %xmm0
; SSE41-NEXT:    psubq %xmm3, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm2
; SSE41-NEXT:    pxor %xmm9, %xmm2
; SSE41-NEXT:    movdqa %xmm0, %xmm5
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm5
; SSE41-NEXT:    pcmpgtd %xmm2, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
; SSE41-NEXT:    pand %xmm5, %xmm6
; SSE41-NEXT:    por %xmm0, %xmm6
; SSE41-NEXT:    pxor %xmm9, %xmm3
; SSE41-NEXT:    movdqa %xmm3, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm9, %xmm0
; SSE41-NEXT:    pcmpgtd %xmm9, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT:    pand %xmm0, %xmm2
; SSE41-NEXT:    por %xmm3, %xmm2
; SSE41-NEXT:    pxor %xmm6, %xmm2
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm7
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm7, %xmm1
; SSE41-NEXT:    movapd %xmm4, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm4
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT:    vxorpd %ymm0, %ymm3, %ymm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX1-NEXT:    vblendvpd %ymm1, {{.*}}(%rip), %ymm2, %ymm2
; AVX1-NEXT:    vblendvpd %ymm0, %ymm2, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm1
; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807]
; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vblendvpd %ymm1, %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vblendvpd %ymm0, %ymm2, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v4i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtq %ymm2, %ymm1, %k0
; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm1
; AVX512-NEXT:    vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpgtq %ymm1, %ymm2, %k2
; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX512-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm0 {%k2}
; AVX512-NEXT:    vmovdqa64 %ymm0, %ymm1 {%k1}
; AVX512-NEXT:    vmovdqa %ymm1, %ymm0
; AVX512-NEXT:    retq
  %z = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %x, <4 x i64> %y)
  ret <4 x i64> %z
}

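; The 512-bit case repeats the preceding patterns per register: four xmm
; pairs on SSE, two ymm halves on AVX1/AVX2 (note the INT64_MAX/INT64_MIN
; splats are hoisted and shared between the halves), while AVX512 handles
; all eight lanes with a single zmm sequence and one k-register compare
; chain.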
define <8 x i64> @v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
; SSE2-LABEL: v8i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm9
; SSE2-NEXT:    pxor %xmm8, %xmm9
; SSE2-NEXT:    psubq %xmm4, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm10
; SSE2-NEXT:    pxor %xmm8, %xmm10
; SSE2-NEXT:    movdqa %xmm9, %xmm11
; SSE2-NEXT:    pcmpgtd %xmm10, %xmm11
; SSE2-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm9, %xmm10
; SSE2-NEXT:    pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
; SSE2-NEXT:    pand %xmm12, %xmm9
; SSE2-NEXT:    pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3]
; SSE2-NEXT:    por %xmm9, %xmm10
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm9
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm9
; SSE2-NEXT:    pshufd {{.*#+}} xmm11 = xmm9[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm8, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm12 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm11, %xmm12
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
; SSE2-NEXT:    por %xmm12, %xmm4
; SSE2-NEXT:    pxor %xmm10, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm12
; SSE2-NEXT:    pandn %xmm0, %xmm12
; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [9223372036854775808,9223372036854775808]
; SSE2-NEXT:    pxor %xmm11, %xmm11
; SSE2-NEXT:    pxor %xmm10, %xmm10
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm10
; SSE2-NEXT:    pandn %xmm9, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm13 = xmm10[1,1,3,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm10 = [9223372036854775807,9223372036854775807]
; SSE2-NEXT:    pand %xmm10, %xmm13
; SSE2-NEXT:    por %xmm13, %xmm0
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    por %xmm12, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm12
; SSE2-NEXT:    pxor %xmm8, %xmm12
; SSE2-NEXT:    psubq %xmm5, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    movdqa %xmm12, %xmm13
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm13
; SSE2-NEXT:    pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm12, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm14, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm12 = xmm13[1,1,3,3]
; SSE2-NEXT:    por %xmm4, %xmm12
; SSE2-NEXT:    pxor %xmm8, %xmm5
; SSE2-NEXT:    movdqa %xmm5, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm13 = xmm4[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm8, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT:    pand %xmm13, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT:    por %xmm5, %xmm4
; SSE2-NEXT:    pxor %xmm12, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm12
; SSE2-NEXT:    pandn %xmm1, %xmm12
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm5
; SSE2-NEXT:    pandn %xmm9, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT:    pand %xmm10, %xmm5
; SSE2-NEXT:    por %xmm5, %xmm1
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    por %xmm12, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm12
; SSE2-NEXT:    pxor %xmm8, %xmm12
; SSE2-NEXT:    psubq %xmm6, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm5
; SSE2-NEXT:    pxor %xmm8, %xmm5
; SSE2-NEXT:    movdqa %xmm12, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm13 = xmm4[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm12, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT:    pand %xmm13, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT:    por %xmm5, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm6
; SSE2-NEXT:    movdqa %xmm6, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm12 = xmm5[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm8, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT:    pand %xmm12, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT:    por %xmm6, %xmm5
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    movdqa %xmm5, %xmm4
; SSE2-NEXT:    pandn %xmm2, %xmm4
; SSE2-NEXT:    pxor %xmm6, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm6
; SSE2-NEXT:    pandn %xmm9, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT:    pand %xmm10, %xmm6
; SSE2-NEXT:    por %xmm6, %xmm2
; SSE2-NEXT:    pand %xmm5, %xmm2
; SSE2-NEXT:    por %xmm4, %xmm2
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    pxor %xmm8, %xmm4
; SSE2-NEXT:    psubq %xmm7, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm5
; SSE2-NEXT:    pxor %xmm8, %xmm5
; SSE2-NEXT:    movdqa %xmm4, %xmm6
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm6
; SSE2-NEXT:    pshufd {{.*#+}} xmm12 = xmm6[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm4, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT:    pand %xmm12, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSE2-NEXT:    por %xmm4, %xmm5
; SSE2-NEXT:    pxor %xmm8, %xmm7
; SSE2-NEXT:    movdqa %xmm7, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm8, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm8, %xmm7
; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm7
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT:    por %xmm7, %xmm4
; SSE2-NEXT:    pxor %xmm5, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm5
; SSE2-NEXT:    pandn %xmm3, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm11
; SSE2-NEXT:    pandn %xmm9, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm11[1,1,3,3]
; SSE2-NEXT:    pand %xmm10, %xmm6
; SSE2-NEXT:    por %xmm6, %xmm3
; SSE2-NEXT:    pand %xmm4, %xmm3
; SSE2-NEXT:    por %xmm5, %xmm3
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: v8i64:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
; SSSE3-NEXT:    movdqa %xmm0, %xmm9
; SSSE3-NEXT:    pxor %xmm8, %xmm9
; SSSE3-NEXT:    psubq %xmm4, %xmm0
; SSSE3-NEXT:    movdqa %xmm0, %xmm10
; SSSE3-NEXT:    pxor %xmm8, %xmm10
; SSSE3-NEXT:    movdqa %xmm9, %xmm11
; SSSE3-NEXT:    pcmpgtd %xmm10, %xmm11
; SSSE3-NEXT:    pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm9, %xmm10
; SSSE3-NEXT:    pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
; SSSE3-NEXT:    pand %xmm12, %xmm9
; SSSE3-NEXT:    pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3]
; SSSE3-NEXT:    por %xmm9, %xmm10
; SSSE3-NEXT:    pxor %xmm8, %xmm4
; SSSE3-NEXT:    movdqa %xmm4, %xmm9
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm9
; SSSE3-NEXT:    pshufd {{.*#+}} xmm11 = xmm9[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm8, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm12 = xmm4[1,1,3,3]
; SSSE3-NEXT:    pand %xmm11, %xmm12
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm9[1,1,3,3]
; SSSE3-NEXT:    por %xmm12, %xmm4
; SSSE3-NEXT:    pxor %xmm10, %xmm4
; SSSE3-NEXT:    movdqa %xmm4, %xmm12
; SSSE3-NEXT:    pandn %xmm0, %xmm12
; SSSE3-NEXT:    movdqa {{.*#+}} xmm9 = [9223372036854775808,9223372036854775808]
; SSSE3-NEXT:    pxor %xmm11, %xmm11
; SSSE3-NEXT:    pxor %xmm10, %xmm10
; SSSE3-NEXT:    pcmpgtd %xmm0, %xmm10
; SSSE3-NEXT:    pandn %xmm9, %xmm0
; SSSE3-NEXT:    pshufd {{.*#+}} xmm13 = xmm10[1,1,3,3]
; SSSE3-NEXT:    movdqa {{.*#+}} xmm10 = [9223372036854775807,9223372036854775807]
; SSSE3-NEXT:    pand %xmm10, %xmm13
; SSSE3-NEXT:    por %xmm13, %xmm0
; SSSE3-NEXT:    pand %xmm4, %xmm0
; SSSE3-NEXT:    por %xmm12, %xmm0
; SSSE3-NEXT:    movdqa %xmm1, %xmm12
; SSSE3-NEXT:    pxor %xmm8, %xmm12
; SSSE3-NEXT:    psubq %xmm5, %xmm1
; SSSE3-NEXT:    movdqa %xmm1, %xmm4
; SSSE3-NEXT:    pxor %xmm8, %xmm4
; SSSE3-NEXT:    movdqa %xmm12, %xmm13
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm13
; SSSE3-NEXT:    pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm12, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSSE3-NEXT:    pand %xmm14, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm12 = xmm13[1,1,3,3]
; SSSE3-NEXT:    por %xmm4, %xmm12
; SSSE3-NEXT:    pxor %xmm8, %xmm5
; SSSE3-NEXT:    movdqa %xmm5, %xmm4
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm13 = xmm4[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm8, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSSE3-NEXT:    pand %xmm13, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSSE3-NEXT:    por %xmm5, %xmm4
; SSSE3-NEXT:    pxor %xmm12, %xmm4
; SSSE3-NEXT:    movdqa %xmm4, %xmm12
; SSSE3-NEXT:    pandn %xmm1, %xmm12
; SSSE3-NEXT:    pxor %xmm5, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
; SSSE3-NEXT:    pandn %xmm9, %xmm1
; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSSE3-NEXT:    pand %xmm10, %xmm5
; SSSE3-NEXT:    por %xmm5, %xmm1
; SSSE3-NEXT:    pand %xmm4, %xmm1
; SSSE3-NEXT:    por %xmm12, %xmm1
; SSSE3-NEXT:    movdqa %xmm2, %xmm12
; SSSE3-NEXT:    pxor %xmm8, %xmm12
; SSSE3-NEXT:    psubq %xmm6, %xmm2
; SSSE3-NEXT:    movdqa %xmm2, %xmm5
; SSSE3-NEXT:    pxor %xmm8, %xmm5
; SSSE3-NEXT:    movdqa %xmm12, %xmm4
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm13 = xmm4[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm12, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSSE3-NEXT:    pand %xmm13, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSSE3-NEXT:    por %xmm5, %xmm4
; SSSE3-NEXT:    pxor %xmm8, %xmm6
; SSSE3-NEXT:    movdqa %xmm6, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm12 = xmm5[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm8, %xmm6
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSSE3-NEXT:    pand %xmm12, %xmm6
; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSSE3-NEXT:    por %xmm6, %xmm5
; SSSE3-NEXT:    pxor %xmm4, %xmm5
; SSSE3-NEXT:    movdqa %xmm5, %xmm4
; SSSE3-NEXT:    pandn %xmm2, %xmm4
; SSSE3-NEXT:    pxor %xmm6, %xmm6
; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm6
; SSSE3-NEXT:    pandn %xmm9, %xmm2
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSSE3-NEXT:    pand %xmm10, %xmm6
; SSSE3-NEXT:    por %xmm6, %xmm2
; SSSE3-NEXT:    pand %xmm5, %xmm2
; SSSE3-NEXT:    por %xmm4, %xmm2
; SSSE3-NEXT:    movdqa %xmm3, %xmm4
; SSSE3-NEXT:    pxor %xmm8, %xmm4
; SSSE3-NEXT:    psubq %xmm7, %xmm3
; SSSE3-NEXT:    movdqa %xmm3, %xmm5
; SSSE3-NEXT:    pxor %xmm8, %xmm5
; SSSE3-NEXT:    movdqa %xmm4, %xmm6
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm6
; SSSE3-NEXT:    pshufd {{.*#+}} xmm12 = xmm6[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm4, %xmm5
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSSE3-NEXT:    pand %xmm12, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSSE3-NEXT:    por %xmm4, %xmm5
; SSSE3-NEXT:    pxor %xmm8, %xmm7
; SSSE3-NEXT:    movdqa %xmm7, %xmm4
; SSSE3-NEXT:    pcmpgtd %xmm8, %xmm4
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
; SSSE3-NEXT:    pcmpeqd %xmm8, %xmm7
; SSSE3-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSSE3-NEXT:    pand %xmm6, %xmm7
; SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSSE3-NEXT:    por %xmm7, %xmm4
; SSSE3-NEXT:    pxor %xmm5, %xmm4
; SSSE3-NEXT:    movdqa %xmm4, %xmm5
; SSSE3-NEXT:    pandn %xmm3, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm11
; SSSE3-NEXT:    pandn %xmm9, %xmm3
; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm11[1,1,3,3]
; SSSE3-NEXT:    pand %xmm10, %xmm6
; SSSE3-NEXT:    por %xmm6, %xmm3
; SSSE3-NEXT:    pand %xmm4, %xmm3
; SSSE3-NEXT:    por %xmm5, %xmm3
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: v8i64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm8
; SSE41-NEXT:    movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
; SSE41-NEXT:    pxor %xmm10, %xmm0
; SSE41-NEXT:    psubq %xmm4, %xmm8
; SSE41-NEXT:    movdqa %xmm8, %xmm9
; SSE41-NEXT:    pxor %xmm10, %xmm9
; SSE41-NEXT:    movdqa %xmm0, %xmm11
; SSE41-NEXT:    pcmpeqd %xmm9, %xmm11
; SSE41-NEXT:    pcmpgtd %xmm9, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm12 = xmm0[0,0,2,2]
; SSE41-NEXT:    pand %xmm11, %xmm12
; SSE41-NEXT:    por %xmm0, %xmm12
; SSE41-NEXT:    pxor %xmm10, %xmm4
; SSE41-NEXT:    movdqa %xmm4, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2]
; SSE41-NEXT:    pand %xmm0, %xmm9
; SSE41-NEXT:    por %xmm4, %xmm9
; SSE41-NEXT:    pxor %xmm12, %xmm9
; SSE41-NEXT:    movapd {{.*#+}} xmm12 = [9223372036854775807,9223372036854775807]
; SSE41-NEXT:    movapd {{.*#+}} xmm11 = [9223372036854775808,9223372036854775808]
; SSE41-NEXT:    movapd %xmm11, %xmm4
; SSE41-NEXT:    movdqa %xmm8, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm12, %xmm4
; SSE41-NEXT:    movdqa %xmm9, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm8
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    pxor %xmm10, %xmm0
; SSE41-NEXT:    psubq %xmm5, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm4
; SSE41-NEXT:    pxor %xmm10, %xmm4
; SSE41-NEXT:    movdqa %xmm0, %xmm9
; SSE41-NEXT:    pcmpeqd %xmm4, %xmm9
; SSE41-NEXT:    pcmpgtd %xmm4, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm13 = xmm0[0,0,2,2]
; SSE41-NEXT:    pand %xmm9, %xmm13
; SSE41-NEXT:    por %xmm0, %xmm13
; SSE41-NEXT:    pxor %xmm10, %xmm5
; SSE41-NEXT:    movdqa %xmm5, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm5
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE41-NEXT:    pand %xmm0, %xmm4
; SSE41-NEXT:    por %xmm5, %xmm4
; SSE41-NEXT:    pxor %xmm13, %xmm4
; SSE41-NEXT:    movapd %xmm11, %xmm5
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm12, %xmm5
; SSE41-NEXT:    movdqa %xmm4, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    pxor %xmm10, %xmm0
; SSE41-NEXT:    psubq %xmm6, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm4
; SSE41-NEXT:    pxor %xmm10, %xmm4
; SSE41-NEXT:    movdqa %xmm0, %xmm9
; SSE41-NEXT:    pcmpeqd %xmm4, %xmm9
; SSE41-NEXT:    pcmpgtd %xmm4, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE41-NEXT:    pand %xmm9, %xmm5
; SSE41-NEXT:    por %xmm0, %xmm5
; SSE41-NEXT:    pxor %xmm10, %xmm6
; SSE41-NEXT:    movdqa %xmm6, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm6
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
; SSE41-NEXT:    pand %xmm0, %xmm4
; SSE41-NEXT:    por %xmm6, %xmm4
; SSE41-NEXT:    pxor %xmm5, %xmm4
; SSE41-NEXT:    movapd %xmm11, %xmm5
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm12, %xmm5
; SSE41-NEXT:    movdqa %xmm4, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm2
; SSE41-NEXT:    movdqa %xmm3, %xmm0
; SSE41-NEXT:    pxor %xmm10, %xmm0
; SSE41-NEXT:    psubq %xmm7, %xmm3
; SSE41-NEXT:    movdqa %xmm3, %xmm4
; SSE41-NEXT:    pxor %xmm10, %xmm4
; SSE41-NEXT:    movdqa %xmm0, %xmm5
; SSE41-NEXT:    pcmpeqd %xmm4, %xmm5
; SSE41-NEXT:    pcmpgtd %xmm4, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
; SSE41-NEXT:    pand %xmm5, %xmm6
; SSE41-NEXT:    por %xmm0, %xmm6
; SSE41-NEXT:    pxor %xmm10, %xmm7
; SSE41-NEXT:    movdqa %xmm7, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm10, %xmm0
; SSE41-NEXT:    pcmpgtd %xmm10, %xmm7
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
; SSE41-NEXT:    pand %xmm0, %xmm4
; SSE41-NEXT:    por %xmm7, %xmm4
; SSE41-NEXT:    pxor %xmm6, %xmm4
; SSE41-NEXT:    movdqa %xmm3, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm12, %xmm11
; SSE41-NEXT:    movdqa %xmm4, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm11, %xmm3
; SSE41-NEXT:    movapd %xmm8, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: v8i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm6
; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm7
; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT:    vpsubq %xmm4, %xmm7, %xmm4
; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm7
; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
; AVX1-NEXT:    vxorpd %ymm0, %ymm6, %ymm0
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807]
; AVX1-NEXT:    vmovapd {{.*#+}} ymm6 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX1-NEXT:    vblendvpd %ymm2, %ymm4, %ymm6, %ymm7
; AVX1-NEXT:    vblendvpd %ymm0, %ymm7, %ymm2, %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm7
; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm5
; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
; AVX1-NEXT:    vpsubq %xmm2, %xmm7, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm7
; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm3
; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm1, %ymm1
; AVX1-NEXT:    vxorpd %ymm1, %ymm5, %ymm1
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT:    vblendvpd %ymm2, %ymm4, %ymm6, %ymm3
; AVX1-NEXT:    vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: v8i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX2-NEXT:    vpcmpgtq %ymm4, %ymm2, %ymm5
; AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm2
; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %ymm0, %ymm5, %ymm0
; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm5 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807]
; AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm6 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vblendvpd %ymm2, %ymm5, %ymm6, %ymm7
; AVX2-NEXT:    vblendvpd %ymm0, %ymm7, %ymm2, %ymm0
; AVX2-NEXT:    vpcmpgtq %ymm4, %ymm3, %ymm2
; AVX2-NEXT:    vpsubq %ymm3, %ymm1, %ymm3
; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpxor %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    vblendvpd %ymm3, %ymm5, %ymm6, %ymm2
; AVX2-NEXT:    vblendvpd %ymm1, %ymm2, %ymm3, %ymm1
; AVX2-NEXT:    retq
;
; AVX512-LABEL: v8i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpcmpgtq %zmm2, %zmm1, %k0
; AVX512-NEXT:    vpsubq %zmm1, %zmm0, %zmm1
; AVX512-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
; AVX512-NEXT:    kxorw %k1, %k0, %k1
; AVX512-NEXT:    vpcmpgtq %zmm1, %zmm2, %k2
; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX512-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k2}
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm0
; AVX512-NEXT:    retq
  %z = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %x, <8 x i64> %y)
  ret <8 x i64> %z
}

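; i128 elements are scalarized through GPR pairs: subq/sbbq produce the
; wrapped 128-bit difference, setns/cmpb/setne compare the sign bits of the
; operands and of the result to detect signed overflow, and cmoveq keeps
; either the wrapped value or a saturation constant whose high word is
; INT64_MAX plus one when the difference is non-negative (giving INT64_MIN
; or INT64_MAX) and whose low word is the matching sign extension.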
define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; SSE-LABEL: v2i128:
; SSE:       # %bb.0:
; SSE-NEXT:    pushq %r15
; SSE-NEXT:    pushq %r14
; SSE-NEXT:    pushq %r13
; SSE-NEXT:    pushq %r12
; SSE-NEXT:    pushq %rbx
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %r11
; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %r14
; SSE-NEXT:    subq {{[0-9]+}}(%rsp), %rcx
; SSE-NEXT:    movq %r8, %r13
; SSE-NEXT:    sbbq %r14, %r13
; SSE-NEXT:    movq %r13, %r10
; SSE-NEXT:    sarq $63, %r10
; SSE-NEXT:    xorl %edi, %edi
; SSE-NEXT:    testq %r13, %r13
; SSE-NEXT:    setns %dil
; SSE-NEXT:    movabsq $9223372036854775807, %r12 # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT:    leaq (%rdi,%r12), %r15
; SSE-NEXT:    testq %r8, %r8
; SSE-NEXT:    setns %r8b
; SSE-NEXT:    cmpb %dil, %r8b
; SSE-NEXT:    setne %dil
; SSE-NEXT:    testq %r14, %r14
; SSE-NEXT:    setns %bl
; SSE-NEXT:    cmpb %bl, %r8b
; SSE-NEXT:    setne %bl
; SSE-NEXT:    testb %dil, %bl
; SSE-NEXT:    cmoveq %r13, %r15
; SSE-NEXT:    cmoveq %rcx, %r10
; SSE-NEXT:    subq %r9, %rsi
; SSE-NEXT:    movq %rdx, %rdi
; SSE-NEXT:    sbbq %r11, %rdi
; SSE-NEXT:    setns %bl
; SSE-NEXT:    movzbl %bl, %ebx
; SSE-NEXT:    addq %rbx, %r12
; SSE-NEXT:    movq %rdi, %rcx
; SSE-NEXT:    sarq $63, %rcx
; SSE-NEXT:    testq %r11, %r11
; SSE-NEXT:    setns %r8b
; SSE-NEXT:    testq %rdx, %rdx
; SSE-NEXT:    setns %dl
; SSE-NEXT:    cmpb %r8b, %dl
; SSE-NEXT:    setne %r8b
; SSE-NEXT:    cmpb %bl, %dl
; SSE-NEXT:    setne %dl
; SSE-NEXT:    testb %dl, %r8b
; SSE-NEXT:    cmoveq %rsi, %rcx
; SSE-NEXT:    cmoveq %rdi, %r12
; SSE-NEXT:    movq %r15, 24(%rax)
; SSE-NEXT:    movq %r10, 16(%rax)
; SSE-NEXT:    movq %r12, 8(%rax)
; SSE-NEXT:    movq %rcx, (%rax)
; SSE-NEXT:    popq %rbx
; SSE-NEXT:    popq %r12
; SSE-NEXT:    popq %r13
; SSE-NEXT:    popq %r14
; SSE-NEXT:    popq %r15
; SSE-NEXT:    retq
;
; AVX-LABEL: v2i128:
; AVX:       # %bb.0:
; AVX-NEXT:    pushq %r15
; AVX-NEXT:    pushq %r14
; AVX-NEXT:    pushq %r13
; AVX-NEXT:    pushq %r12
; AVX-NEXT:    pushq %rbx
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r11
; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r14
; AVX-NEXT:    subq {{[0-9]+}}(%rsp), %rcx
; AVX-NEXT:    movq %r8, %r13
; AVX-NEXT:    sbbq %r14, %r13
; AVX-NEXT:    movq %r13, %r10
; AVX-NEXT:    sarq $63, %r10
; AVX-NEXT:    xorl %edi, %edi
; AVX-NEXT:    testq %r13, %r13
; AVX-NEXT:    setns %dil
; AVX-NEXT:    movabsq $9223372036854775807, %r12 # imm = 0x7FFFFFFFFFFFFFFF
; AVX-NEXT:    leaq (%rdi,%r12), %r15
; AVX-NEXT:    testq %r8, %r8
; AVX-NEXT:    setns %r8b
; AVX-NEXT:    cmpb %dil, %r8b
; AVX-NEXT:    setne %dil
; AVX-NEXT:    testq %r14, %r14
; AVX-NEXT:    setns %bl
; AVX-NEXT:    cmpb %bl, %r8b
; AVX-NEXT:    setne %bl
; AVX-NEXT:    testb %dil, %bl
; AVX-NEXT:    cmoveq %r13, %r15
; AVX-NEXT:    cmoveq %rcx, %r10
; AVX-NEXT:    subq %r9, %rsi
; AVX-NEXT:    movq %rdx, %rdi
; AVX-NEXT:    sbbq %r11, %rdi
; AVX-NEXT:    setns %bl
; AVX-NEXT:    movzbl %bl, %ebx
; AVX-NEXT:    addq %rbx, %r12
; AVX-NEXT:    movq %rdi, %rcx
; AVX-NEXT:    sarq $63, %rcx
; AVX-NEXT:    testq %r11, %r11
; AVX-NEXT:    setns %r8b
; AVX-NEXT:    testq %rdx, %rdx
; AVX-NEXT:    setns %dl
; AVX-NEXT:    cmpb %r8b, %dl
; AVX-NEXT:    setne %r8b
; AVX-NEXT:    cmpb %bl, %dl
; AVX-NEXT:    setne %dl
; AVX-NEXT:    testb %dl, %r8b
; AVX-NEXT:    cmoveq %rsi, %rcx
; AVX-NEXT:    cmoveq %rdi, %r12
; AVX-NEXT:    movq %r15, 24(%rax)
; AVX-NEXT:    movq %r10, 16(%rax)
; AVX-NEXT:    movq %r12, 8(%rax)
; AVX-NEXT:    movq %rcx, (%rax)
; AVX-NEXT:    popq %rbx
; AVX-NEXT:    popq %r12
; AVX-NEXT:    popq %r13
; AVX-NEXT:    popq %r14
; AVX-NEXT:    popq %r15
; AVX-NEXT:    retq
  %z = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %x, <2 x i128> %y)
  ret <2 x i128> %z
}