[X86] Enable custom splitting of v8i64/v16i32 sext/zext for avx/avx2 when the input type will be promoted by the type legalizer to 128 bits.
If the input type will be promoted to 128 bits, it's better to put a sign_extend_inreg/and in the 128-bit register before the split occurs. Otherwise we end up doing it on each half in the wider register. Some of the overflow arithmetic tests are regressions, but I think we can make some improvement using getSetccResultType in DAG combine and/or type legalization.

llvm-svn: 354709
commit a9697f24cf
parent b95ca56361
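For illustration, a minimal sketch of the kind of input that takes the new path (a hypothetical reduced function written for this note, not one of the commit's test cases): the source vector is smaller than 128 bits, but the type legalizer promotes its element type to give a 128-bit vector, so the extend-in-register can be done once before the wide result is split.

; Hypothetical example, assuming avx2: <8 x i8> is not a 128-bit vector, but
; the type legalizer promotes it to <8 x i16> (128 bits), so the
; sign_extend_inreg happens once in an xmm register before the <8 x i64>
; result is split into 256-bit halves.
define <8 x i64> @sext_v8i8_v8i64(<8 x i8> %a) {
  %e = sext <8 x i8> %a to <8 x i64>
  ret <8 x i64> %e
}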
@@ -1137,13 +1137,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::SRA, VT, Custom);
     }
 
-    if (ExperimentalVectorWideningLegalization) {
-      // These types need custom splitting if their input is a 128-bit vector.
-      setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
-      setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
-      setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
-      setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
-    }
+    // These types need custom splitting if their input is a 128-bit vector.
+    setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
+    setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
+    setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
+    setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
 
     setOperationAction(ISD::ROTL,              MVT::v8i32,  Custom);
     setOperationAction(ISD::ROTL,              MVT::v16i16, Custom);
@@ -26886,14 +26884,13 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
   }
   case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND: {
-    if (!ExperimentalVectorWideningLegalization)
-      return;
-
     EVT VT = N->getValueType(0);
     SDValue In = N->getOperand(0);
     EVT InVT = In.getValueType();
     if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
-        (InVT == MVT::v4i16 || InVT == MVT::v4i8)) {
+        (InVT == MVT::v4i16 || InVT == MVT::v4i8) &&
+        getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector) {
+      assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
       // Custom split this so we can extend i8/i16->i32 invec. This is better
       // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
       // sra. Then extending from i32 to i64 using pcmpgt. By custom splitting
@@ -26918,7 +26915,21 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
       return;
     }
 
-    if ((VT == MVT::v16i32 || VT == MVT::v8i64) && InVT.is128BitVector()) {
+    if (VT == MVT::v16i32 || VT == MVT::v8i64) {
+      if (!InVT.is128BitVector()) {
+        // Not a 128 bit vector, but maybe type legalization will promote
+        // it to 128 bits.
+        if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
+          return;
+        InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
+        if (!InVT.is128BitVector())
+          return;
+
+        // Promote the input to 128 bits. Type legalization will turn this into
+        // zext_inreg/sext_inreg.
+        In = DAG.getNode(N->getOpcode(), dl, InVT, In);
+      }
+
       // Perform custom splitting instead of the two stage extend we would get
       // by default.
       EVT LoVT, HiVT;
@@ -854,42 +854,44 @@ define <16 x i32> @saddo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
 ; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm5, %xmm7
 ; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm7
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpeqd %xmm7, %xmm3, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm5, %xmm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpeqd %xmm7, %xmm1, %xmm11
 ; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm5, %xmm7
 ; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm12
 ; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm5, %xmm7
 ; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm7
 ; AVX1-NEXT:    vpcmpeqd %xmm12, %xmm7, %xmm12
 ; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
-; AVX1-NEXT:    vpaddd %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm5, %xmm6
+; AVX1-NEXT:    vpaddd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm5, %xmm6
 ; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm3, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm5, %xmm2
 ; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm7, %xmm2
 ; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vandps %ymm2, %ymm11, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm10, %ymm4
-; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vpmovsxwd %xmm8, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT:    vmovaps %ymm4, 32(%rdi)
-; AVX1-NEXT:    vmovaps %ymm3, (%rdi)
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm11, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm8, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm10, %ymm3
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rdi)
+; AVX1-NEXT:    vmovaps %ymm2, (%rdi)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: saddo_v16i32:
@@ -920,8 +922,10 @@ define <16 x i32> @saddo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX2-NEXT:    vpandn %ymm5, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX2-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm3, 32(%rdi)
 ; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
 ; AVX2-NEXT:    retq
@@ -1071,21 +1075,14 @@ define <16 x i32> @saddo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqb %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpandn %xmm3, %xmm0, %xmm1
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vmovdqa %xmm6, (%rdi)
 ; AVX1-NEXT:    retq
@@ -1104,14 +1101,9 @@ define <16 x i32> @saddo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX2-NEXT:    vpxor %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqb %xmm0, %xmm5, %xmm0
 ; AVX2-NEXT:    vpandn %xmm3, %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %xmm6, (%rdi)
 ; AVX2-NEXT:    retq
 ;
@@ -1275,8 +1275,8 @@ define <16 x i32> @smulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpmuldq %xmm4, %xmm6, %xmm7
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
-; AVX1-NEXT:    vpmulld %xmm4, %xmm6, %xmm9
-; AVX1-NEXT:    vpsrad $31, %xmm9, %xmm6
+; AVX1-NEXT:    vpmulld %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpsrad $31, %xmm4, %xmm6
 ; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm5, %xmm6
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm8, %xmm8
 ; AVX1-NEXT:    vpxor %xmm8, %xmm6, %xmm6
@@ -1286,23 +1286,23 @@ define <16 x i32> @smulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpmuldq %xmm3, %xmm1, %xmm7
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
-; AVX1-NEXT:    vpmulld %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm1
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm5, %xmm1
-; AVX1-NEXT:    vpxor %xmm8, %xmm1, %xmm1
-; AVX1-NEXT:    vpackssdw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpxor %xmm8, %xmm3, %xmm3
+; AVX1-NEXT:    vpackssdw %xmm6, %xmm3, %xmm9
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vpmuldq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm6, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmuldq %xmm5, %xmm7, %xmm6
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3],xmm6[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
 ; AVX1-NEXT:    vpmulld %xmm5, %xmm7, %xmm5
 ; AVX1-NEXT:    vpsrad $31, %xmm5, %xmm6
-; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpxor %xmm8, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpxor %xmm8, %xmm3, %xmm3
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vpmuldq %xmm6, %xmm7, %xmm6
@@ -1313,19 +1313,21 @@ define <16 x i32> @smulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm6, %xmm2
 ; AVX1-NEXT:    vpxor %xmm8, %xmm2, %xmm2
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm3, %ymm3
-; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovaps %ymm3, 32(%rdi)
-; AVX1-NEXT:    vmovaps %ymm4, (%rdi)
+; AVX1-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpacksswb %xmm9, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm4
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vmovaps %ymm4, 32(%rdi)
+; AVX1-NEXT:    vmovaps %ymm3, (%rdi)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: smulo_v16i32:
@@ -1355,8 +1357,10 @@ define <16 x i32> @smulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX2-NEXT:    vpxor %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX2-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm3, 32(%rdi)
 ; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
 ; AVX2-NEXT:    retq
@@ -1562,21 +1566,14 @@ define <16 x i32> @smulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
 ; AVX1-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX1-NEXT:    retq
@@ -1600,14 +1597,9 @@ define <16 x i32> @smulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX2-NEXT:    vpcmpeqb %xmm0, %xmm2, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %xmm3, (%rdi)
 ; AVX2-NEXT:    retq
 ;
@@ -882,10 +882,10 @@ define <16 x i32> @ssubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm13
 ; AVX1-NEXT:    vpcmpgtd %xmm13, %xmm5, %xmm7
 ; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm7
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpeqd %xmm7, %xmm3, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm5, %xmm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpeqd %xmm7, %xmm1, %xmm7
 ; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm11
 ; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm5, %xmm7
 ; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm12
@@ -894,32 +894,34 @@ define <16 x i32> @ssubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpcmpeqd %xmm12, %xmm7, %xmm6
 ; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
 ; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm6, %ymm11
-; AVX1-NEXT:    vpsubd %xmm13, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm5, %xmm6
+; AVX1-NEXT:    vpsubd %xmm13, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm5, %xmm6
 ; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm3, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm5, %xmm2
 ; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm7, %xmm2
 ; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vandps %ymm2, %ymm11, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm10, %ymm4
-; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vpmovsxwd %xmm9, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT:    vmovaps %ymm4, 32(%rdi)
-; AVX1-NEXT:    vmovaps %ymm3, (%rdi)
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm1, %ymm11, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm9, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm10, %ymm3
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
+; AVX1-NEXT:    vmovaps %ymm3, 32(%rdi)
+; AVX1-NEXT:    vmovaps %ymm2, (%rdi)
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: ssubo_v16i32:
@@ -952,8 +954,10 @@ define <16 x i32> @ssubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX2-NEXT:    vpandn %ymm0, %ymm5, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX2-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm3, 32(%rdi)
 ; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
 ; AVX2-NEXT:    retq
@@ -1107,21 +1111,14 @@ define <16 x i32> @ssubo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX1-NEXT:    vpcmpeqb %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpandn %xmm0, %xmm3, %xmm1
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vmovdqa %xmm6, (%rdi)
 ; AVX1-NEXT:    retq
@@ -1141,14 +1138,9 @@ define <16 x i32> @ssubo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX2-NEXT:    vpcmpeqb %xmm0, %xmm5, %xmm0
 ; AVX2-NEXT:    vpxor %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpandn %xmm0, %xmm3, %xmm1
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %xmm6, (%rdi)
 ; AVX2-NEXT:    retq
 ;
@@ -646,15 +646,17 @@ define <16 x i32> @uaddo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpxor %xmm6, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
 ; AVX1-NEXT:    vmovaps %ymm3, 32(%rdi)
 ; AVX1-NEXT:    vmovaps %ymm2, (%rdi)
@@ -675,8 +677,10 @@ define <16 x i32> @uaddo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX2-NEXT:    vpxor %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX2-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm3, 32(%rdi)
 ; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
 ; AVX2-NEXT:    retq
@@ -786,21 +790,14 @@ define <16 x i32> @uaddo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX1-NEXT:    vpcmpeqb %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
 ; AVX1-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX1-NEXT:    retq
@@ -812,14 +809,9 @@ define <16 x i32> @uaddo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX2-NEXT:    vpcmpeqb %xmm0, %xmm2, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX2-NEXT:    retq
 ;
@@ -1086,26 +1086,26 @@ define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpmuludq %xmm6, %xmm7, %xmm6
 ; AVX1-NEXT:    vpmuludq %xmm10, %xmm12, %xmm7
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7]
 ; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
-; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm6, %xmm6
+; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm7, %xmm7
 ; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm9, %xmm9
-; AVX1-NEXT:    vpxor %xmm9, %xmm6, %xmm6
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; AVX1-NEXT:    vpxor %xmm9, %xmm7, %xmm7
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; AVX1-NEXT:    vpmuludq %xmm7, %xmm4, %xmm4
-; AVX1-NEXT:    vpmuludq %xmm3, %xmm1, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2,3],xmm7[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm1, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3],xmm6[4,5],xmm4[6,7]
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm4, %xmm4
 ; AVX1-NEXT:    vpxor %xmm9, %xmm4, %xmm4
-; AVX1-NEXT:    vpackssdw %xmm6, %xmm4, %xmm11
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[1,1,3,3]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpackssdw %xmm7, %xmm4, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
 ; AVX1-NEXT:    vpmuludq %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm6, %xmm4, %xmm7
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm5, %xmm5
@@ -1119,19 +1119,21 @@ define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm5, %xmm5
 ; AVX1-NEXT:    vpxor %xmm9, %xmm5, %xmm5
 ; AVX1-NEXT:    vpackssdw %xmm13, %xmm5, %xmm5
-; AVX1-NEXT:    vpmulld %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpacksswb %xmm11, %xmm5, %xmm5
+; AVX1-NEXT:    vpmulld %xmm6, %xmm4, %xmm4
 ; AVX1-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm2
 ; AVX1-NEXT:    vpmulld %xmm10, %xmm12, %xmm0
 ; AVX1-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm3
-; AVX1-NEXT:    vpmovsxwd %xmm5, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vpmovsxwd %xmm11, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm11[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm4, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
 ; AVX1-NEXT:    vmovaps %ymm3, 32(%rdi)
 ; AVX1-NEXT:    vmovaps %ymm2, (%rdi)
@@ -1161,10 +1163,12 @@ define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX2-NEXT:    vpxor %ymm6, %ymm5, %ymm5
 ; AVX2-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX2-NEXT:    vpackssdw %xmm6, %xmm5, %xmm5
+; AVX2-NEXT:    vpacksswb %xmm4, %xmm5, %xmm4
 ; AVX2-NEXT:    vpmulld %ymm2, %ymm0, %ymm2
 ; AVX2-NEXT:    vpmulld %ymm3, %ymm1, %ymm3
-; AVX2-NEXT:    vpmovsxwd %xmm5, %ymm0
-; AVX2-NEXT:    vpmovsxwd %xmm4, %ymm1
+; AVX2-NEXT:    vpmovsxbd %xmm4, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm4[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm3, 32(%rdi)
 ; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
 ; AVX2-NEXT:    retq
@@ -1357,21 +1361,14 @@ define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
 ; AVX1-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX1-NEXT:    retq
@@ -1391,14 +1388,9 @@ define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX2-NEXT:    retq
 ;
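The `umulo_v16i8` hunks show the same idea when the mask is already 128-bit: the old lowering zero-extended it step by step and then restored the sign bits with `vpslld $31` / `vpsrad $31` on every piece, while the new one sign-extends each quarter directly with `vpmovsxbd`. A matching sketch, under the same assumption about the test body:

declare { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8>, <16 x i8>)

define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind {
  %t = call { <16 x i8>, <16 x i1> } @llvm.umul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
  %val = extractvalue { <16 x i8>, <16 x i1> } %t, 0
  %obit = extractvalue { <16 x i8>, <16 x i1> } %t, 1
  ; The v16i1 mask lives in one xmm register after the compare, so the
  ; sext to v16i32 can extend it piecewise without the shift fix-ups.
  %res = sext <16 x i1> %obit to <16 x i32>
  store <16 x i8> %val, <16 x i8>* %p2
  ret <16 x i32> %res
}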
@@ -684,15 +684,17 @@ define <16 x i32> @usubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpxor %xmm6, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
 ; AVX1-NEXT:    vmovaps %ymm3, 32(%rdi)
 ; AVX1-NEXT:    vmovaps %ymm2, (%rdi)
@@ -713,8 +715,10 @@ define <16 x i32> @usubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX2-NEXT:    vpxor %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX2-NEXT:    vpackssdw %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %ymm3, 32(%rdi)
 ; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
 ; AVX2-NEXT:    retq
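The `usubo_v16i32` hunks follow the same pattern as `umulo_v16i32`, just with the subtraction-overflow intrinsic: the v16i1 borrow mask is packed to 128 bits before the sext is split. A minimal sketch of the kind of IR involved, assuming the standard `llvm.usub.with.overflow` pattern (the exact test body is not shown in this diff):

declare { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32>, <16 x i32>)

define <16 x i32> @usubo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2) nounwind {
  %t = call { <16 x i32>, <16 x i1> } @llvm.usub.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1)
  %val = extractvalue { <16 x i32>, <16 x i1> } %t, 0
  %obit = extractvalue { <16 x i32>, <16 x i1> } %t, 1
  ; The sext of the borrow mask to v16i32 is the custom-split operation.
  %res = sext <16 x i1> %obit to <16 x i32>
  store <16 x i32> %val, <16 x i32>* %p2
  ret <16 x i32> %res
}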
@@ -824,21 +828,14 @@ define <16 x i32> @usubo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX1-NEXT:    vpcmpeqb %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpslld $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
 ; AVX1-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX1-NEXT:    retq
@@ -850,14 +847,9 @@ define <16 x i32> @usubo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
 ; AVX2-NEXT:    vpcmpeqb %xmm0, %xmm2, %xmm0
 ; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
 ; AVX2-NEXT:    vmovdqa %xmm2, (%rdi)
 ; AVX2-NEXT:    retq
 ;
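`usubo_v16i8` mirrors `umulo_v16i8`: the zext-then-shift sequence on each wide half is replaced by direct `vpmovsxbd` extensions of the 128-bit mask. A matching sketch, under the same assumption about the test body:

declare { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8>, <16 x i8>)

define <16 x i32> @usubo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind {
  %t = call { <16 x i8>, <16 x i1> } @llvm.usub.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
  %val = extractvalue { <16 x i8>, <16 x i1> } %t, 0
  %obit = extractvalue { <16 x i8>, <16 x i1> } %t, 1
  %res = sext <16 x i1> %obit to <16 x i32>
  store <16 x i8> %val, <16 x i8>* %p2
  ret <16 x i32> %res
}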
@@ -6192,35 +6192,29 @@ define <8 x i64> @sext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $26, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $26, %xmm1, %xmm1
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $10, %xmm0, %xmm0
+; AVX1-NEXT:    vpsraw $10, %xmm0, %xmm1
+; AVX1-NEXT:    vpmovsxwq %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxwq %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxwq %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxwq %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $26, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $26, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: sext_8i6_to_8i64:
 ; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovd %edi, %xmm0
 ; AVX2-NEXT:    vpbroadcastw %xmm0, %xmm0
-; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpslld $26, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $26, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT:    vpslld $26, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $26, %xmm1, %xmm1
-; AVX2-NEXT:    vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $10, %xmm0, %xmm0
+; AVX2-NEXT:    vpsraw $10, %xmm0, %xmm1
+; AVX2-NEXT:    vpmovsxwq %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxwq %xmm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: sext_8i6_to_8i64:
@@ -6210,35 +6210,29 @@ define <8 x i64> @sext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpslld $26, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $26, %xmm1, %xmm1
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $10, %xmm0, %xmm0
+; AVX1-NEXT:    vpsraw $10, %xmm0, %xmm1
+; AVX1-NEXT:    vpmovsxwq %xmm1, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxwq %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovsxwq %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovsxwq %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpslld $26, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $26, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: sext_8i6_to_8i64:
 ; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovd %edi, %xmm0
 ; AVX2-NEXT:    vpbroadcastw %xmm0, %xmm0
-; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpslld $26, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $26, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT:    vpslld $26, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $26, %xmm1, %xmm1
-; AVX2-NEXT:    vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $10, %xmm0, %xmm0
+; AVX2-NEXT:    vpsraw $10, %xmm0, %xmm1
+; AVX2-NEXT:    vpmovsxwq %xmm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovsxwq %xmm1, %ymm1
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: sext_8i6_to_8i64:
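The `sext_8i6_to_8i64` hunks show the commit's point most directly: the i6 sign_extend_inreg is now done once in the 128-bit register as a 16-bit shift pair (`vpsllw $10` / `vpsraw $10`, since i6 is promoted to i16) before the value is split and widened to four xmm (AVX1) or two ymm (AVX2) pieces, rather than as `vpslld $26` / `vpsrad $26` on each 32-bit half after the split. A plausible sketch of the test body, assuming the usual broadcast-plus-constant i6 pattern:

define <8 x i64> @sext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
entry:
  ; Build an <8 x i6> value; i6 lanes are promoted to i16 by type
  ; legalization, so the sext below needs a sign_extend_inreg from bit 6.
  %a = trunc i32 %x to i6
  %b = insertelement <8 x i6> undef, i6 %a, i32 0
  %c = shufflevector <8 x i6> %b, <8 x i6> undef, <8 x i32> zeroinitializer
  %d = add <8 x i6> %c, <i6 0, i6 1, i6 2, i6 3, i6 4, i6 5, i6 6, i6 7>
  %e = sext <8 x i6> %d to <8 x i64>
  ret <8 x i64> %e
}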
@@ -2472,31 +2472,27 @@ define <8 x i64> @zext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [63,63,63,63]
-; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_8i6_to_8i64:
 ; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovd %edi, %xmm0
 ; AVX2-NEXT:    vpbroadcastw %xmm0, %xmm0
-; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [63,63,63,63]
-; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_8i6_to_8i64:
@@ -2486,31 +2486,27 @@ define <8 x i64> @zext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm2 = [63,63,63,63]
-; AVX1-NEXT:    vandps %ymm2, %ymm1, %ymm1
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: zext_8i6_to_8i64:
 ; AVX2:       # %bb.0: # %entry
 ; AVX2-NEXT:    vmovd %edi, %xmm0
 ; AVX2-NEXT:    vpbroadcastw %xmm0, %xmm0
-; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm1
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [63,63,63,63]
-; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX2-NEXT:    vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: zext_8i6_to_8i64:
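For `zext_8i6_to_8i64`, the masking down to 6 bits now happens once as a 128-bit `vpand` before the split, replacing the two 256-bit `vandps`/`vpand` operations against a broadcast [63,63,63,63]. A plausible sketch of the test body, mirroring the sext variant under the same assumption:

define <8 x i64> @zext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
entry:
  %a = trunc i32 %x to i6
  %b = insertelement <8 x i6> undef, i6 %a, i32 0
  %c = shufflevector <8 x i6> %b, <8 x i6> undef, <8 x i32> zeroinitializer
  %d = add <8 x i6> %c, <i6 0, i6 1, i6 2, i6 3, i6 4, i6 5, i6 6, i6 7>
  ; zext from i6: the 'and 63' implied here is now emitted as a single
  ; 128-bit vpand before the value is split into wider registers.
  %e = zext <8 x i6> %d to <8 x i64>
  ret <8 x i64> %e
}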