diff --git a/llvm/test/CodeGen/X86/vector-shuffle-masked.ll b/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
index 2837c28a4841..60dc30afa7de 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
@@ -236,3 +236,453 @@ define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
   %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> zeroinitializer
   ret <8 x i32> %res
 }
+
+define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v4i32_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti32x4 $0, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v4i32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @mask_extract_v16i32_v4i32_2(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v4i32_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @mask_extract_v16i32_v4i32_3(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v4i32_3:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti32x4 $3, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v4f32_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf32x4 $0, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> %passthru
+  ret <4 x float> %res
+}
+
+define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v4f32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> %passthru
+  ret <4 x float> %res
+}
+
+define <4 x float> @mask_extract_v16f32_v4f32_2(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v4f32_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> %passthru
+  ret <4 x float> %res
+}
+
+define <4 x float> @mask_extract_v16f32_v4f32_3(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v4f32_3:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> %passthru
+  ret <4 x float> %res
+}
+
+define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v8i32_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti32x8 $0, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> %passthru
+  ret <8 x i32> %res
+}
+
+define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v8i32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> %passthru
+  ret <8 x i32> %res
+}
+
+define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v8f32_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf32x8 $0, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x float> %shuffle, <8 x float> %passthru
+  ret <8 x float> %res
+}
+
+define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v8f32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x float> %shuffle, <8 x float> %passthru
+  ret <8 x float> %res
+}
+
+define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v2i64_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti64x2 $0, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v2i64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @mask_extract_v8i64_v2i64_2(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v2i64_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti64x2 $2, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 4, i32 5>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @mask_extract_v8i64_v2i64_3(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v2i64_3:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti64x2 $3, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v2f64_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf64x2 $0, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> %passthru
+  ret <2 x double> %res
+}
+
+define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v2f64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> %passthru
+  ret <2 x double> %res
+}
+
+define <2 x double> @mask_extract_v8f64_v2f64_2(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v2f64_2:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf64x2 $2, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 4, i32 5>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> %passthru
+  ret <2 x double> %res
+}
+
+define <2 x double> @mask_extract_v8f64_v2f64_3(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v2f64_3:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf64x2 $3, %zmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> %passthru
+  ret <2 x double> %res
+}
+
+define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v4i64_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti64x4 $0, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> %passthru
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v4i64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> %passthru
+  ret <4 x i64> %res
+}
+
+define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v4f64_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf64x4 $0, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x double> %shuffle, <4 x double> %passthru
+  ret <4 x double> %res
+}
+
+define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v4f64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x double> %shuffle, <4 x double> %passthru
+  ret <4 x double> %res
+}
+
+define <8 x i32> @mask_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v8i32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle.cast = bitcast <4 x i64> %shuffle to <8 x i32>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle.cast, <8 x i32> %passthru
+  ret <8 x i32> %res
+}
+
+define <8 x float> @mask_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v8f32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle.cast = bitcast <4 x double> %shuffle to <8 x float>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x float> %shuffle.cast, <8 x float> %passthru
+  ret <8 x float> %res
+}
+
+define <4 x i32> @mask_cast_extract_v8i64_v4i32_1(<8 x i64> %a, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v8i64_v4i32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle.cast = bitcast <2 x i64> %shuffle to <4 x i32>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle.cast, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x float> @mask_cast_extract_v8f64_v4f32_1(<8 x double> %a, <4 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v8f64_v4f32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle.cast = bitcast <2 x double> %shuffle to <4 x float>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle.cast, <4 x float> %passthru
+  ret <4 x float> %res
+}
+
+define <4 x i64> @mask_cast_extract_v16i32_v4i64_1(<16 x i32> %a, <4 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v16i32_v4i64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %shuffle.cast = bitcast <8 x i32> %shuffle to <4 x i64>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle.cast, <4 x i64> %passthru
+  ret <4 x i64> %res
+}
+
+define <4 x double> @mask_cast_extract_v16f32_v4f64_1(<16 x float> %a, <4 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v16f32_v4f64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %shuffle.cast = bitcast <8 x float> %shuffle to <4 x double>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x double> %shuffle.cast, <4 x double> %passthru
+  ret <4 x double> %res
+}
+
+define <2 x i64> @mask_cast_extract_v16i32_v2i64_1(<16 x i32> %a, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v16i32_v2i64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle.cast = bitcast <4 x i32> %shuffle to <2 x i64>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle.cast, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x double> @mask_cast_extract_v16f32_v2f64_1(<16 x float> %a, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v16f32_v2f64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm0
+; CHECK-NEXT: kmovb %edi, %k1
+; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle.cast = bitcast <4 x float> %shuffle to <2 x double>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle.cast, <2 x double> %passthru
+  ret <2 x double> %res
+}