; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --force-update
; RUN: llc -verify-machineinstrs -mcpu=sapphirerapids -mattr=+false-deps-range -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=ENABLE
; RUN: llc -verify-machineinstrs -mcpu=sapphirerapids -mattr=-false-deps-range -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=DISABLE

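; These tests cover undef-register dependency breaking for the AVX-512 DQ
; vrange* instructions. The inline asm clobbers almost every XMM register, so
; the destination of each vrange* would otherwise carry a false dependency on
; a stale register. With +false-deps-range (ENABLE) the backend zeroes the
; destination first (vxorps/vpxor); with -false-deps-range (DISABLE) it emits
; the vrange* directly.
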
define <4 x float> @rangeps_128(<4 x float> %a0, <4 x float> %a1) {
; ENABLE-LABEL: rangeps_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangeps $88, %xmm2, %xmm0, %xmm1
; ENABLE-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; ENABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangeps $88, %xmm2, %xmm0, %xmm1
; DISABLE-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; DISABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <4 x float> @llvm.x86.avx512.mask.range.ps.128(<4 x float> %a0, <4 x float> %a1, i32 88, <4 x float> undef, i8 -1)
  %3 = fadd <4 x float> %a0, %a1
  %res = fadd <4 x float> %2, %3
  ret <4 x float> %res
}

define <4 x float> @rangeps_mem_128(<4 x float> %a0, <4 x float>* %p1) {
; ENABLE-LABEL: rangeps_mem_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangeps $88, (%rdi), %xmm1, %xmm0
; ENABLE-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_mem_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; DISABLE-NEXT:    vrangeps $88, (%rdi), %xmm1, %xmm0
; DISABLE-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <4 x float>, <4 x float>* %p1, align 64
  %2 = call <4 x float> @llvm.x86.avx512.mask.range.ps.128(<4 x float> %a0, <4 x float> %a1, i32 88, <4 x float> undef, i8 -1)
  %res = fadd <4 x float> %2, %a0
  ret <4 x float> %res
}

define <4 x float> @rangeps_broadcast_128(<4 x float> %a0, float* %p1) {
; ENABLE-LABEL: rangeps_broadcast_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangeps $88, (%rdi){1to4}, %xmm1, %xmm0
; ENABLE-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_broadcast_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; DISABLE-NEXT:    vrangeps $88, (%rdi){1to4}, %xmm1, %xmm0
; DISABLE-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %v1 = load float, float* %p1, align 4
  %t0 = insertelement <4 x float> undef, float %v1, i64 0
  %a1 = shufflevector <4 x float> %t0, <4 x float> undef, <4 x i32> zeroinitializer
  %2 = call <4 x float> @llvm.x86.avx512.mask.range.ps.128(<4 x float> %a0, <4 x float> %a1, i32 88, <4 x float> undef, i8 -1)
  %res = fadd <4 x float> %2, %a0
  ret <4 x float> %res
}

define <4 x float> @rangeps_maskz_128(<4 x float> %a0, <4 x float> %a1, i8* %pmask) {
; ENABLE-LABEL: rangeps_maskz_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangeps $88, %xmm2, %xmm0, %xmm1 {%k1} {z}
; ENABLE-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; ENABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_maskz_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangeps $88, %xmm2, %xmm0, %xmm1 {%k1} {z}
; DISABLE-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; DISABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i8, i8* %pmask
  %2 = call <4 x float> @llvm.x86.avx512.mask.range.ps.128(<4 x float> %a0, <4 x float> %a1, i32 88, <4 x float> undef, i8 %mask)
  %3 = fadd <4 x float> %a0, %a1
  %res = fadd <4 x float> %2, %3
  ret <4 x float> %res
}

declare <4 x float> @llvm.x86.avx512.mask.range.ps.128(<4 x float>, <4 x float>, i32, <4 x float>, i8) nounwind readnone

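; 256-bit forms: the spills/reloads move to %ymm registers, and ENABLE still
; breaks the dependency with a 128-bit vxorps on the destination.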
define <8 x float> @rangeps_256(<8 x float> %a0, <8 x float> %a1) {
; ENABLE-LABEL: rangeps_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangeps $88, %ymm2, %ymm0, %ymm1
; ENABLE-NEXT:    vaddps %ymm2, %ymm0, %ymm0
; ENABLE-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; DISABLE-NEXT:    vrangeps $88, %ymm2, %ymm0, %ymm1
; DISABLE-NEXT:    vaddps %ymm2, %ymm0, %ymm0
; DISABLE-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <8 x float> @llvm.x86.avx512.mask.range.ps.256(<8 x float> %a0, <8 x float> %a1, i32 88, <8 x float> undef, i8 -1)
  %3 = fadd <8 x float> %a0, %a1
  %res = fadd <8 x float> %2, %3
  ret <8 x float> %res
}

define <8 x float> @rangeps_mem_256(<8 x float> %a0, <8 x float>* %p1) {
; ENABLE-LABEL: rangeps_mem_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangeps $88, (%rdi), %ymm1, %ymm0
; ENABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_mem_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; DISABLE-NEXT:    vrangeps $88, (%rdi), %ymm1, %ymm0
; DISABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <8 x float>, <8 x float>* %p1, align 64
  %2 = call <8 x float> @llvm.x86.avx512.mask.range.ps.256(<8 x float> %a0, <8 x float> %a1, i32 88, <8 x float> undef, i8 -1)
  %res = fadd <8 x float> %2, %a0
  ret <8 x float> %res
}

define <8 x float> @rangeps_broadcast_256(<8 x float> %a0, float* %p1) {
; ENABLE-LABEL: rangeps_broadcast_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangeps $88, (%rdi){1to8}, %ymm1, %ymm0
; ENABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_broadcast_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; DISABLE-NEXT:    vrangeps $88, (%rdi){1to8}, %ymm1, %ymm0
; DISABLE-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %v1 = load float, float* %p1, align 4
  %t0 = insertelement <8 x float> undef, float %v1, i64 0
  %a1 = shufflevector <8 x float> %t0, <8 x float> undef, <8 x i32> zeroinitializer
  %2 = call <8 x float> @llvm.x86.avx512.mask.range.ps.256(<8 x float> %a0, <8 x float> %a1, i32 88, <8 x float> undef, i8 -1)
  %res = fadd <8 x float> %2, %a0
  ret <8 x float> %res
}

define <8 x float> @rangeps_maskz_256(<8 x float> %a0, <8 x float> %a1, i8* %pmask) {
; ENABLE-LABEL: rangeps_maskz_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangeps $44, %ymm2, %ymm0, %ymm1 {%k1} {z}
; ENABLE-NEXT:    vaddps %ymm2, %ymm0, %ymm0
; ENABLE-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_maskz_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; DISABLE-NEXT:    vrangeps $44, %ymm2, %ymm0, %ymm1 {%k1} {z}
; DISABLE-NEXT:    vaddps %ymm2, %ymm0, %ymm0
; DISABLE-NEXT:    vaddps %ymm0, %ymm1, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i8, i8* %pmask
  %2 = call <8 x float> @llvm.x86.avx512.mask.range.ps.256(<8 x float> %a0, <8 x float> %a1, i32 44, <8 x float> undef, i8 %mask)
  %3 = fadd <8 x float> %a0, %a1
  %res = fadd <8 x float> %2, %3
  ret <8 x float> %res
}

declare <8 x float> @llvm.x86.avx512.mask.range.ps.256(<8 x float>, <8 x float>, i32, <8 x float>, i8) nounwind readnone

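; 512-bit forms: the .512 intrinsic takes an extra rounding operand (i32 4),
; and the zeroing idiom is vpxor rather than vxorps.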
define <16 x float> @rangeps_512(<16 x float> %a0, <16 x float> %a1) {
; ENABLE-LABEL: rangeps_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangeps $88, %zmm2, %zmm0, %zmm1
; ENABLE-NEXT:    vaddps %zmm2, %zmm0, %zmm0
; ENABLE-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; DISABLE-NEXT:    vrangeps $88, %zmm2, %zmm0, %zmm1
; DISABLE-NEXT:    vaddps %zmm2, %zmm0, %zmm0
; DISABLE-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <16 x float> @llvm.x86.avx512.mask.range.ps.512(<16 x float> %a0, <16 x float> %a1, i32 88, <16 x float> undef, i16 -1, i32 4)
  %3 = fadd <16 x float> %a0, %a1
  %res = fadd <16 x float> %2, %3
  ret <16 x float> %res
}

define <16 x float> @rangeps_mem_512(<16 x float> %a0, <16 x float>* %p1) {
; ENABLE-LABEL: rangeps_mem_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangeps $88, (%rdi), %zmm1, %zmm0
; ENABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_mem_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vrangeps $88, (%rdi), %zmm1, %zmm0
; DISABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <16 x float>, <16 x float>* %p1, align 64
  %2 = call <16 x float> @llvm.x86.avx512.mask.range.ps.512(<16 x float> %a0, <16 x float> %a1, i32 88, <16 x float> undef, i16 -1, i32 4)
  %res = fadd <16 x float> %2, %a0
  ret <16 x float> %res
}

define <16 x float> @rangeps_broadcast_512(<16 x float> %a0, float* %p1) {
; ENABLE-LABEL: rangeps_broadcast_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangeps $88, (%rdi){1to16}, %zmm1, %zmm0
; ENABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_broadcast_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vrangeps $88, (%rdi){1to16}, %zmm1, %zmm0
; DISABLE-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %v1 = load float, float* %p1, align 4
  %t0 = insertelement <16 x float> undef, float %v1, i64 0
  %a1 = shufflevector <16 x float> %t0, <16 x float> undef, <16 x i32> zeroinitializer
  %2 = call <16 x float> @llvm.x86.avx512.mask.range.ps.512(<16 x float> %a0, <16 x float> %a1, i32 88, <16 x float> undef, i16 -1, i32 4)
  %res = fadd <16 x float> %2, %a0
  ret <16 x float> %res
}

define <16 x float> @rangeps_maskz_512(<16 x float> %a0, <16 x float> %a1, i16* %pmask) {
; ENABLE-LABEL: rangeps_maskz_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovw (%rdi), %k1
; ENABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangeps $88, %zmm2, %zmm0, %zmm1 {%k1} {z}
; ENABLE-NEXT:    vaddps %zmm2, %zmm0, %zmm0
; ENABLE-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangeps_maskz_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovw (%rdi), %k1
; DISABLE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; DISABLE-NEXT:    vrangeps $88, %zmm2, %zmm0, %zmm1 {%k1} {z}
; DISABLE-NEXT:    vaddps %zmm2, %zmm0, %zmm0
; DISABLE-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i16, i16* %pmask
  %2 = call <16 x float> @llvm.x86.avx512.mask.range.ps.512(<16 x float> %a0, <16 x float> %a1, i32 88, <16 x float> undef, i16 %mask, i32 4)
  %3 = fadd <16 x float> %a0, %a1
  %res = fadd <16 x float> %2, %3
  ret <16 x float> %res
}

declare <16 x float> @llvm.x86.avx512.mask.range.ps.512(<16 x float>, <16 x float>, i32, <16 x float>, i16, i32) nounwind readnone


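; The same matrix of tests for the packed-double form, vrangepd.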
define <2 x double> @rangepd_128(<2 x double> %a0, <2 x double> %a1) {
; ENABLE-LABEL: rangepd_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangepd $88, %xmm2, %xmm0, %xmm1
; ENABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; ENABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangepd $88, %xmm2, %xmm0, %xmm1
; DISABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; DISABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <2 x double> @llvm.x86.avx512.mask.range.pd.128(<2 x double> %a0, <2 x double> %a1, i32 88, <2 x double> undef, i8 -1)
  %3 = fadd <2 x double> %a0, %a1
  %res = fadd <2 x double> %2, %3
  ret <2 x double> %res
}

define <2 x double> @rangepd_mem_128(<2 x double> %a0, <2 x double>* %p1) {
; ENABLE-LABEL: rangepd_mem_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangepd $88, (%rdi), %xmm1, %xmm0
; ENABLE-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_mem_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; DISABLE-NEXT:    vrangepd $88, (%rdi), %xmm1, %xmm0
; DISABLE-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <2 x double>, <2 x double>* %p1, align 64
  %2 = call <2 x double> @llvm.x86.avx512.mask.range.pd.128(<2 x double> %a0, <2 x double> %a1, i32 88, <2 x double> undef, i8 -1)
  %res = fadd <2 x double> %2, %a0
  ret <2 x double> %res
}

define <2 x double> @rangepd_broadcast_128(<2 x double> %a0, double* %p1) {
; ENABLE-LABEL: rangepd_broadcast_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangepd $88, (%rdi){1to2}, %xmm1, %xmm0
; ENABLE-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_broadcast_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; DISABLE-NEXT:    vrangepd $88, (%rdi){1to2}, %xmm1, %xmm0
; DISABLE-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %v1 = load double, double* %p1, align 4
  %t0 = insertelement <2 x double> undef, double %v1, i64 0
  %a1 = shufflevector <2 x double> %t0, <2 x double> undef, <2 x i32> zeroinitializer
  %2 = call <2 x double> @llvm.x86.avx512.mask.range.pd.128(<2 x double> %a0, <2 x double> %a1, i32 88, <2 x double> undef, i8 -1)
  %res = fadd <2 x double> %2, %a0
  ret <2 x double> %res
}

define <2 x double> @rangepd_maskz_128(<2 x double> %a0, <2 x double> %a1, i8* %pmask) {
; ENABLE-LABEL: rangepd_maskz_128:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangepd $88, %xmm2, %xmm0, %xmm1 {%k1} {z}
; ENABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; ENABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_maskz_128:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangepd $88, %xmm2, %xmm0, %xmm1 {%k1} {z}
; DISABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; DISABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i8, i8* %pmask
  %2 = call <2 x double> @llvm.x86.avx512.mask.range.pd.128(<2 x double> %a0, <2 x double> %a1, i32 88, <2 x double> undef, i8 %mask)
  %3 = fadd <2 x double> %a0, %a1
  %res = fadd <2 x double> %2, %3
  ret <2 x double> %res
}

declare <2 x double> @llvm.x86.avx512.mask.range.pd.128(<2 x double>, <2 x double>, i32, <2 x double>, i8) nounwind readnone

define <4 x double> @rangepd_256(<4 x double> %a0, <4 x double> %a1) {
; ENABLE-LABEL: rangepd_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangepd $88, %ymm2, %ymm0, %ymm1
; ENABLE-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; ENABLE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; DISABLE-NEXT:    vrangepd $88, %ymm2, %ymm0, %ymm1
; DISABLE-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; DISABLE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <4 x double> @llvm.x86.avx512.mask.range.pd.256(<4 x double> %a0, <4 x double> %a1, i32 88, <4 x double> undef, i8 -1)
  %3 = fadd <4 x double> %a0, %a1
  %res = fadd <4 x double> %2, %3
  ret <4 x double> %res
}

define <4 x double> @rangepd_mem_256(<4 x double> %a0, <4 x double>* %p1) {
; ENABLE-LABEL: rangepd_mem_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangepd $88, (%rdi), %ymm1, %ymm0
; ENABLE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_mem_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; DISABLE-NEXT:    vrangepd $88, (%rdi), %ymm1, %ymm0
; DISABLE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <4 x double>, <4 x double>* %p1, align 64
  %2 = call <4 x double> @llvm.x86.avx512.mask.range.pd.256(<4 x double> %a0, <4 x double> %a1, i32 88, <4 x double> undef, i8 -1)
  %res = fadd <4 x double> %2, %a0
  ret <4 x double> %res
}

define <4 x double> @rangepd_broadcast_256(<4 x double> %a0, double* %p1) {
; ENABLE-LABEL: rangepd_broadcast_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangepd $88, (%rdi){1to4}, %ymm1, %ymm0
; ENABLE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_broadcast_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; DISABLE-NEXT:    vrangepd $88, (%rdi){1to4}, %ymm1, %ymm0
; DISABLE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %v1 = load double, double* %p1, align 4
  %t0 = insertelement <4 x double> undef, double %v1, i64 0
  %a1 = shufflevector <4 x double> %t0, <4 x double> undef, <4 x i32> zeroinitializer
  %2 = call <4 x double> @llvm.x86.avx512.mask.range.pd.256(<4 x double> %a0, <4 x double> %a1, i32 88, <4 x double> undef, i8 -1)
  %res = fadd <4 x double> %2, %a0
  ret <4 x double> %res
}

define <4 x double> @rangepd_maskz_256(<4 x double> %a0, <4 x double> %a1, i8* %pmask) {
; ENABLE-LABEL: rangepd_maskz_256:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangepd $88, %ymm2, %ymm0, %ymm1 {%k1} {z}
; ENABLE-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; ENABLE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_maskz_256:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; DISABLE-NEXT:    vrangepd $88, %ymm2, %ymm0, %ymm1 {%k1} {z}
; DISABLE-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
; DISABLE-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i8, i8* %pmask
  %2 = call <4 x double> @llvm.x86.avx512.mask.range.pd.256(<4 x double> %a0, <4 x double> %a1, i32 88, <4 x double> undef, i8 %mask)
  %3 = fadd <4 x double> %a0, %a1
  %res = fadd <4 x double> %2, %3
  ret <4 x double> %res
}

declare <4 x double> @llvm.x86.avx512.mask.range.pd.256(<4 x double>, <4 x double>, i32, <4 x double>, i8) nounwind readnone

define <8 x double> @rangepd_512(<8 x double> %a0, <8 x double> %a1) {
; ENABLE-LABEL: rangepd_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangepd $88, %zmm2, %zmm0, %zmm1
; ENABLE-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
; ENABLE-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; DISABLE-NEXT:    vrangepd $88, %zmm2, %zmm0, %zmm1
; DISABLE-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
; DISABLE-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <8 x double> @llvm.x86.avx512.mask.range.pd.512(<8 x double> %a0, <8 x double> %a1, i32 88, <8 x double> undef, i8 -1, i32 4)
  %3 = fadd <8 x double> %a0, %a1
  %res = fadd <8 x double> %2, %3
  ret <8 x double> %res
}

define <8 x double> @rangepd_mem_512(<8 x double> %a0, <8 x double>* %p1) {
; ENABLE-LABEL: rangepd_mem_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangepd $88, (%rdi), %zmm1, %zmm0
; ENABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_mem_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vrangepd $88, (%rdi), %zmm1, %zmm0
; DISABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <8 x double>, <8 x double>* %p1, align 64
  %2 = call <8 x double> @llvm.x86.avx512.mask.range.pd.512(<8 x double> %a0, <8 x double> %a1, i32 88, <8 x double> undef, i8 -1, i32 4)
  %res = fadd <8 x double> %2, %a0
  ret <8 x double> %res
}

define <8 x double> @rangepd_broadcast_512(<8 x double> %a0, double* %p1) {
; ENABLE-LABEL: rangepd_broadcast_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangepd $88, (%rdi){1to8}, %zmm1, %zmm0
; ENABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_broadcast_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; DISABLE-NEXT:    vrangepd $88, (%rdi){1to8}, %zmm1, %zmm0
; DISABLE-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %v1 = load double, double* %p1, align 4
  %t0 = insertelement <8 x double> undef, double %v1, i64 0
  %a1 = shufflevector <8 x double> %t0, <8 x double> undef, <8 x i32> zeroinitializer
  %2 = call <8 x double> @llvm.x86.avx512.mask.range.pd.512(<8 x double> %a0, <8 x double> %a1, i32 88, <8 x double> undef, i8 -1, i32 4)
  %res = fadd <8 x double> %2, %a0
  ret <8 x double> %res
}

define <8 x double> @rangepd_maskz_512(<8 x double> %a0, <8 x double> %a1, i8* %pmask) {
; ENABLE-LABEL: rangepd_maskz_512:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; ENABLE-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangepd $88, %zmm2, %zmm0, %zmm1 {%k1} {z}
; ENABLE-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
; ENABLE-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangepd_maskz_512:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; DISABLE-NEXT:    vrangepd $88, %zmm2, %zmm0, %zmm1 {%k1} {z}
; DISABLE-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
; DISABLE-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i8, i8* %pmask
  %2 = call <8 x double> @llvm.x86.avx512.mask.range.pd.512(<8 x double> %a0, <8 x double> %a1, i32 88, <8 x double> undef, i8 %mask, i32 4)
  %3 = fadd <8 x double> %a0, %a1
  %res = fadd <8 x double> %2, %3
  ret <8 x double> %res
}

declare <8 x double> @llvm.x86.avx512.mask.range.pd.512(<8 x double>, <8 x double>, i32, <8 x double>, i8, i32) nounwind readnone

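; Scalar forms (vrangess/vrangesd): ENABLE zeroes the destination before the
; scalar range instruction as well.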
define <4 x float> @rangess(<4 x float> %a0, <4 x float> %a1) {
; ENABLE-LABEL: rangess:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangess $4, %xmm2, %xmm0, %xmm1
; ENABLE-NEXT:    vaddps %xmm0, %xmm2, %xmm0
; ENABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangess:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangess $4, %xmm2, %xmm0, %xmm1
; DISABLE-NEXT:    vaddps %xmm0, %xmm2, %xmm0
; DISABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4, i32 4)
  %3 = fadd <4 x float> %a1, %a0
  %res = fadd <4 x float> %2, %3
  ret <4 x float> %res
}

define <4 x float> @rangess_mem(<4 x float> %a0, <4 x float>* %p1) {
; ENABLE-LABEL: rangess_mem:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangess $4, (%rdi), %xmm0, %xmm1
; ENABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangess_mem:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vrangess $4, (%rdi), %xmm0, %xmm1
; DISABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <4 x float>, <4 x float>* %p1, align 64
  %2 = call <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4, i32 4)
  %res = fadd <4 x float> %2, %a0
  ret <4 x float> %res
}

define <4 x float> @rangess_maskz(<4 x float> %a0, <4 x float> %a1, i8* %pmask) {
; ENABLE-LABEL: rangess_maskz:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangess $4, %xmm2, %xmm0, %xmm1 {%k1} {z}
; ENABLE-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; ENABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangess_maskz:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangess $4, %xmm2, %xmm0, %xmm1 {%k1} {z}
; DISABLE-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; DISABLE-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i8, i8* %pmask
  %2 = call <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 %mask, i32 4, i32 4)
  %3 = fadd <4 x float> %a0, %a1
  %res = fadd <4 x float> %2, %3
  ret <4 x float> %res
}

declare <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32, i32)

define <2 x double> @rangesd(<2 x double> %a0, <2 x double> %a1) {
; ENABLE-LABEL: rangesd:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangesd $4, %xmm2, %xmm0, %xmm1
; ENABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; ENABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangesd:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangesd $4, %xmm2, %xmm0, %xmm1
; DISABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; DISABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %2 = call <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> undef, i8 -1, i32 4, i32 4)
  %3 = fadd <2 x double> %a0, %a1
  %res = fadd <2 x double> %2, %3
  ret <2 x double> %res
}

define <2 x double> @rangesd_mem(<2 x double> %a0, <2 x double>* %p1) {
; ENABLE-LABEL: rangesd_mem:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; ENABLE-NEXT:    vrangesd $4, (%rdi), %xmm1, %xmm0
; ENABLE-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangesd_mem:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; DISABLE-NEXT:    vrangesd $4, (%rdi), %xmm1, %xmm0
; DISABLE-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %a1 = load <2 x double>, <2 x double>* %p1, align 64
  %2 = call <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> undef, i8 -1, i32 4, i32 4)
  %res = fadd <2 x double> %2, %a0
  ret <2 x double> %res
}

define <2 x double> @rangesd_maskz(<2 x double> %a0, <2 x double> %a1, i8* %pmask) {
; ENABLE-LABEL: rangesd_maskz:
; ENABLE:       # %bb.0:
; ENABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ENABLE-NEXT:    #APP
; ENABLE-NEXT:    nop
; ENABLE-NEXT:    #NO_APP
; ENABLE-NEXT:    kmovb (%rdi), %k1
; ENABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ENABLE-NEXT:    vrangesd $4, %xmm2, %xmm0, %xmm1 {%k1} {z}
; ENABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; ENABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; ENABLE-NEXT:    retq
;
; DISABLE-LABEL: rangesd_maskz:
; DISABLE:       # %bb.0:
; DISABLE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; DISABLE-NEXT:    #APP
; DISABLE-NEXT:    nop
; DISABLE-NEXT:    #NO_APP
; DISABLE-NEXT:    kmovb (%rdi), %k1
; DISABLE-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; DISABLE-NEXT:    vrangesd $4, %xmm2, %xmm0, %xmm1 {%k1} {z}
; DISABLE-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; DISABLE-NEXT:    vaddpd %xmm0, %xmm1, %xmm0
; DISABLE-NEXT:    retq
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  %mask = load i8, i8* %pmask
  %2 = call <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> undef, i8 %mask, i32 4, i32 4)
  %3 = fadd <2 x double> %a0, %a1
  %res = fadd <2 x double> %2, %3
  ret <2 x double> %res
}

declare <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32, i32)