; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 | FileCheck %s --check-prefix=SSE2

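; The function below is compiled at -O0 for three vector feature levels
; (AVX512F, AVX2, and baseline SSE2 with only AMX-INT8). The checks verify
; that a tile configuration is materialized on the stack and loaded with
; ldtilecfg before each group of tileloadd/tdpbssd/tilestored instructions,
; and that the configuration buffer is zeroed with the widest vector store
; available to the target (zmm, ymm, or xmm).
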
@buf = dso_local global [1024 x i8] zeroinitializer, align 16
@buf2 = dso_local global [1024 x i8] zeroinitializer, align 16

; Function Attrs: nounwind uwtable
define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) local_unnamed_addr {
; AVX512-LABEL: test_api:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: .cfi_offset %rbp, -16
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: .cfi_def_cfa_register %rbp
; AVX512-NEXT: andq $-1024, %rsp # imm = 0xFC00
; AVX512-NEXT: subq $6144, %rsp # imm = 0x1800
; AVX512-NEXT: movw %dx, %ax
; AVX512-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; AVX512-NEXT: movw %si, %ax
; AVX512-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: cmpl $0, %edi
; AVX512-NEXT: je .LBB0_2
; AVX512-NEXT: # %bb.1: # %if.then
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %al, %sil
; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw $8, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: movl $buf, %r9d
; AVX512-NEXT: movl $32, %r10d
; AVX512-NEXT: movw $8, %si
; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX512-NEXT: movl $64, %r8d
; AVX512-NEXT: tilestored %tmm0, (%r11,%r8)
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $8, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX512-NEXT: tilestored %tmm0, (%rdi,%r8)
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %al, %dil
; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg (%rsi)
; AVX512-NEXT: movl $buf, %esi
; AVX512-NEXT: movl $32, %edi
; AVX512-NEXT: tileloadd (%rsi,%rdi), %tmm0
; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: jmp .LBB0_3
; AVX512-NEXT: .LBB0_2: # %if.else
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %al, %sil
; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw $8, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: movl $buf2, %r9d
; AVX512-NEXT: movl $32, %r10d
; AVX512-NEXT: movw $8, %si
; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX512-NEXT: movl $64, %r8d
; AVX512-NEXT: tilestored %tmm0, (%r11,%r8)
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $8, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX512-NEXT: tilestored %tmm0, (%rdi,%r8)
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %al, %dil
; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg (%rsi)
; AVX512-NEXT: movl $buf2, %esi
; AVX512-NEXT: movl $32, %edi
; AVX512-NEXT: tileloadd (%rsi,%rdi), %tmm0
; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: .LBB0_3: # %if.end
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; AVX512-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %al, %sil
; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw $8, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $8, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: movw $8, %di
; AVX512-NEXT: tileloadd (%r10,%rsi), %tmm1
; AVX512-NEXT: tileloadd (%r9,%rsi), %tmm2
; AVX512-NEXT: tileloadd (%r8,%rsi), %tmm0
; AVX512-NEXT: tdpbssd %tmm2, %tmm1, %tmm0
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; AVX512-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movb %al, %dil
; AVX512-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; AVX512-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX512-NEXT: ldtilecfg (%rsi)
; AVX512-NEXT: movl $64, %esi
; AVX512-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX512-NEXT: movl $buf, %edx
; AVX512-NEXT: movl $32, %esi
; AVX512-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX512-NEXT: movq %rbp, %rsp
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: .cfi_def_cfa %rsp, 8
; AVX512-NEXT: tilerelease
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX2-LABEL: test_api:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: .cfi_offset %rbp, -16
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: .cfi_def_cfa_register %rbp
; AVX2-NEXT: andq $-1024, %rsp # imm = 0xFC00
; AVX2-NEXT: subq $6144, %rsp # imm = 0x1800
; AVX2-NEXT: movw %dx, %ax
; AVX2-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; AVX2-NEXT: movw %si, %ax
; AVX2-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: cmpl $0, %edi
; AVX2-NEXT: je .LBB0_2
; AVX2-NEXT: # %bb.1: # %if.then
; AVX2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; AVX2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %al, %sil
; AVX2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw $8, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX2-NEXT: movl $buf, %r9d
; AVX2-NEXT: movl $32, %r10d
; AVX2-NEXT: movw $8, %si
; AVX2-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX2-NEXT: movl $64, %r8d
; AVX2-NEXT: tilestored %tmm0, (%r11,%r8)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $8, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX2-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX2-NEXT: tilestored %tmm0, (%rdi,%r8)
; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %al, %dil
; AVX2-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg (%rsi)
; AVX2-NEXT: movl $buf, %esi
; AVX2-NEXT: movl $32, %edi
; AVX2-NEXT: tileloadd (%rsi,%rdi), %tmm0
; AVX2-NEXT: movl $64, %esi
; AVX2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX2-NEXT: jmp .LBB0_3
; AVX2-NEXT: .LBB0_2: # %if.else
; AVX2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; AVX2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %al, %sil
; AVX2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw $8, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX2-NEXT: movl $buf2, %r9d
; AVX2-NEXT: movl $32, %r10d
; AVX2-NEXT: movw $8, %si
; AVX2-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX2-NEXT: movl $64, %r8d
; AVX2-NEXT: tilestored %tmm0, (%r11,%r8)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $8, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX2-NEXT: tileloadd (%r9,%r10), %tmm0
; AVX2-NEXT: tilestored %tmm0, (%rdi,%r8)
; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %al, %dil
; AVX2-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg (%rsi)
; AVX2-NEXT: movl $buf2, %esi
; AVX2-NEXT: movl $32, %edi
; AVX2-NEXT: tileloadd (%rsi,%rdi), %tmm0
; AVX2-NEXT: movl $64, %esi
; AVX2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX2-NEXT: .LBB0_3: # %if.end
; AVX2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; AVX2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %al, %sil
; AVX2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw $8, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $8, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; AVX2-NEXT: movl $64, %esi
; AVX2-NEXT: movw $8, %di
; AVX2-NEXT: tileloadd (%r10,%rsi), %tmm1
; AVX2-NEXT: tileloadd (%r9,%rsi), %tmm2
; AVX2-NEXT: tileloadd (%r8,%rsi), %tmm0
; AVX2-NEXT: tdpbssd %tmm2, %tmm1, %tmm0
; AVX2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movb %al, %dil
; AVX2-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; AVX2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; AVX2-NEXT: ldtilecfg (%rsi)
; AVX2-NEXT: movl $64, %esi
; AVX2-NEXT: tileloadd (%rdx,%rsi), %tmm0
; AVX2-NEXT: movl $buf, %edx
; AVX2-NEXT: movl $32, %esi
; AVX2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: .cfi_def_cfa %rsp, 8
; AVX2-NEXT: tilerelease
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; SSE2-LABEL: test_api:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: .cfi_def_cfa_offset 16
; SSE2-NEXT: .cfi_offset %rbp, -16
; SSE2-NEXT: movq %rsp, %rbp
; SSE2-NEXT: .cfi_def_cfa_register %rbp
; SSE2-NEXT: andq $-1024, %rsp # imm = 0xFC00
; SSE2-NEXT: subq $6144, %rsp # imm = 0x1800
; SSE2-NEXT: movw %dx, %ax
; SSE2-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; SSE2-NEXT: movw %si, %ax
; SSE2-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE2-NEXT: cmpl $0, %edi
; SSE2-NEXT: je .LBB0_2
; SSE2-NEXT: # %bb.1: # %if.then
; SSE2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; SSE2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %al, %sil
; SSE2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw $8, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; SSE2-NEXT: movl $buf, %r9d
; SSE2-NEXT: movl $32, %r10d
; SSE2-NEXT: movw $8, %si
; SSE2-NEXT: tileloadd (%r9,%r10), %tmm0
; SSE2-NEXT: movl $64, %r8d
; SSE2-NEXT: tilestored %tmm0, (%r11,%r8)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $8, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; SSE2-NEXT: tileloadd (%r9,%r10), %tmm0
; SSE2-NEXT: tilestored %tmm0, (%rdi,%r8)
; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %al, %dil
; SSE2-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg (%rsi)
; SSE2-NEXT: movl $buf, %esi
; SSE2-NEXT: movl $32, %edi
; SSE2-NEXT: tileloadd (%rsi,%rdi), %tmm0
; SSE2-NEXT: movl $64, %esi
; SSE2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; SSE2-NEXT: jmp .LBB0_3
; SSE2-NEXT: .LBB0_2: # %if.else
; SSE2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; SSE2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %al, %sil
; SSE2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw $8, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; SSE2-NEXT: movl $buf2, %r9d
; SSE2-NEXT: movl $32, %r10d
; SSE2-NEXT: movw $8, %si
; SSE2-NEXT: tileloadd (%r9,%r10), %tmm0
; SSE2-NEXT: movl $64, %r8d
; SSE2-NEXT: tilestored %tmm0, (%r11,%r8)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $8, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; SSE2-NEXT: tileloadd (%r9,%r10), %tmm0
; SSE2-NEXT: tilestored %tmm0, (%rdi,%r8)
; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %al, %dil
; SSE2-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg (%rsi)
; SSE2-NEXT: movl $buf2, %esi
; SSE2-NEXT: movl $32, %edi
; SSE2-NEXT: tileloadd (%rsi,%rdi), %tmm0
; SSE2-NEXT: movl $64, %esi
; SSE2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; SSE2-NEXT: .LBB0_3: # %if.end
; SSE2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %ax # 2-byte Reload
; SSE2-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %cx # 2-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %al, %sil
; SSE2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw $8, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $8, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %sil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; SSE2-NEXT: movl $64, %esi
; SSE2-NEXT: movw $8, %di
; SSE2-NEXT: tileloadd (%r10,%rsi), %tmm1
; SSE2-NEXT: tileloadd (%r9,%rsi), %tmm2
; SSE2-NEXT: tileloadd (%r8,%rsi), %tmm0
; SSE2-NEXT: tdpbssd %tmm2, %tmm1, %tmm0
; SSE2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb $1, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movb %al, %dil
; SSE2-NEXT: movb %dil, {{[0-9]+}}(%rsp)
; SSE2-NEXT: movw %cx, {{[0-9]+}}(%rsp)
; SSE2-NEXT: ldtilecfg (%rsi)
; SSE2-NEXT: movl $64, %esi
; SSE2-NEXT: tileloadd (%rdx,%rsi), %tmm0
; SSE2-NEXT: movl $buf, %edx
; SSE2-NEXT: movl $32, %esi
; SSE2-NEXT: tilestored %tmm0, (%rdx,%rsi)
; SSE2-NEXT: movq %rbp, %rsp
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: .cfi_def_cfa %rsp, 8
; SSE2-NEXT: tilerelease
; SSE2-NEXT: retq
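; Depending on %cond, three tiles are loaded from @buf or @buf2, joined by
; phis in %if.end, combined with tdpbssd, and the resulting tile is stored
; back to @buf.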
entry:
  %tobool.not = icmp eq i32 %cond, 0
  br i1 %tobool.not, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  %0 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
  %1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
  %2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
  br label %if.end

if.else:                                          ; preds = %entry
  %3 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
  %4 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
  %5 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
  br label %if.end

if.end:                                           ; preds = %if.else, %if.then
  %a.sroa.1094.0.in = phi x86_amx [ %3, %if.else ], [ %0, %if.then ]
  %b.sroa.1069.0.in = phi x86_amx [ %4, %if.else ], [ %1, %if.then ]
  %c.sroa.1044.0.in = phi x86_amx [ %5, %if.else ], [ %2, %if.then ]
  %6 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %row, i16 %col, i16 8, x86_amx %c.sroa.1044.0.in, x86_amx %a.sroa.1094.0.in, x86_amx %b.sroa.1069.0.in)
  tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32, x86_amx %6)
  ret void
}

; Function Attrs: nounwind
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)

; Function Attrs: nounwind
declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)

; Function Attrs: nounwind
declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)