forked from OSchip/llvm-project
[x86] regenerate checks with update_llc_test_checks.py
The dream of a unified check-line auto-generator for all phases of compilation is dead. The llc script has already diverged to be better at its goal, so having two scripts that do almost the same thing only causes confusion for newcomers. I plan to fix up more x86 tests in a follow-up commit. After that, we can rip out the llc support in update_test_checks.py. llvm-svn: 305202
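
For readers unfamiliar with the workflow, a minimal sketch of how assertions like the ones below are regenerated, assuming a built llc is on PATH; the test path is illustrative only (the actual files touched by this commit are not named in this excerpt):

    # Regenerate the FileCheck assertions of an llc-based test in place.
    # The script parses the test's RUN lines, runs llc itself, and rewrites the CHECK blocks.
    python utils/update_llc_test_checks.py test/CodeGen/X86/some_test.ll
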
This commit is contained in:
parent db7c6a8731
commit 9d13a18845
@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+avx -mtriple=i686-unknown-unknown | FileCheck %s

define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind {
@@ -12,7 +12,6 @@ define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind
; CHECK-NEXT: vmovups %ymm0, (%eax)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl $4
;
%b = load <18 x i16>, <18 x i16>* %bp, align 16
%x = add <18 x i16> zeroinitializer, %b
store <18 x i16> %x, <18 x i16>* %ret, align 16

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i386-apple-darwin < %s | FileCheck %s

; PR30841: https://llvm.org/bugs/show_bug.cgi?id=30841
@@ -12,7 +12,6 @@ define i8 @PR30841(i64 %argc) {
; CHECK-NEXT: negl %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retl
;
entry:
%or = or i64 %argc, -4294967296
br label %end

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s
; PR3253
@@ -24,7 +24,12 @@ define void @test2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB0_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB0_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -44,7 +49,13 @@ define void @test2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB1_1
;
; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB1_1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -64,7 +75,12 @@ define void @atest2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB2_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB2_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -84,7 +100,13 @@ define void @atest2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB3_1
;
; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB3_1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -104,7 +126,13 @@ define void @test3(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB4_1
;
; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB4_1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
@@ -124,7 +152,13 @@ define void @test3b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB5_1
;
; CHECK-NEXT: # BB#2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB5_1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
@@ -144,7 +178,12 @@ define void @testne2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB6_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB6_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -164,7 +203,12 @@ define void @testne2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB7_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB7_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -184,7 +228,12 @@ define void @atestne2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB8_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB8_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -204,7 +253,12 @@ define void @atestne2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB9_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB9_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -224,7 +278,12 @@ define void @testne3(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB10_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB10_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
@@ -244,7 +303,12 @@ define void @testne3b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB11_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB11_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
@@ -264,7 +328,12 @@ define void @query2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB12_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB12_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -284,7 +353,12 @@ define void @query2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB13_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB13_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -304,7 +378,12 @@ define void @aquery2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB14_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB14_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -324,7 +403,12 @@ define void @aquery2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB15_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB15_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -344,7 +428,12 @@ define void @query3(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB16_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB16_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
@@ -364,7 +453,12 @@ define void @query3b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB17_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB17_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
@@ -384,7 +478,12 @@ define void @query3x(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB18_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB18_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
@@ -404,7 +503,12 @@ define void @query3bx(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jae .LBB19_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB19_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
@@ -424,7 +528,12 @@ define void @queryne2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB20_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB20_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -444,7 +553,12 @@ define void @queryne2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB21_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB21_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = lshr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -464,7 +578,12 @@ define void @aqueryne2(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB22_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB22_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 %tmp29, 1
@@ -484,7 +603,12 @@ define void @aqueryne2b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB23_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB23_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = ashr i32 %x, %n
%tmp3 = and i32 1, %tmp29
@@ -504,7 +628,12 @@ define void @queryne3(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB24_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB24_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
@@ -524,7 +653,12 @@ define void @queryne3b(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB25_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB25_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
@@ -544,7 +678,12 @@ define void @queryne3x(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB26_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB26_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %tmp29, %x
@@ -564,7 +703,12 @@ define void @queryne3bx(i32 %x, i32 %n) nounwind {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: jb .LBB27_2
;
; CHECK-NEXT: # BB#1: # %bb
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: callq foo
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .LBB27_2: # %UnifiedReturnBlock
; CHECK-NEXT: retq
entry:
%tmp29 = shl i32 1, %n
%tmp3 = and i32 %x, %tmp29
@@ -588,7 +732,6 @@ define zeroext i1 @invert(i32 %flags, i32 %flag) nounwind {
; CHECK-NEXT: btl %esi, %edi
; CHECK-NEXT: setb %al
; CHECK-NEXT: retq
;
%neg = xor i32 %flags, -1
%shl = shl i32 1, %flag
%and = and i32 %shl, %neg
@@ -598,8 +741,10 @@ define zeroext i1 @invert(i32 %flags, i32 %flag) nounwind {

define zeroext i1 @extend(i32 %bit, i64 %bits) {
; CHECK-LABEL: extend:
; CHECK: # BB#0:
; CHECK-NEXT: btl %edi, %esi
; CHECK: # BB#0: # %entry
; CHECK-NEXT: btl %edi, %esi
; CHECK-NEXT: setb %al
; CHECK-NEXT: retq
entry:
%and = and i32 %bit, 31
%sh_prom = zext i32 %and to i64

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s

; cmp with single-use load, should not form branch.
@@ -9,7 +9,6 @@ define i32 @test1(double %a, double* nocapture %b, i32 %x, i32 %y) {
; CHECK-NEXT: cmovbel %edx, %esi
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: retq
;
%load = load double, double* %b, align 8
%cmp = fcmp olt double %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
@@ -24,7 +23,6 @@ define i32 @test2(double %a, double %b, i32 %x, i32 %y) {
; CHECK-NEXT: cmovbel %esi, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
;
%cmp = fcmp ogt double %a, %b
%cond = select i1 %cmp, i32 %x, i32 %y
ret i32 %cond
@@ -39,7 +37,6 @@ define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; CHECK-NEXT: cmovael %ecx, %edx
; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
;
%load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
@@ -56,7 +53,6 @@ define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; CHECK-NEXT: cmovael %edx, %ecx
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: retq
;
%load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cmp1 = icmp ugt i32 %load, %a
@@ -73,7 +69,6 @@ define i32 @weighted_select1(i32 %a, i32 %b) {
; CHECK-NEXT: cmovnel %edi, %esi
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: retq
;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !0
ret i32 %sel
@@ -84,12 +79,12 @@ define i32 @weighted_select2(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select2:
; CHECK: # BB#0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: jne [[LABEL_BB5:.*]]
; CHECK: movl %esi, %edi
; CHECK-NEXT: [[LABEL_BB5]]
; CHECK-NEXT: jne .LBB5_2
; CHECK-NEXT: # BB#1: # %select.false
; CHECK-NEXT: movl %esi, %edi
; CHECK-NEXT: .LBB5_2: # %select.end
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !1
ret i32 %sel
@@ -103,14 +98,14 @@ define i32 @weighted_select3(i32 %a, i32 %b) {
; CHECK-LABEL: weighted_select3:
; CHECK: # BB#0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je [[LABEL_BB6:.*]]
; CHECK: movl %edi, %eax
; CHECK-NEXT: je .LBB6_1
; CHECK-NEXT: # BB#2: # %select.end
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK: [[LABEL_BB6]]
; CHECK-NEXT: .LBB6_1: # %select.false
; CHECK-NEXT: movl %esi, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !2
ret i32 %sel
@@ -124,7 +119,6 @@ define i32 @unweighted_select(i32 %a, i32 %b) {
; CHECK-NEXT: cmovnel %edi, %esi
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: retq
;
%cmp = icmp ne i32 %a, 0
%sel = select i1 %cmp, i32 %a, i32 %b, !prof !3
ret i32 %sel

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE41

define double @test1_add(double %A, double %B) {
@@ -6,7 +6,6 @@ define double @test1_add(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%add = add <2 x i32> %1, %2
@@ -19,7 +18,6 @@ define double @test2_add(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: paddw %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%add = add <4 x i16> %1, %2
@@ -32,7 +30,6 @@ define double @test3_add(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%add = add <8 x i8> %1, %2
@@ -45,7 +42,6 @@ define double @test1_sub(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: psubd %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%sub = sub <2 x i32> %1, %2
@@ -58,7 +54,6 @@ define double @test2_sub(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: psubw %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%sub = sub <4 x i16> %1, %2
@@ -71,7 +66,6 @@ define double @test3_sub(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: psubb %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%sub = sub <8 x i8> %1, %2
@@ -84,7 +78,6 @@ define double @test1_mul(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%mul = mul <2 x i32> %1, %2
@@ -97,7 +90,6 @@ define double @test2_mul(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: pmullw %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%mul = mul <4 x i16> %1, %2
@@ -114,7 +106,6 @@ define double @test3_mul(double %A, double %B) {
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%mul = mul <8 x i8> %1, %2
@@ -127,7 +118,6 @@ define double @test1_and(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%and = and <2 x i32> %1, %2
@@ -140,7 +130,6 @@ define double @test2_and(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%and = and <4 x i16> %1, %2
@@ -153,7 +142,6 @@ define double @test3_and(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: andps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%and = and <8 x i8> %1, %2
@@ -166,7 +154,6 @@ define double @test1_or(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%or = or <2 x i32> %1, %2
@@ -179,7 +166,6 @@ define double @test2_or(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%or = or <4 x i16> %1, %2
@@ -192,7 +178,6 @@ define double @test3_or(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%or = or <8 x i8> %1, %2
@@ -205,7 +190,6 @@ define double @test1_xor(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x i32>
%2 = bitcast double %B to <2 x i32>
%xor = xor <2 x i32> %1, %2
@@ -218,7 +202,6 @@ define double @test2_xor(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <4 x i16>
%2 = bitcast double %B to <4 x i16>
%xor = xor <4 x i16> %1, %2
@@ -231,7 +214,6 @@ define double @test3_xor(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: xorps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <8 x i8>
%2 = bitcast double %B to <8 x i8>
%xor = xor <8 x i8> %1, %2
@@ -244,7 +226,6 @@ define double @test_fadd(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x float>
%2 = bitcast double %B to <2 x float>
%add = fadd <2 x float> %1, %2
@@ -257,7 +238,6 @@ define double @test_fsub(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: subps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x float>
%2 = bitcast double %B to <2 x float>
%sub = fsub <2 x float> %1, %2
@@ -270,7 +250,6 @@ define double @test_fmul(double %A, double %B) {
; SSE41: # BB#0:
; SSE41-NEXT: mulps %xmm1, %xmm0
; SSE41-NEXT: retq
;
%1 = bitcast double %A to <2 x float>
%2 = bitcast double %B to <2 x float>
%mul = fmul <2 x float> %1, %2

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs -mattr=avx | FileCheck %s --check-prefix=AVX
@@ -29,7 +29,6 @@ define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oeq float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -56,7 +55,6 @@ define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oeq double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -84,7 +82,6 @@ define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ogt float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -112,7 +109,6 @@ define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ogt double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -140,7 +136,6 @@ define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oge float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -168,7 +163,6 @@ define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp oge double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -195,7 +189,6 @@ define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp olt float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -222,7 +215,6 @@ define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp olt double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -249,7 +241,6 @@ define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ole float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -276,7 +267,6 @@ define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ole double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -303,7 +293,6 @@ define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ord float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -330,7 +319,6 @@ define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ord double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -357,7 +345,6 @@ define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uno float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -384,7 +371,6 @@ define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uno double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -411,7 +397,6 @@ define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ugt float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -438,7 +423,6 @@ define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ugt double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -465,7 +449,6 @@ define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uge float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -492,7 +475,6 @@ define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp uge double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -520,7 +502,6 @@ define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ult float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -548,7 +529,6 @@ define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ult double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -576,7 +556,6 @@ define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ule float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -604,7 +583,6 @@ define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp ule double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2
@@ -631,7 +609,6 @@ define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovaps %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp une float %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -658,7 +635,6 @@ define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
; AVX512-NEXT: vmovapd %xmm3, %xmm0
; AVX512-NEXT: retq
;
%1 = fcmp une double %a, %b
%2 = select i1 %1, double %c, double %d
ret double %2

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -show-mc-encoding -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=AVX512DQ

@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2 < %s | FileCheck %s

; PR22428: https://llvm.org/bugs/show_bug.cgi?id=22428
@@ -22,7 +22,6 @@ define i32 @f1(float %x, i32 %y) {
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, %y
ret i32 %and
@@ -36,7 +35,6 @@ define i32 @f2(float %x, i32 %y) {
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 %y, %bc1
ret i32 %and
@@ -50,7 +48,6 @@ define i32 @f3(float %x) {
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, 1
ret i32 %and
@@ -64,7 +61,6 @@ define i32 @f4(float %x) {
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: andl $2, %eax
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 2, %bc1
ret i32 %and
@@ -78,7 +74,6 @@ define float @f5(float %x, i32 %y) {
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, %y
%bc2 = bitcast i32 %and to float
@@ -93,7 +88,6 @@ define float @f6(float %x, i32 %y) {
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 %y, %bc1
%bc2 = bitcast i32 %and to float
@@ -108,7 +102,6 @@ define float @f7(float %x) {
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, 3
%bc2 = bitcast i32 %and to float
@@ -123,7 +116,6 @@ define float @f8(float %x) {
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 4, %bc1
%bc2 = bitcast i32 %and to float
@@ -138,7 +130,6 @@ define i32 @f9(float %x, float %y) {
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = and i32 %bc1, %bc2
@@ -152,7 +143,6 @@ define float @f10(float %x, float %y) {
; CHECK: # BB#0:
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = and i32 %bc1, %bc2
@@ -165,7 +155,6 @@ define float @or(float %x, float %y) {
; CHECK: # BB#0:
; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = or i32 %bc1, %bc2
@@ -178,7 +167,6 @@ define float @xor(float %x, float %y) {
; CHECK: # BB#0:
; CHECK-NEXT: xorps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%bc2 = bitcast float %y to i32
%and = xor i32 %bc1, %bc2
@@ -192,7 +180,6 @@ define float @f7_or(float %x) {
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = or i32 %bc1, 3
%bc2 = bitcast i32 %and to float
@@ -205,7 +192,6 @@ define float @f7_xor(float %x) {
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: xorps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = xor i32 %bc1, 3
%bc2 = bitcast i32 %and to float
@@ -219,7 +205,6 @@ define double @doubles(double %x, double %y) {
; CHECK: # BB#0:
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast double %x to i64
%bc2 = bitcast double %y to i64
%and = and i64 %bc1, %bc2
@@ -233,7 +218,6 @@ define double @f7_double(double %x) {
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast double %x to i64
%and = and i64 %bc1, 3
%bc2 = bitcast i64 %and to double
@@ -250,7 +234,6 @@ define float @movmsk(float %x) {
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: andps %xmm1, %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%and = and i32 %bc1, 2147483648
%bc2 = bitcast i32 %and to float
@@ -262,7 +245,6 @@ define double @bitcast_fabs(double %x) {
; CHECK: # BB#0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast double %x to i64
%and = and i64 %bc1, 9223372036854775807
%bc2 = bitcast i64 %and to double
@@ -274,7 +256,6 @@ define float @bitcast_fneg(float %x) {
; CHECK: # BB#0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast float %x to i32
%xor = xor i32 %bc1, 2147483648
%bc2 = bitcast i32 %xor to float
@@ -286,7 +267,6 @@ define <2 x double> @bitcast_fabs_vec(<2 x double> %x) {
; CHECK: # BB#0:
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast <2 x double> %x to <2 x i64>
%and = and <2 x i64> %bc1, <i64 9223372036854775807, i64 9223372036854775807>
%bc2 = bitcast <2 x i64> %and to <2 x double>
@@ -298,7 +278,6 @@ define <4 x float> @bitcast_fneg_vec(<4 x float> %x) {
; CHECK: # BB#0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
%bc1 = bitcast <4 x float> %x to <4 x i32>
%xor = xor <4 x i32> %bc1, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
%bc2 = bitcast <4 x i32> %xor to <4 x float>