; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-macosx -mcpu=core2 < %s | FileCheck %s

declare i64 @testi()

define i64 @test_trivial() {
; CHECK-LABEL: test_trivial:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testi ## TAILCALL
entry:
  %A = tail call i64 @testi()
  ret i64 %A
}

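; A bitcast of the result to the same type is a no-op and should not block
; the tail call.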
define i64 @test_noop_bitcast() {
; CHECK-LABEL: test_noop_bitcast:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testi ## TAILCALL
entry:
  %A = tail call i64 @testi()
  %B = bitcast i64 %A to i64
  ret i64 %B
}

; Tail call shouldn't be blocked by no-op inttoptr.
define i8* @test_inttoptr() {
; CHECK-LABEL: test_inttoptr:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testi ## TAILCALL
entry:
  %A = tail call i64 @testi()
  %B = inttoptr i64 %A to i8*
  ret i8* %B
}

declare <4 x float> @testv()

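; The same holds for a bitcast between vector types of the same width.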
define <4 x i32> @test_vectorbitcast() {
; CHECK-LABEL: test_vectorbitcast:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testv ## TAILCALL
entry:
  %A = tail call <4 x float> @testv()
  %B = bitcast <4 x float> %A to <4 x i32>
  ret <4 x i32> %B
}

declare { i64, i64 } @testp()

define {i64, i64} @test_pair_trivial() {
; CHECK-LABEL: test_pair_trivial:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testp ## TAILCALL
entry:
  %A = tail call { i64, i64} @testp()
  ret { i64, i64} %A
}

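; No tail call here: the scalar result of @testi is duplicated into both
; fields of the returned pair, so the return value is not a plain forward of
; the callee's result.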
define {i64, i64} @test_pair_notail() {
; CHECK-LABEL: test_pair_notail:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    callq _testi
; CHECK-NEXT:    movq %rax, %rdx
; CHECK-NEXT:    popq %rcx
; CHECK-NEXT:    retq
entry:
  %A = tail call i64 @testi()

  %b = insertvalue {i64, i64} undef, i64 %A, 0
  %c = insertvalue {i64, i64} %b, i64 %A, 1

  ret { i64, i64} %c
}

define {i64, i64} @test_pair_extract_trivial() {
; CHECK-LABEL: test_pair_extract_trivial:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testp ## TAILCALL
entry:
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %b = insertvalue {i64, i64} undef, i64 %x, 0
  %c = insertvalue {i64, i64} %b, i64 %y, 1

  ret { i64, i64} %c
}

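; No tail call here: the two fields of @testp's result are swapped before
; being returned.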
define {i64, i64} @test_pair_extract_notail() {
; CHECK-LABEL: test_pair_extract_notail:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    callq _testp
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    movq %rdx, %rax
; CHECK-NEXT:    movq %rcx, %rdx
; CHECK-NEXT:    popq %rcx
; CHECK-NEXT:    retq
entry:
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %b = insertvalue {i64, i64} undef, i64 %y, 0
  %c = insertvalue {i64, i64} %b, i64 %x, 1

  ret { i64, i64} %c
}

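; The inttoptr on the first field is a no-op conversion, so rebuilding the
; pair still forwards @testp's result and the tail call is kept.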
define {i8*, i64} @test_pair_extract_conv() {
; CHECK-LABEL: test_pair_extract_conv:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testp ## TAILCALL
entry:
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %x1 = inttoptr i64 %x to i8*

  %b = insertvalue {i8*, i64} undef, i8* %x1, 0
  %c = insertvalue {i8*, i64} %b, i64 %y, 1

  ret { i8*, i64} %c
}

define {i64, i64} @test_pair_extract_multiple() {
; CHECK-LABEL: test_pair_extract_multiple:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testp ## TAILCALL
entry:
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0
  %y = extractvalue { i64, i64} %A, 1

  %b = insertvalue {i64, i64} undef, i64 %x, 0
  %c = insertvalue {i64, i64} %b, i64 %y, 1

  %x1 = extractvalue { i64, i64} %b, 0
  %y1 = extractvalue { i64, i64} %c, 1

  %d = insertvalue {i64, i64} undef, i64 %x1, 0
  %e = insertvalue {i64, i64} %b, i64 %y1, 1

  ret { i64, i64} %e
}

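; Only the first field is forwarded and the second is left undef, which is
; still compatible with returning @testp's result directly.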
define {i64, i64} @test_pair_extract_undef() {
; CHECK-LABEL: test_pair_extract_undef:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testp ## TAILCALL
entry:
  %A = tail call { i64, i64} @testp()
  %x = extractvalue { i64, i64} %A, 0

  %b = insertvalue {i64, i64} undef, i64 %x, 0

  ret { i64, i64} %b
}

declare { i64, { i32, i32 } } @testn()

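; The same forwarding is recognized through a nested aggregate.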
define {i64, {i32, i32}} @test_nest() {
; CHECK-LABEL: test_nest:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testn ## TAILCALL
entry:
  %A = tail call { i64, { i32, i32 } } @testn()
  %x = extractvalue { i64, { i32, i32}} %A, 0
  %y = extractvalue { i64, { i32, i32}} %A, 1
  %y1 = extractvalue { i32, i32} %y, 0
  %y2 = extractvalue { i32, i32} %y, 1

  %b = insertvalue {i64, {i32, i32}} undef, i64 %x, 0
  %c1 = insertvalue {i32, i32} undef, i32 %y1, 0
  %c2 = insertvalue {i32, i32} %c1, i32 %y2, 1
  %c = insertvalue {i64, {i32, i32}} %b, {i32, i32} %c2, 1

  ret { i64, { i32, i32}} %c
}

%struct.A = type { i32 }
%struct.B = type { %struct.A, i32 }

declare %struct.B* @testu()

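; The GEP selects the first field of %struct.B at offset zero, so the
; returned %struct.A* is the callee's return value unchanged.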
define %struct.A* @test_upcast() {
; CHECK-LABEL: test_upcast:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testu ## TAILCALL
entry:
  %A = tail call %struct.B* @testu()
  %x = getelementptr inbounds %struct.B, %struct.B* %A, i32 0, i32 0
  ret %struct.A* %x
}

; PR13006
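; This pattern (overwriting one field of the returned pair with undef) used
; to crash; it should compile to a plain tail call.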
define { i64, i64 } @crash(i8* %this) {
; CHECK-LABEL: crash:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _testp ## TAILCALL
entry:
  %c = tail call { i64, i64 } @testp()
  %mrv7 = insertvalue { i64, i64 } %c, i64 undef, 1
  ret { i64, i64 } %mrv7
}

%struct.funcs = type { i32 (i8*, i32*, i32)*, i32 (i8*)*, i32 (i8*)*, i32 (i8*, i32)*, i32 }

@func_table = external global [0 x %struct.funcs]

; Check that we can fold an indexed load into a tail call instruction.
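; A rough C equivalent of the test below (a sketch only; the struct member
; names are assumptions, the field types come from %struct.funcs):
;
;   struct funcs {
;     int (*f0)(char *, int *, int);
;     int (*f1)(char *);
;     int (*f2)(char *);          /* loaded via the indexed GEP (field 2) */
;     int (*f3)(char *, int);
;     int data;
;   };
;   extern struct funcs func_table[];
;   void fold_indexed_load(char *mbstr, long idxprom) {
;     func_table[idxprom].f2(mbstr);
;   }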
define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
; CHECK-LABEL: fold_indexed_load:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    leaq (%rsi,%rsi,4), %rax
; CHECK-NEXT:    movq _func_table@{{.*}}(%rip), %rcx
; CHECK-NEXT:    jmpq *16(%rcx,%rax,8) ## TAILCALL
entry:
  %dsplen = getelementptr inbounds [0 x %struct.funcs], [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2
  %x1 = load i32 (i8*)*, i32 (i8*)** %dsplen, align 8
  %call = tail call i32 %x1(i8* %mbstr) nounwind
  ret void
}

@funcs = external constant [0 x i32 (i8*, ...)*]

; <rdar://problem/12282281> Fold an indexed load into the tail call instruction.
; Calling a varargs function with 6 arguments requires 7 registers: %rdi, %rsi,
; %rdx, %rcx, %r8 and %r9 for the arguments, plus %al, which carries the number
; of vector registers used by a varargs call. This leaves %r11 as the only
; available scratch register, so it is not possible to fold an indexed load
; into TCRETURNmi64 in this case.
;
; typedef int (*funcptr)(void*, ...);
; extern const funcptr funcs[];
; int f(int n) {
;   return funcs[n](0, 0, 0, 0, 0, 0);
; }
define i32 @rdar12282281(i32 %n) nounwind uwtable ssp {
; CHECK-LABEL: rdar12282281:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movslq %edi, %rax
; CHECK-NEXT:    movq _funcs@{{.*}}(%rip), %rcx
; CHECK-NEXT:    movq (%rcx,%rax,8), %r11
; CHECK-NEXT:    xorl %edi, %edi
; CHECK-NEXT:    xorl %esi, %esi
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    xorl %r8d, %r8d
; CHECK-NEXT:    xorl %r9d, %r9d
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    jmpq *%r11 ## TAILCALL
entry:
  %idxprom = sext i32 %n to i64
  %arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*], [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom
  %0 = load i32 (i8*, ...)*, i32 (i8*, ...)** %arrayidx, align 8
  %call = tail call i32 (i8*, ...) %0(i8* null, i32 0, i32 0, i32 0, i32 0, i32 0) nounwind
  ret i32 %call
}

declare x86_fp80 @fp80_callee(x86_fp80)

define x86_fp80 @fp80_call(x86_fp80 %x) nounwind {
; CHECK-LABEL: fp80_call:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    jmp _fp80_callee ## TAILCALL
entry:
  %call = tail call x86_fp80 @fp80_callee(x86_fp80 %x) nounwind
  ret x86_fp80 %call
}

declare double @trunc(double) nounwind readnone

; rdar://12229511 - Don't tail call trunc here.
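; The double returned by @trunc must be extended back to x86_fp80 after the
; call, so this cannot be a tail call.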
define x86_fp80 @trunc_fp80(x86_fp80 %x) nounwind {
; CHECK-LABEL: trunc_fp80:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq _trunc
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    retq
entry:
  %conv = fptrunc x86_fp80 %x to double
  %call = tail call double @trunc(double %conv) nounwind readnone
  %conv1 = fpext double %call to x86_fp80
  ret x86_fp80 %conv1
}