; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X86

; Make sure that flags are properly preserved despite atomic optimizations.

define i32 @atomic_and_flags_1(i8* %p, i32 %a, i32 %b) {
; X64-LABEL: atomic_and_flags_1:
; X64:       # %bb.0:
; X64-NEXT:    cmpl %edx, %esi
; X64-NEXT:    jne .LBB0_3
; X64-NEXT:  # %bb.1: # %L1
; X64-NEXT:    incb (%rdi)
; X64-NEXT:    cmpl %edx, %esi
; X64-NEXT:    jne .LBB0_2
; X64-NEXT:  # %bb.4: # %L3
; X64-NEXT:    movl $3, %eax
; X64-NEXT:    retq
; X64-NEXT:  .LBB0_3: # %L2
; X64-NEXT:    movl $2, %eax
; X64-NEXT:    retq
; X64-NEXT:  .LBB0_2: # %L4
; X64-NEXT:    movl $4, %eax
; X64-NEXT:    retq
;
; X86-LABEL: atomic_and_flags_1:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    cmpl %eax, %ecx
; X86-NEXT:    jne .LBB0_3
; X86-NEXT:  # %bb.1: # %L1
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    incb (%edx)
; X86-NEXT:    cmpl %eax, %ecx
; X86-NEXT:    jne .LBB0_2
; X86-NEXT:  # %bb.4: # %L3
; X86-NEXT:    movl $3, %eax
; X86-NEXT:    retl
; X86-NEXT:  .LBB0_3: # %L2
; X86-NEXT:    movl $2, %eax
; X86-NEXT:    retl
; X86-NEXT:  .LBB0_2: # %L4
; X86-NEXT:    movl $4, %eax
; X86-NEXT:    retl
  ; Generate flags value, and use it.
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %L1, label %L2

L1:
  ; The following pattern will get folded.
  %1 = load atomic i8, i8* %p seq_cst, align 1
  %2 = add i8 %1, 1 ; This forces the INC instruction to be generated.
  store atomic i8 %2, i8* %p release, align 1

  ; Use the comparison result again. We need to rematerialize the comparison
  ; somehow. This test checks that cmpl gets emitted again, but any
  ; rematerialization would work (the optimizer used to clobber the flags with
  ; the add).
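  ; (The folded sequence above lowers to a memory-destination INC, which
  ; rewrites ZF/SF/OF/AF/PF, so the flags produced by the first cmpl cannot
  ; stay live in EFLAGS across it.)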
  br i1 %cmp, label %L3, label %L4

L2:
  ret i32 2

L3:
  ret i32 3

L4:
  ret i32 4
}

; Same as above, but using 2 as immediate to avoid the INC instruction.
define i32 @atomic_and_flags_2(i8* %p, i32 %a, i32 %b) {
; X64-LABEL: atomic_and_flags_2:
; X64:       # %bb.0:
; X64-NEXT:    cmpl %edx, %esi
; X64-NEXT:    jne .LBB1_3
; X64-NEXT:  # %bb.1: # %L1
; X64-NEXT:    addb $2, (%rdi)
; X64-NEXT:    cmpl %edx, %esi
; X64-NEXT:    jne .LBB1_2
; X64-NEXT:  # %bb.4: # %L3
; X64-NEXT:    movl $3, %eax
; X64-NEXT:    retq
; X64-NEXT:  .LBB1_3: # %L2
; X64-NEXT:    movl $2, %eax
; X64-NEXT:    retq
; X64-NEXT:  .LBB1_2: # %L4
; X64-NEXT:    movl $4, %eax
; X64-NEXT:    retq
;
; X86-LABEL: atomic_and_flags_2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    cmpl %eax, %ecx
; X86-NEXT:    jne .LBB1_3
; X86-NEXT:  # %bb.1: # %L1
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    addb $2, (%edx)
; X86-NEXT:    cmpl %eax, %ecx
; X86-NEXT:    jne .LBB1_2
; X86-NEXT:  # %bb.4: # %L3
; X86-NEXT:    movl $3, %eax
; X86-NEXT:    retl
; X86-NEXT:  .LBB1_3: # %L2
; X86-NEXT:    movl $2, %eax
; X86-NEXT:    retl
; X86-NEXT:  .LBB1_2: # %L4
; X86-NEXT:    movl $4, %eax
; X86-NEXT:    retl
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %L1, label %L2
L1:
  %1 = load atomic i8, i8* %p seq_cst, align 1
  %2 = add i8 %1, 2
  store atomic i8 %2, i8* %p release, align 1
  br i1 %cmp, label %L3, label %L4
L2:
  ret i32 2
L3:
  ret i32 3
L4:
  ret i32 4
}

; PR20841 - ensure we don't reuse the ZF flag from XADD for compares with zero:
; the flags are set for the result of the add (the value stored to memory),
; not for the value returned by the atomicrmw add.
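; (`atomicrmw add` yields the value that was in memory *before* the increment,
; so a separate zero check of the returned value has to be emitted below
; instead of reusing ZF from the `lock xadd`.)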

define zeroext i1 @xadd_cmp0_i64(i64* %x) nounwind {
; X64-LABEL: xadd_cmp0_i64:
; X64:       # %bb.0:
; X64-NEXT:    movl $1, %eax
; X64-NEXT:    lock xaddq %rax, (%rdi)
; X64-NEXT:    testq %rax, %rax
; X64-NEXT:    sete %al
; X64-NEXT:    retq
;
; X86-LABEL: xadd_cmp0_i64:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl (%esi), %eax
; X86-NEXT:    movl 4(%esi), %edx
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB2_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    addl $1, %ebx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    lock cmpxchg8b (%esi)
; X86-NEXT:    jne .LBB2_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    orl %edx, %eax
; X86-NEXT:    sete %al
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
  %add = atomicrmw add i64* %x, i64 1 seq_cst
  %cmp = icmp eq i64 %add, 0
  ret i1 %cmp
}

define zeroext i1 @xadd_cmp0_i32(i32* %x) nounwind {
; X64-LABEL: xadd_cmp0_i32:
; X64:       # %bb.0:
; X64-NEXT:    movl $1, %eax
; X64-NEXT:    lock xaddl %eax, (%rdi)
; X64-NEXT:    testl %eax, %eax
; X64-NEXT:    setne %al
; X64-NEXT:    retq
;
; X86-LABEL: xadd_cmp0_i32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $1, %ecx
; X86-NEXT:    lock xaddl %ecx, (%eax)
; X86-NEXT:    testl %ecx, %ecx
; X86-NEXT:    setne %al
; X86-NEXT:    retl
  %add = atomicrmw add i32* %x, i32 1 seq_cst
  %cmp = icmp ne i32 %add, 0
  ret i1 %cmp
}

define zeroext i1 @xadd_cmp0_i16(i16* %x) nounwind {
; X64-LABEL: xadd_cmp0_i16:
; X64:       # %bb.0:
; X64-NEXT:    movw $1, %ax
; X64-NEXT:    lock xaddw %ax, (%rdi)
; X64-NEXT:    testw %ax, %ax
; X64-NEXT:    sete %al
; X64-NEXT:    retq
;
; X86-LABEL: xadd_cmp0_i16:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movw $1, %cx
; X86-NEXT:    lock xaddw %cx, (%eax)
; X86-NEXT:    testw %cx, %cx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
  %add = atomicrmw add i16* %x, i16 1 seq_cst
  %cmp = icmp eq i16 %add, 0
  ret i1 %cmp
}

define zeroext i1 @xadd_cmp0_i8(i8* %x) nounwind {
; X64-LABEL: xadd_cmp0_i8:
; X64:       # %bb.0:
; X64-NEXT:    movb $1, %al
; X64-NEXT:    lock xaddb %al, (%rdi)
; X64-NEXT:    testb %al, %al
; X64-NEXT:    setne %al
; X64-NEXT:    retq
;
; X86-LABEL: xadd_cmp0_i8:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb $1, %cl
; X86-NEXT:    lock xaddb %cl, (%eax)
; X86-NEXT:    testb %cl, %cl
; X86-NEXT:    setne %al
; X86-NEXT:    retl
  %add = atomicrmw add i8* %x, i8 1 seq_cst
  %cmp = icmp ne i8 %add, 0
  ret i1 %cmp
}