; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -o - -enable-shrink-wrap=true -pass-remarks-output=%t | FileCheck %s --check-prefix=ENABLE
; RUN: cat %t | FileCheck %s --check-prefix=REMARKS
; RUN: llc %s -o - -enable-shrink-wrap=false | FileCheck %s --check-prefix=DISABLE
;
; Note: Lots of tests use inline asm instead of regular calls.
; This allows us to have better control over what register allocation will do.
; Otherwise, we may have a spill right in the entry block, defeating
; shrink-wrapping. Moreover, some of the inline asm statements (nop)
; are here to ensure that the related paths do not end up as critical
; edges.
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "x86_64-apple-macosx"

; Initial motivating example: Simple diamond with a call just on one side.
define i32 @foo(i32 %a, i32 %b) {
; ENABLE-LABEL: foo:
; ENABLE: ## %bb.0:
; ENABLE-NEXT: movl %edi, %eax
; ENABLE-NEXT: cmpl %esi, %edi
; ENABLE-NEXT: jge LBB0_2
; ENABLE-NEXT: ## %bb.1: ## %true
; ENABLE-NEXT: pushq %rax
; ENABLE-NEXT: .cfi_def_cfa_offset 16
; ENABLE-NEXT: movl %eax, {{[0-9]+}}(%rsp)
; ENABLE-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; ENABLE-NEXT: xorl %edi, %edi
; ENABLE-NEXT: callq _doSomething
; ENABLE-NEXT: addq $8, %rsp
; ENABLE-NEXT: LBB0_2: ## %false
; ENABLE-NEXT: retq
;
; DISABLE-LABEL: foo:
; DISABLE: ## %bb.0:
; DISABLE-NEXT: pushq %rax
; DISABLE-NEXT: .cfi_def_cfa_offset 16
; DISABLE-NEXT: movl %edi, %eax
; DISABLE-NEXT: cmpl %esi, %edi
; DISABLE-NEXT: jge LBB0_2
; DISABLE-NEXT: ## %bb.1: ## %true
; DISABLE-NEXT: movl %eax, {{[0-9]+}}(%rsp)
; DISABLE-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
; DISABLE-NEXT: xorl %edi, %edi
; DISABLE-NEXT: callq _doSomething
; DISABLE-NEXT: LBB0_2: ## %false
; DISABLE-NEXT: popq %rcx
; DISABLE-NEXT: retq
%tmp = alloca i32, align 4
%tmp2 = icmp slt i32 %a, %b
br i1 %tmp2, label %true, label %false
true:
store i32 %a, i32* %tmp, align 4
%tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
br label %false
false:
%tmp.0 = phi i32 [ %tmp4, %true ], [ %a, %0 ]
ret i32 %tmp.0
}
; Function Attrs: optsize
declare i32 @doSomething(i32, i32*)
; Check that we do not perform the restore inside the loop while the save
; is outside.
define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
|
|
; ENABLE-LABEL: freqSaveAndRestoreOutsideLoop:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: testl %edi, %edi
|
|
; ENABLE-NEXT: je LBB1_4
|
|
; ENABLE-NEXT: ## %bb.1: ## %for.preheader
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -16
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: movl $10, %ecx
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB1_2: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: movl $1, %edx
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: addl %edx, %eax
|
|
; ENABLE-NEXT: decl %ecx
|
|
; ENABLE-NEXT: jne LBB1_2
|
|
; ENABLE-NEXT: ## %bb.3: ## %for.end
|
|
; ENABLE-NEXT: shll $3, %eax
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB1_4: ## %if.else
|
|
; ENABLE-NEXT: movl %esi, %eax
|
|
; ENABLE-NEXT: addl %esi, %eax
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: freqSaveAndRestoreOutsideLoop:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -16
|
|
; DISABLE-NEXT: testl %edi, %edi
|
|
; DISABLE-NEXT: je LBB1_4
|
|
; DISABLE-NEXT: ## %bb.1: ## %for.preheader
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: movl $10, %ecx
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB1_2: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: movl $1, %edx
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: addl %edx, %eax
|
|
; DISABLE-NEXT: decl %ecx
|
|
; DISABLE-NEXT: jne LBB1_2
|
|
; DISABLE-NEXT: ## %bb.3: ## %for.end
|
|
; DISABLE-NEXT: shll $3, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB1_4: ## %if.else
|
|
; DISABLE-NEXT: movl %esi, %eax
|
|
; DISABLE-NEXT: addl %esi, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%tobool = icmp eq i32 %cond, 0
|
|
br i1 %tobool, label %if.else, label %for.preheader
|
|
|
|
for.preheader:
|
|
tail call void asm "nop", ""()
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %entry, %for.body
|
|
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
|
|
%sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
|
|
%call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
|
|
%add = add nsw i32 %call, %sum.04
|
|
%inc = add nuw nsw i32 %i.05, 1
|
|
%exitcond = icmp eq i32 %inc, 10
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end: ; preds = %for.body
|
|
%shl = shl i32 %add, 3
|
|
br label %if.end
|
|
|
|
if.else: ; preds = %entry
|
|
%mul = shl nsw i32 %N, 1
|
|
br label %if.end
|
|
|
|
if.end: ; preds = %if.else, %for.end
|
|
%sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]
|
|
ret i32 %sum.1
|
|
}
|
|
|
|
declare i32 @something(...)
|
|
|
|
; Check that we do not perform shrink-wrapping inside the loop even
; though that would be legal. The cost model must prevent that.
define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
|
|
; ENABLE-LABEL: freqSaveAndRestoreOutsideLoop2:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -16
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: movl $10, %ecx
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB2_1: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: movl $1, %edx
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: addl %edx, %eax
|
|
; ENABLE-NEXT: decl %ecx
|
|
; ENABLE-NEXT: jne LBB2_1
|
|
; ENABLE-NEXT: ## %bb.2: ## %for.exit
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: freqSaveAndRestoreOutsideLoop2:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -16
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: movl $10, %ecx
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB2_1: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: movl $1, %edx
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: addl %edx, %eax
|
|
; DISABLE-NEXT: decl %ecx
|
|
; DISABLE-NEXT: jne LBB2_1
|
|
; DISABLE-NEXT: ## %bb.2: ## %for.exit
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
br label %for.preheader
|
|
|
|
for.preheader:
|
|
tail call void asm "nop", ""()
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %for.body, %entry
|
|
%i.04 = phi i32 [ 0, %for.preheader ], [ %inc, %for.body ]
|
|
%sum.03 = phi i32 [ 0, %for.preheader ], [ %add, %for.body ]
|
|
%call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
|
|
%add = add nsw i32 %call, %sum.03
|
|
%inc = add nuw nsw i32 %i.04, 1
|
|
%exitcond = icmp eq i32 %inc, 10
|
|
br i1 %exitcond, label %for.exit, label %for.body
|
|
|
|
for.exit:
|
|
tail call void asm "nop", ""()
|
|
br label %for.end
|
|
|
|
for.end: ; preds = %for.body
|
|
ret i32 %add
|
|
}
|
|
|
|
; Check with a more complex case that we do not have the save within the loop
; and the restore outside.
define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
|
|
; ENABLE-LABEL: loopInfoSaveOutsideLoop:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: testl %edi, %edi
|
|
; ENABLE-NEXT: je LBB3_4
|
|
; ENABLE-NEXT: ## %bb.1: ## %for.preheader
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -16
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: movl $10, %ecx
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB3_2: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: movl $1, %edx
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: addl %edx, %eax
|
|
; ENABLE-NEXT: decl %ecx
|
|
; ENABLE-NEXT: jne LBB3_2
|
|
; ENABLE-NEXT: ## %bb.3: ## %for.end
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: shll $3, %eax
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB3_4: ## %if.else
|
|
; ENABLE-NEXT: movl %esi, %eax
|
|
; ENABLE-NEXT: addl %esi, %eax
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: loopInfoSaveOutsideLoop:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -16
|
|
; DISABLE-NEXT: testl %edi, %edi
|
|
; DISABLE-NEXT: je LBB3_4
|
|
; DISABLE-NEXT: ## %bb.1: ## %for.preheader
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: movl $10, %ecx
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB3_2: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: movl $1, %edx
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: addl %edx, %eax
|
|
; DISABLE-NEXT: decl %ecx
|
|
; DISABLE-NEXT: jne LBB3_2
|
|
; DISABLE-NEXT: ## %bb.3: ## %for.end
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: shll $3, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB3_4: ## %if.else
|
|
; DISABLE-NEXT: movl %esi, %eax
|
|
; DISABLE-NEXT: addl %esi, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%tobool = icmp eq i32 %cond, 0
|
|
br i1 %tobool, label %if.else, label %for.preheader
|
|
|
|
for.preheader:
|
|
tail call void asm "nop", ""()
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %entry, %for.body
|
|
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
|
|
%sum.04 = phi i32 [ %add, %for.body ], [ 0, %for.preheader ]
|
|
%call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
|
|
%add = add nsw i32 %call, %sum.04
|
|
%inc = add nuw nsw i32 %i.05, 1
|
|
%exitcond = icmp eq i32 %inc, 10
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end: ; preds = %for.body
|
|
tail call void asm "nop", "~{ebx}"()
|
|
%shl = shl i32 %add, 3
|
|
br label %if.end
|
|
|
|
if.else: ; preds = %entry
|
|
%mul = shl nsw i32 %N, 1
|
|
br label %if.end
|
|
|
|
if.end: ; preds = %if.else, %for.end
|
|
%sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]
|
|
ret i32 %sum.1
|
|
}
|
|
|
|
; Check with a more complex case that we do not have the restore within the
; loop and the save outside.
define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
|
|
; ENABLE-LABEL: loopInfoRestoreOutsideLoop:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: testl %edi, %edi
|
|
; ENABLE-NEXT: je LBB4_4
|
|
; ENABLE-NEXT: ## %bb.1: ## %if.then
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: movl $10, %ecx
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB4_2: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: movl $1, %edx
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: addl %edx, %eax
|
|
; ENABLE-NEXT: decl %ecx
|
|
; ENABLE-NEXT: jne LBB4_2
|
|
; ENABLE-NEXT: ## %bb.3: ## %for.end
|
|
; ENABLE-NEXT: shll $3, %eax
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB4_4: ## %if.else
|
|
; ENABLE-NEXT: movl %esi, %eax
|
|
; ENABLE-NEXT: addl %esi, %eax
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: loopInfoRestoreOutsideLoop:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: testl %edi, %edi
|
|
; DISABLE-NEXT: je LBB4_4
|
|
; DISABLE-NEXT: ## %bb.1: ## %if.then
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: movl $10, %ecx
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB4_2: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: movl $1, %edx
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: addl %edx, %eax
|
|
; DISABLE-NEXT: decl %ecx
|
|
; DISABLE-NEXT: jne LBB4_2
|
|
; DISABLE-NEXT: ## %bb.3: ## %for.end
|
|
; DISABLE-NEXT: shll $3, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB4_4: ## %if.else
|
|
; DISABLE-NEXT: movl %esi, %eax
|
|
; DISABLE-NEXT: addl %esi, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%tobool = icmp eq i32 %cond, 0
|
|
br i1 %tobool, label %if.else, label %if.then
|
|
|
|
if.then: ; preds = %entry
|
|
tail call void asm "nop", "~{ebx}"()
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %for.body, %if.then
|
|
%i.05 = phi i32 [ 0, %if.then ], [ %inc, %for.body ]
|
|
%sum.04 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
|
|
%call = tail call i32 asm sideeffect "movl $$1, $0", "=r,~{ebx}"()
|
|
%add = add nsw i32 %call, %sum.04
|
|
%inc = add nuw nsw i32 %i.05, 1
|
|
%exitcond = icmp eq i32 %inc, 10
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end: ; preds = %for.body
|
|
%shl = shl i32 %add, 3
|
|
br label %if.end
|
|
|
|
if.else: ; preds = %entry
|
|
%mul = shl nsw i32 %N, 1
|
|
br label %if.end
|
|
|
|
if.end: ; preds = %if.else, %for.end
|
|
%sum.1 = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]
|
|
ret i32 %sum.1
|
|
}
|
|
|
|
; Check that we handle a function with no frame information correctly.
define i32 @emptyFrame() {
|
|
; ENABLE-LABEL: emptyFrame:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: emptyFrame:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
ret i32 0
|
|
}
|
|
|
|
; Check that we handle inline asm correctly.
|
|
define i32 @inlineAsm(i32 %cond, i32 %N) {
|
|
; ENABLE-LABEL: inlineAsm:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: testl %edi, %edi
|
|
; ENABLE-NEXT: je LBB6_4
|
|
; ENABLE-NEXT: ## %bb.1: ## %for.preheader
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -16
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: movl $10, %eax
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB6_2: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: addl $1, %ebx
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: decl %eax
|
|
; ENABLE-NEXT: jne LBB6_2
|
|
; ENABLE-NEXT: ## %bb.3: ## %for.exit
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB6_4: ## %if.else
|
|
; ENABLE-NEXT: movl %esi, %eax
|
|
; ENABLE-NEXT: addl %esi, %eax
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: inlineAsm:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -16
|
|
; DISABLE-NEXT: testl %edi, %edi
|
|
; DISABLE-NEXT: je LBB6_4
|
|
; DISABLE-NEXT: ## %bb.1: ## %for.preheader
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: movl $10, %eax
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB6_2: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: addl $1, %ebx
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: decl %eax
|
|
; DISABLE-NEXT: jne LBB6_2
|
|
; DISABLE-NEXT: ## %bb.3: ## %for.exit
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB6_4: ## %if.else
|
|
; DISABLE-NEXT: movl %esi, %eax
|
|
; DISABLE-NEXT: addl %esi, %eax
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%tobool = icmp eq i32 %cond, 0
|
|
br i1 %tobool, label %if.else, label %for.preheader
|
|
|
|
for.preheader:
|
|
tail call void asm "nop", ""()
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %entry, %for.body
|
|
%i.03 = phi i32 [ %inc, %for.body ], [ 0, %for.preheader ]
|
|
tail call void asm "addl $$1, %ebx", "~{ebx}"()
|
|
%inc = add nuw nsw i32 %i.03, 1
|
|
%exitcond = icmp eq i32 %inc, 10
|
|
br i1 %exitcond, label %for.exit, label %for.body
|
|
|
|
for.exit:
|
|
tail call void asm "nop", ""()
|
|
br label %if.end
|
|
|
|
if.else: ; preds = %entry
|
|
%mul = shl nsw i32 %N, 1
|
|
br label %if.end
|
|
|
|
if.end: ; preds = %for.body, %if.else
|
|
%sum.0 = phi i32 [ %mul, %if.else ], [ 0, %for.exit ]
|
|
ret i32 %sum.0
|
|
}
|
|
|
|
; Check that we handle calls to variadic functions correctly.
|
|
define i32 @callVariadicFunc(i32 %cond, i32 %N) {
|
|
; ENABLE-LABEL: callVariadicFunc:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: movl %esi, %eax
|
|
; ENABLE-NEXT: testl %edi, %edi
|
|
; ENABLE-NEXT: je LBB7_2
|
|
; ENABLE-NEXT: ## %bb.1: ## %if.then
|
|
; ENABLE-NEXT: pushq %rax
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: movl %eax, (%rsp)
|
|
; ENABLE-NEXT: movl %eax, %edi
|
|
; ENABLE-NEXT: movl %eax, %esi
|
|
; ENABLE-NEXT: movl %eax, %edx
|
|
; ENABLE-NEXT: movl %eax, %ecx
|
|
; ENABLE-NEXT: movl %eax, %r8d
|
|
; ENABLE-NEXT: movl %eax, %r9d
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: callq _someVariadicFunc
|
|
; ENABLE-NEXT: shll $3, %eax
|
|
; ENABLE-NEXT: addq $8, %rsp
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB7_2: ## %if.else
|
|
; ENABLE-NEXT: addl %eax, %eax
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: callVariadicFunc:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rax
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: movl %esi, %eax
|
|
; DISABLE-NEXT: testl %edi, %edi
|
|
; DISABLE-NEXT: je LBB7_2
|
|
; DISABLE-NEXT: ## %bb.1: ## %if.then
|
|
; DISABLE-NEXT: movl %eax, (%rsp)
|
|
; DISABLE-NEXT: movl %eax, %edi
|
|
; DISABLE-NEXT: movl %eax, %esi
|
|
; DISABLE-NEXT: movl %eax, %edx
|
|
; DISABLE-NEXT: movl %eax, %ecx
|
|
; DISABLE-NEXT: movl %eax, %r8d
|
|
; DISABLE-NEXT: movl %eax, %r9d
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: callq _someVariadicFunc
|
|
; DISABLE-NEXT: shll $3, %eax
|
|
; DISABLE-NEXT: popq %rcx
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB7_2: ## %if.else
|
|
; DISABLE-NEXT: addl %eax, %eax
|
|
; DISABLE-NEXT: popq %rcx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%tobool = icmp eq i32 %cond, 0
|
|
br i1 %tobool, label %if.else, label %if.then
|
|
|
|
if.then: ; preds = %entry
|
|
%call = tail call i32 (i32, ...) @someVariadicFunc(i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N, i32 %N)
|
|
%shl = shl i32 %call, 3
|
|
br label %if.end
|
|
|
|
if.else: ; preds = %entry
|
|
%mul = shl nsw i32 %N, 1
|
|
br label %if.end
|
|
|
|
if.end: ; preds = %if.else, %if.then
|
|
%sum.0 = phi i32 [ %shl, %if.then ], [ %mul, %if.else ]
|
|
ret i32 %sum.0
|
|
}
|
|
|
|
declare i32 @someVariadicFunc(i32, ...)
|
|
|
|
; Check that we use LEA so as not to clobber EFLAGS.
%struct.temp_slot = type { %struct.temp_slot*, %struct.rtx_def*, %struct.rtx_def*, i32, i64, %union.tree_node*, %union.tree_node*, i8, i8, i32, i32, i64, i64 }
|
|
%union.tree_node = type { %struct.tree_decl }
|
|
%struct.tree_decl = type { %struct.tree_common, i8*, i32, i32, %union.tree_node*, i48, %union.anon, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %union.anon.1, %union.tree_node*, %union.tree_node*, %union.tree_node*, i64, %struct.lang_decl* }
|
|
%struct.tree_common = type { %union.tree_node*, %union.tree_node*, i32 }
|
|
%union.anon = type { i64 }
|
|
%union.anon.1 = type { %struct.function* }
|
|
%struct.function = type { %struct.eh_status*, %struct.stmt_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, i8*, %union.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.ix86_args, %struct.rtx_def*, %struct.rtx_def*, i8*, %struct.initial_value_struct*, i32, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i64, %union.tree_node*, %union.tree_node*, %struct.rtx_def*, %struct.rtx_def*, i32, %struct.rtx_def**, %struct.temp_slot*, i32, i32, i32, %struct.var_refs_queue*, i32, i32, i8*, %union.tree_node*, %struct.rtx_def*, i32, i32, %struct.machine_function*, i32, i32, %struct.language_function*, %struct.rtx_def*, i24 }
|
|
%struct.eh_status = type opaque
|
|
%struct.stmt_status = type opaque
|
|
%struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
|
|
%struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %union.tree_node*, %struct.sequence_stack*, i32, i32, i8*, i32, i8*, %union.tree_node**, %struct.rtx_def** }
|
|
%struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %union.tree_node*, %struct.sequence_stack* }
|
|
%struct.varasm_status = type opaque
|
|
%struct.ix86_args = type { i32, i32, i32, i32, i32, i32, i32 }
|
|
%struct.initial_value_struct = type opaque
|
|
%struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
|
|
%struct.machine_function = type opaque
|
|
%struct.language_function = type opaque
|
|
%struct.lang_decl = type opaque
|
|
%struct.rtx_def = type { i32, [1 x %union.rtunion_def] }
|
|
%union.rtunion_def = type { i64 }
|
|
|
|
declare hidden fastcc %struct.temp_slot* @find_temp_slot_from_address(%struct.rtx_def* readonly)
|
|
|
|
define void @useLEA(%struct.rtx_def* readonly %x) {
|
|
; ENABLE-LABEL: useLEA:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: pushq %rax
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: testq %rdi, %rdi
|
|
; ENABLE-NEXT: je LBB8_7
|
|
; ENABLE-NEXT: ## %bb.1: ## %if.end
|
|
; ENABLE-NEXT: cmpw $66, (%rdi)
|
|
; ENABLE-NEXT: jne LBB8_7
|
|
; ENABLE-NEXT: ## %bb.2: ## %lor.lhs.false
|
|
; ENABLE-NEXT: movq 8(%rdi), %rdi
|
|
; ENABLE-NEXT: movzwl (%rdi), %eax
|
|
; ENABLE-NEXT: leal -54(%rax), %ecx
|
|
; ENABLE-NEXT: cmpl $14, %ecx
|
|
; ENABLE-NEXT: ja LBB8_3
|
|
; ENABLE-NEXT: ## %bb.8: ## %lor.lhs.false
|
|
; ENABLE-NEXT: movl $24599, %edx ## imm = 0x6017
|
|
; ENABLE-NEXT: btl %ecx, %edx
|
|
; ENABLE-NEXT: jae LBB8_3
|
|
; ENABLE-NEXT: LBB8_7: ## %cleanup
|
|
; ENABLE-NEXT: popq %rax
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB8_3: ## %lor.lhs.false
|
|
; ENABLE-NEXT: cmpl $134, %eax
|
|
; ENABLE-NEXT: je LBB8_7
|
|
; ENABLE-NEXT: ## %bb.4: ## %lor.lhs.false
|
|
; ENABLE-NEXT: cmpl $140, %eax
|
|
; ENABLE-NEXT: je LBB8_7
|
|
; ENABLE-NEXT: ## %bb.5: ## %if.end.55
|
|
; ENABLE-NEXT: callq _find_temp_slot_from_address
|
|
; ENABLE-NEXT: testq %rax, %rax
|
|
; ENABLE-NEXT: je LBB8_7
|
|
; ENABLE-NEXT: ## %bb.6: ## %if.then.60
|
|
; ENABLE-NEXT: movb $1, 57(%rax)
|
|
; ENABLE-NEXT: popq %rax
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: useLEA:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rax
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: testq %rdi, %rdi
|
|
; DISABLE-NEXT: je LBB8_7
|
|
; DISABLE-NEXT: ## %bb.1: ## %if.end
|
|
; DISABLE-NEXT: cmpw $66, (%rdi)
|
|
; DISABLE-NEXT: jne LBB8_7
|
|
; DISABLE-NEXT: ## %bb.2: ## %lor.lhs.false
|
|
; DISABLE-NEXT: movq 8(%rdi), %rdi
|
|
; DISABLE-NEXT: movzwl (%rdi), %eax
|
|
; DISABLE-NEXT: leal -54(%rax), %ecx
|
|
; DISABLE-NEXT: cmpl $14, %ecx
|
|
; DISABLE-NEXT: ja LBB8_3
|
|
; DISABLE-NEXT: ## %bb.8: ## %lor.lhs.false
|
|
; DISABLE-NEXT: movl $24599, %edx ## imm = 0x6017
|
|
; DISABLE-NEXT: btl %ecx, %edx
|
|
; DISABLE-NEXT: jae LBB8_3
|
|
; DISABLE-NEXT: LBB8_7: ## %cleanup
|
|
; DISABLE-NEXT: popq %rax
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB8_3: ## %lor.lhs.false
|
|
; DISABLE-NEXT: cmpl $134, %eax
|
|
; DISABLE-NEXT: je LBB8_7
|
|
; DISABLE-NEXT: ## %bb.4: ## %lor.lhs.false
|
|
; DISABLE-NEXT: cmpl $140, %eax
|
|
; DISABLE-NEXT: je LBB8_7
|
|
; DISABLE-NEXT: ## %bb.5: ## %if.end.55
|
|
; DISABLE-NEXT: callq _find_temp_slot_from_address
|
|
; DISABLE-NEXT: testq %rax, %rax
|
|
; DISABLE-NEXT: je LBB8_7
|
|
; DISABLE-NEXT: ## %bb.6: ## %if.then.60
|
|
; DISABLE-NEXT: movb $1, 57(%rax)
|
|
; DISABLE-NEXT: popq %rax
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%cmp = icmp eq %struct.rtx_def* %x, null
|
|
br i1 %cmp, label %cleanup, label %if.end
|
|
|
|
if.end: ; preds = %entry
|
|
%tmp = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %x, i64 0, i32 0
|
|
%bf.load = load i32, i32* %tmp, align 8
|
|
%bf.clear = and i32 %bf.load, 65535
|
|
%cmp1 = icmp eq i32 %bf.clear, 66
|
|
br i1 %cmp1, label %lor.lhs.false, label %cleanup
|
|
|
|
lor.lhs.false: ; preds = %if.end
|
|
%arrayidx = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %x, i64 0, i32 1, i64 0
|
|
%rtx = bitcast %union.rtunion_def* %arrayidx to %struct.rtx_def**
|
|
%tmp1 = load %struct.rtx_def*, %struct.rtx_def** %rtx, align 8
|
|
%tmp2 = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %tmp1, i64 0, i32 0
|
|
%bf.load2 = load i32, i32* %tmp2, align 8
|
|
%bf.clear3 = and i32 %bf.load2, 65535
|
|
switch i32 %bf.clear3, label %if.end.55 [
|
|
i32 67, label %cleanup
|
|
i32 68, label %cleanup
|
|
i32 54, label %cleanup
|
|
i32 55, label %cleanup
|
|
i32 58, label %cleanup
|
|
i32 134, label %cleanup
|
|
i32 56, label %cleanup
|
|
i32 140, label %cleanup
|
|
]
|
|
|
|
if.end.55: ; preds = %lor.lhs.false
|
|
%call = tail call fastcc %struct.temp_slot* @find_temp_slot_from_address(%struct.rtx_def* %tmp1) #2
|
|
%cmp59 = icmp eq %struct.temp_slot* %call, null
|
|
br i1 %cmp59, label %cleanup, label %if.then.60
|
|
|
|
if.then.60: ; preds = %if.end.55
|
|
%addr_taken = getelementptr inbounds %struct.temp_slot, %struct.temp_slot* %call, i64 0, i32 8
|
|
store i8 1, i8* %addr_taken, align 1
|
|
br label %cleanup
|
|
|
|
cleanup: ; preds = %if.then.60, %if.end.55, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %lor.lhs.false, %if.end, %entry
|
|
ret void
|
|
}
|
|
|
|
; Make sure we do not insert unreachable code after a noreturn function.
; Although inserting such code is not incorrect, it is useless
; and it hurts the binary size.
define i32 @noreturn(i8 signext %bad_thing) {
|
|
; ENABLE-LABEL: noreturn:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: testb %dil, %dil
|
|
; ENABLE-NEXT: jne LBB9_2
|
|
; ENABLE-NEXT: ## %bb.1: ## %if.end
|
|
; ENABLE-NEXT: movl $42, %eax
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB9_2: ## %if.abort
|
|
; ENABLE-NEXT: pushq %rax
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: callq _abort
|
|
;
|
|
; DISABLE-LABEL: noreturn:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rax
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: testb %dil, %dil
|
|
; DISABLE-NEXT: jne LBB9_2
|
|
; DISABLE-NEXT: ## %bb.1: ## %if.end
|
|
; DISABLE-NEXT: movl $42, %eax
|
|
; DISABLE-NEXT: popq %rcx
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB9_2: ## %if.abort
|
|
; DISABLE-NEXT: callq _abort
|
|
entry:
|
|
%tobool = icmp eq i8 %bad_thing, 0
|
|
br i1 %tobool, label %if.end, label %if.abort
|
|
|
|
if.abort:
|
|
tail call void @abort() #0
|
|
unreachable
|
|
|
|
if.end:
|
|
ret i32 42
|
|
}
|
|
|
|
declare void @abort() #0
|
|
|
|
attributes #0 = { noreturn nounwind }
|
|
|
|
|
|
; Make sure that we handle infinite loops properly. When checking that the Save
; and Restore blocks are control flow equivalent, the loop searches for the
; immediate (post) dominator of the (restore) save blocks. When either the Save
; or Restore block is located in an infinite loop, the only immediate (post)
; dominator is itself. In this case, we cannot perform shrink-wrapping, but we
; should return gracefully and continue compilation.
; The only requirement for this test is that the compilation finishes correctly.
;
define void @infiniteloop() {
|
|
; ENABLE-LABEL: infiniteloop:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: pushq %rbp
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbp, -16
|
|
; ENABLE-NEXT: movq %rsp, %rbp
|
|
; ENABLE-NEXT: .cfi_def_cfa_register %rbp
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: pushq %rax
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -24
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: testb %al, %al
|
|
; ENABLE-NEXT: jne LBB10_3
|
|
; ENABLE-NEXT: ## %bb.1: ## %if.then
|
|
; ENABLE-NEXT: movq %rsp, %rcx
|
|
; ENABLE-NEXT: addq $-16, %rcx
|
|
; ENABLE-NEXT: movq %rcx, %rsp
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: movl $1, %edx
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB10_2: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: addl %edx, %eax
|
|
; ENABLE-NEXT: movl %eax, (%rcx)
|
|
; ENABLE-NEXT: jmp LBB10_2
|
|
; ENABLE-NEXT: LBB10_3: ## %if.end
|
|
; ENABLE-NEXT: leaq -8(%rbp), %rsp
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: popq %rbp
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: infiniteloop:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbp
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbp, -16
|
|
; DISABLE-NEXT: movq %rsp, %rbp
|
|
; DISABLE-NEXT: .cfi_def_cfa_register %rbp
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: pushq %rax
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -24
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: testb %al, %al
|
|
; DISABLE-NEXT: jne LBB10_3
|
|
; DISABLE-NEXT: ## %bb.1: ## %if.then
|
|
; DISABLE-NEXT: movq %rsp, %rcx
|
|
; DISABLE-NEXT: addq $-16, %rcx
|
|
; DISABLE-NEXT: movq %rcx, %rsp
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: movl $1, %edx
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB10_2: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: addl %edx, %eax
|
|
; DISABLE-NEXT: movl %eax, (%rcx)
|
|
; DISABLE-NEXT: jmp LBB10_2
|
|
; DISABLE-NEXT: LBB10_3: ## %if.end
|
|
; DISABLE-NEXT: leaq -8(%rbp), %rsp
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: popq %rbp
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
br i1 undef, label %if.then, label %if.end
|
|
|
|
if.then:
|
|
%ptr = alloca i32, i32 4
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %for.body, %entry
|
|
%sum.03 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
|
|
%call = tail call i32 asm "movl $$1, $0", "=r,~{ebx}"()
|
|
%add = add nsw i32 %call, %sum.03
|
|
store i32 %add, i32* %ptr
|
|
br label %for.body
|
|
|
|
if.end:
|
|
ret void
|
|
}
|
|
|
|
; Another infinite loop test, this time with a body bigger than just one block.
define void @infiniteloop2() {
|
|
; ENABLE-LABEL: infiniteloop2:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: pushq %rbp
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbp, -16
|
|
; ENABLE-NEXT: movq %rsp, %rbp
|
|
; ENABLE-NEXT: .cfi_def_cfa_register %rbp
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: pushq %rax
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -24
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: testb %al, %al
|
|
; ENABLE-NEXT: jne LBB11_5
|
|
; ENABLE-NEXT: ## %bb.1: ## %if.then
|
|
; ENABLE-NEXT: movq %rsp, %rcx
|
|
; ENABLE-NEXT: addq $-16, %rcx
|
|
; ENABLE-NEXT: movq %rcx, %rsp
|
|
; ENABLE-NEXT: xorl %edx, %edx
|
|
; ENABLE-NEXT: jmp LBB11_2
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB11_4: ## %body2
|
|
; ENABLE-NEXT: ## in Loop: Header=BB11_2 Depth=1
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: movl $1, %edx
|
|
; ENABLE-NEXT: LBB11_2: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: movl %edx, %esi
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: movl $1, %edx
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: addl %esi, %edx
|
|
; ENABLE-NEXT: movl %edx, (%rcx)
|
|
; ENABLE-NEXT: testb %al, %al
|
|
; ENABLE-NEXT: jne LBB11_4
|
|
; ENABLE-NEXT: ## %bb.3: ## %body1
|
|
; ENABLE-NEXT: ## in Loop: Header=BB11_2 Depth=1
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: jmp LBB11_2
|
|
; ENABLE-NEXT: LBB11_5: ## %if.end
|
|
; ENABLE-NEXT: leaq -8(%rbp), %rsp
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: popq %rbp
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: infiniteloop2:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbp
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbp, -16
|
|
; DISABLE-NEXT: movq %rsp, %rbp
|
|
; DISABLE-NEXT: .cfi_def_cfa_register %rbp
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: pushq %rax
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -24
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: testb %al, %al
|
|
; DISABLE-NEXT: jne LBB11_5
|
|
; DISABLE-NEXT: ## %bb.1: ## %if.then
|
|
; DISABLE-NEXT: movq %rsp, %rcx
|
|
; DISABLE-NEXT: addq $-16, %rcx
|
|
; DISABLE-NEXT: movq %rcx, %rsp
|
|
; DISABLE-NEXT: xorl %edx, %edx
|
|
; DISABLE-NEXT: jmp LBB11_2
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB11_4: ## %body2
|
|
; DISABLE-NEXT: ## in Loop: Header=BB11_2 Depth=1
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: movl $1, %edx
|
|
; DISABLE-NEXT: LBB11_2: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: movl %edx, %esi
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: movl $1, %edx
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: addl %esi, %edx
|
|
; DISABLE-NEXT: movl %edx, (%rcx)
|
|
; DISABLE-NEXT: testb %al, %al
|
|
; DISABLE-NEXT: jne LBB11_4
|
|
; DISABLE-NEXT: ## %bb.3: ## %body1
|
|
; DISABLE-NEXT: ## in Loop: Header=BB11_2 Depth=1
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: jmp LBB11_2
|
|
; DISABLE-NEXT: LBB11_5: ## %if.end
|
|
; DISABLE-NEXT: leaq -8(%rbp), %rsp
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: popq %rbp
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
br i1 undef, label %if.then, label %if.end
|
|
|
|
if.then:
|
|
%ptr = alloca i32, i32 4
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %for.body, %entry
|
|
%sum.03 = phi i32 [ 0, %if.then ], [ %add, %body1 ], [ 1, %body2]
|
|
%call = tail call i32 asm "movl $$1, $0", "=r,~{ebx}"()
|
|
%add = add nsw i32 %call, %sum.03
|
|
store i32 %add, i32* %ptr
|
|
br i1 undef, label %body1, label %body2
|
|
|
|
body1:
|
|
tail call void asm sideeffect "nop", "~{ebx}"()
|
|
br label %for.body
|
|
|
|
body2:
|
|
tail call void asm sideeffect "nop", "~{ebx}"()
|
|
br label %for.body
|
|
|
|
if.end:
|
|
ret void
|
|
}
|
|
|
|
; Another infinite loop test, this time with two nested infinite loops.
define void @infiniteloop3() {
|
|
; ENABLE-LABEL: infiniteloop3:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: testb %al, %al
|
|
; ENABLE-NEXT: jne LBB12_2
|
|
; ENABLE-NEXT: ## %bb.1: ## %body
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: testb %al, %al
|
|
; ENABLE-NEXT: jne LBB12_7
|
|
; ENABLE-NEXT: LBB12_2: ## %loop2a.preheader
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: xorl %ecx, %ecx
|
|
; ENABLE-NEXT: movq %rax, %rsi
|
|
; ENABLE-NEXT: jmp LBB12_4
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB12_3: ## %loop2b
|
|
; ENABLE-NEXT: ## in Loop: Header=BB12_4 Depth=1
|
|
; ENABLE-NEXT: movq %rdx, (%rsi)
|
|
; ENABLE-NEXT: movq %rdx, %rsi
|
|
; ENABLE-NEXT: LBB12_4: ## %loop1
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: movq %rcx, %rdx
|
|
; ENABLE-NEXT: testq %rax, %rax
|
|
; ENABLE-NEXT: movq (%rax), %rcx
|
|
; ENABLE-NEXT: jne LBB12_3
|
|
; ENABLE-NEXT: ## %bb.5: ## in Loop: Header=BB12_4 Depth=1
|
|
; ENABLE-NEXT: movq %rdx, %rax
|
|
; ENABLE-NEXT: movq %rdx, %rsi
|
|
; ENABLE-NEXT: jmp LBB12_4
|
|
; ENABLE-NEXT: LBB12_7: ## %end
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: infiniteloop3:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: testb %al, %al
|
|
; DISABLE-NEXT: jne LBB12_2
|
|
; DISABLE-NEXT: ## %bb.1: ## %body
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: testb %al, %al
|
|
; DISABLE-NEXT: jne LBB12_7
|
|
; DISABLE-NEXT: LBB12_2: ## %loop2a.preheader
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: xorl %ecx, %ecx
|
|
; DISABLE-NEXT: movq %rax, %rsi
|
|
; DISABLE-NEXT: jmp LBB12_4
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB12_3: ## %loop2b
|
|
; DISABLE-NEXT: ## in Loop: Header=BB12_4 Depth=1
|
|
; DISABLE-NEXT: movq %rdx, (%rsi)
|
|
; DISABLE-NEXT: movq %rdx, %rsi
|
|
; DISABLE-NEXT: LBB12_4: ## %loop1
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: movq %rcx, %rdx
|
|
; DISABLE-NEXT: testq %rax, %rax
|
|
; DISABLE-NEXT: movq (%rax), %rcx
|
|
; DISABLE-NEXT: jne LBB12_3
|
|
; DISABLE-NEXT: ## %bb.5: ## in Loop: Header=BB12_4 Depth=1
|
|
; DISABLE-NEXT: movq %rdx, %rax
|
|
; DISABLE-NEXT: movq %rdx, %rsi
|
|
; DISABLE-NEXT: jmp LBB12_4
|
|
; DISABLE-NEXT: LBB12_7: ## %end
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
br i1 undef, label %loop2a, label %body
|
|
|
|
body: ; preds = %entry
|
|
br i1 undef, label %loop2a, label %end
|
|
|
|
loop1: ; preds = %loop2a, %loop2b
|
|
%var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
|
|
%next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
|
|
%0 = icmp eq i32* %var, null
|
|
%next.load = load i32*, i32** undef
|
|
br i1 %0, label %loop2a, label %loop2b
|
|
|
|
loop2a: ; preds = %loop1, %body, %entry
|
|
%var = phi i32* [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
|
|
%next.var = phi i32* [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]
|
|
br label %loop1
|
|
|
|
loop2b: ; preds = %loop1
|
|
%gep1 = bitcast i32* %var.phi to i32*
|
|
%next.ptr = bitcast i32* %gep1 to i32**
|
|
store i32* %next.phi, i32** %next.ptr
|
|
br label %loop1
|
|
|
|
end:
|
|
ret void
|
|
}
|
|
|
|
; Check that we do not simply bail out on a RegMask.
; In this case, the RegMask does not touch a CSR, so we are good to go!
define i32 @regmask(i32 %a, i32 %b, i32* %addr) {
|
|
; ENABLE-LABEL: regmask:
|
|
; ENABLE: ## %bb.0:
|
|
; ENABLE-NEXT: cmpl %esi, %edi
|
|
; ENABLE-NEXT: jge LBB13_2
|
|
; ENABLE-NEXT: ## %bb.1: ## %true
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -16
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: xorl %edi, %edi
|
|
; ENABLE-NEXT: movq %rdx, %rsi
|
|
; ENABLE-NEXT: callq _doSomething
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB13_2: ## %false
|
|
; ENABLE-NEXT: movl $6, %edi
|
|
; ENABLE-NEXT: movq %rdx, %rsi
|
|
; ENABLE-NEXT: jmp _doSomething ## TAILCALL
|
|
;
|
|
; DISABLE-LABEL: regmask:
|
|
; DISABLE: ## %bb.0:
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -16
|
|
; DISABLE-NEXT: cmpl %esi, %edi
|
|
; DISABLE-NEXT: jge LBB13_2
|
|
; DISABLE-NEXT: ## %bb.1: ## %true
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: xorl %edi, %edi
|
|
; DISABLE-NEXT: movq %rdx, %rsi
|
|
; DISABLE-NEXT: callq _doSomething
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB13_2: ## %false
|
|
; DISABLE-NEXT: movl $6, %edi
|
|
; DISABLE-NEXT: movq %rdx, %rsi
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: jmp _doSomething ## TAILCALL
|
|
%tmp2 = icmp slt i32 %a, %b
|
|
br i1 %tmp2, label %true, label %false
|
|
|
|
true:
|
|
; Clobber a CSR so that we check something on the regmask
|
|
; of the tail call.
|
|
tail call void asm sideeffect "nop", "~{ebx}"()
|
|
%tmp4 = call i32 @doSomething(i32 0, i32* %addr)
|
|
br label %end
|
|
|
|
false:
|
|
%tmp5 = tail call i32 @doSomething(i32 6, i32* %addr)
|
|
br label %end
|
|
|
|
end:
|
|
%tmp.0 = phi i32 [ %tmp4, %true ], [ %tmp5, %false ]
|
|
ret i32 %tmp.0
|
|
}
|
|
|
|
@b = internal unnamed_addr global i1 false
|
|
@c = internal unnamed_addr global i8 0, align 1
|
|
@a = common global i32 0, align 4
|
|
|
|
; Make sure the prologue does not clobber EFLAGS when
; it is live across.
; PR25629.
; Note: The registers may change in the following patterns, but
; because they imply a register hierarchy (e.g., eax, al), it is
; tricky to write robust patterns.
define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
|
|
; ENABLE-LABEL: useLEAForPrologue:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: subq $16, %rsp
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: cmpb $0, {{.*}}(%rip)
|
|
; ENABLE-NEXT: movl $48, %ecx
|
|
; ENABLE-NEXT: cmovnel %eax, %ecx
|
|
; ENABLE-NEXT: movb %cl, {{.*}}(%rip)
|
|
; ENABLE-NEXT: je LBB14_4
|
|
; ENABLE-NEXT: ## %bb.1: ## %for.body.lr.ph
|
|
; ENABLE-NEXT: ## InlineAsm Start
|
|
; ENABLE-NEXT: nop
|
|
; ENABLE-NEXT: ## InlineAsm End
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB14_2: ## %for.body
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: cmpl %esi, %edi
|
|
; ENABLE-NEXT: setl %al
|
|
; ENABLE-NEXT: xorl %esi, %esi
|
|
; ENABLE-NEXT: movb %al, %sil
|
|
; ENABLE-NEXT: incb %dl
|
|
; ENABLE-NEXT: cmpb $45, %dl
|
|
; ENABLE-NEXT: jl LBB14_2
|
|
; ENABLE-NEXT: ## %bb.3: ## %for.cond.for.end_crit_edge
|
|
; ENABLE-NEXT: movq _a@{{.*}}(%rip), %rax
|
|
; ENABLE-NEXT: movl %esi, (%rax)
|
|
; ENABLE-NEXT: LBB14_4: ## %for.end
|
|
; ENABLE-NEXT: xorl %edi, %edi
|
|
; ENABLE-NEXT: callq _varfunc
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: addq $16, %rsp
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: useLEAForPrologue:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: subq $16, %rsp
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: cmpb $0, {{.*}}(%rip)
|
|
; DISABLE-NEXT: movl $48, %ecx
|
|
; DISABLE-NEXT: cmovnel %eax, %ecx
|
|
; DISABLE-NEXT: movb %cl, {{.*}}(%rip)
|
|
; DISABLE-NEXT: je LBB14_4
|
|
; DISABLE-NEXT: ## %bb.1: ## %for.body.lr.ph
|
|
; DISABLE-NEXT: ## InlineAsm Start
|
|
; DISABLE-NEXT: nop
|
|
; DISABLE-NEXT: ## InlineAsm End
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB14_2: ## %for.body
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: cmpl %esi, %edi
|
|
; DISABLE-NEXT: setl %al
|
|
; DISABLE-NEXT: xorl %esi, %esi
|
|
; DISABLE-NEXT: movb %al, %sil
|
|
; DISABLE-NEXT: incb %dl
|
|
; DISABLE-NEXT: cmpb $45, %dl
|
|
; DISABLE-NEXT: jl LBB14_2
|
|
; DISABLE-NEXT: ## %bb.3: ## %for.cond.for.end_crit_edge
|
|
; DISABLE-NEXT: movq _a@{{.*}}(%rip), %rax
|
|
; DISABLE-NEXT: movl %esi, (%rax)
|
|
; DISABLE-NEXT: LBB14_4: ## %for.end
|
|
; DISABLE-NEXT: xorl %edi, %edi
|
|
; DISABLE-NEXT: callq _varfunc
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: addq $16, %rsp
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%tmp = alloca i3
|
|
%.b = load i1, i1* @b, align 1
|
|
%bool = select i1 %.b, i8 0, i8 48
|
|
store i8 %bool, i8* @c, align 1
|
|
br i1 %.b, label %for.body.lr.ph, label %for.end
|
|
|
|
for.body.lr.ph: ; preds = %entry
|
|
tail call void asm sideeffect "nop", "~{ebx}"()
|
|
br label %for.body
|
|
|
|
for.body: ; preds = %for.body.lr.ph, %for.body
|
|
%inc6 = phi i8 [ %c, %for.body.lr.ph ], [ %inc, %for.body ]
|
|
%cond5 = phi i32 [ %a, %for.body.lr.ph ], [ %conv3, %for.body ]
|
|
%cmp2 = icmp slt i32 %d, %cond5
|
|
%conv3 = zext i1 %cmp2 to i32
|
|
%inc = add i8 %inc6, 1
|
|
%cmp = icmp slt i8 %inc, 45
|
|
br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
|
|
|
|
for.cond.for.end_crit_edge: ; preds = %for.body
|
|
store i32 %conv3, i32* @a, align 4
|
|
br label %for.end
|
|
|
|
for.end: ; preds = %for.cond.for.end_crit_edge, %entry
|
|
%call = tail call i32 (i8*) @varfunc(i8* null)
|
|
ret i32 0
|
|
}
|
|
|
|
declare i32 @varfunc(i8* nocapture readonly)
|
|
|
|
@sum1 = external hidden thread_local global i32, align 4
|
|
|
|
|
|
; Function Attrs: nounwind
; Make sure the TLS call used to access @sum1 happens after the prologue
; and before the epilogue.
; TLS calls used to be wrongly modeled, and shrink-wrapping would have inserted
; the prologue and epilogue just around the call to doSomething.
; PR25820.
define i32 @tlsCall(i1 %bool1, i32 %arg, i32* readonly dereferenceable(4) %sum1) #3 {
|
|
; ENABLE-LABEL: tlsCall:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: pushq %rax
|
|
; ENABLE-NEXT: testb $1, %dil
|
|
; ENABLE-NEXT: je LBB15_2
|
|
; ENABLE-NEXT: ## %bb.1: ## %master
|
|
; ENABLE-NEXT: movl (%rdx), %ecx
|
|
; ENABLE-NEXT: movq _sum1@{{.*}}(%rip), %rdi
|
|
; ENABLE-NEXT: callq *(%rdi)
|
|
; ENABLE-NEXT: movl %ecx, (%rax)
|
|
; ENABLE-NEXT: jmp LBB15_3
|
|
; ENABLE-NEXT: LBB15_2: ## %else
|
|
; ENABLE-NEXT: xorl %edi, %edi
|
|
; ENABLE-NEXT: xorl %esi, %esi
|
|
; ENABLE-NEXT: callq _doSomething
|
|
; ENABLE-NEXT: movl %eax, %esi
|
|
; ENABLE-NEXT: LBB15_3: ## %exit
|
|
; ENABLE-NEXT: movl %esi, %eax
|
|
; ENABLE-NEXT: popq %rcx
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: tlsCall:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rax
|
|
; DISABLE-NEXT: testb $1, %dil
|
|
; DISABLE-NEXT: je LBB15_2
|
|
; DISABLE-NEXT: ## %bb.1: ## %master
|
|
; DISABLE-NEXT: movl (%rdx), %ecx
|
|
; DISABLE-NEXT: movq _sum1@{{.*}}(%rip), %rdi
|
|
; DISABLE-NEXT: callq *(%rdi)
|
|
; DISABLE-NEXT: movl %ecx, (%rax)
|
|
; DISABLE-NEXT: jmp LBB15_3
|
|
; DISABLE-NEXT: LBB15_2: ## %else
|
|
; DISABLE-NEXT: xorl %edi, %edi
|
|
; DISABLE-NEXT: xorl %esi, %esi
|
|
; DISABLE-NEXT: callq _doSomething
|
|
; DISABLE-NEXT: movl %eax, %esi
|
|
; DISABLE-NEXT: LBB15_3: ## %exit
|
|
; DISABLE-NEXT: movl %esi, %eax
|
|
; DISABLE-NEXT: popq %rcx
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
br i1 %bool1, label %master, label %else
|
|
|
|
master:
|
|
%tmp1 = load i32, i32* %sum1, align 4
|
|
store i32 %tmp1, i32* @sum1, align 4
|
|
br label %exit
|
|
|
|
else:
|
|
%call = call i32 @doSomething(i32 0, i32* null)
|
|
br label %exit
|
|
|
|
exit:
|
|
%res = phi i32 [ %arg, %master], [ %call, %else ]
|
|
ret i32 %res
|
|
}
|
|
|
|
attributes #3 = { nounwind }
|
|
|
|
@irreducibleCFGa = common global i32 0, align 4
|
|
@irreducibleCFGf = common global i8 0, align 1
|
|
@irreducibleCFGb = common global i32 0, align 4
|
|
|
|
; Check that we do not run shrink-wrapping on irreducible CFGs until
; it is actually supported.
; At the moment, on those CFGs the loop information may be incorrect,
; and since we use that information to do the placement, we may end up
; inserting the prologue/epilogue in incorrect places.
; PR25988.
; Make sure we emit missed optimization remarks for this.
; REMARKS: Pass: shrink-wrap
|
|
; REMARKS-NEXT: Name: UnsupportedIrreducibleCFG
|
|
; REMARKS-NEXT: Function: irreducibleCFG
|
|
; REMARKS-NEXT: Args:
|
|
; REMARKS-NEXT: - String: Irreducible CFGs are not supported yet
|
|
|
|
define i32 @irreducibleCFG() #4 {
|
|
; ENABLE-LABEL: irreducibleCFG:
|
|
; ENABLE: ## %bb.0: ## %entry
|
|
; ENABLE-NEXT: pushq %rbp
|
|
; ENABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; ENABLE-NEXT: .cfi_offset %rbp, -16
|
|
; ENABLE-NEXT: movq %rsp, %rbp
|
|
; ENABLE-NEXT: .cfi_def_cfa_register %rbp
|
|
; ENABLE-NEXT: pushq %rbx
|
|
; ENABLE-NEXT: pushq %rax
|
|
; ENABLE-NEXT: .cfi_offset %rbx, -24
|
|
; ENABLE-NEXT: movq _irreducibleCFGf@{{.*}}(%rip), %rax
|
|
; ENABLE-NEXT: cmpb $0, (%rax)
|
|
; ENABLE-NEXT: je LBB16_2
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB16_1: ## %preheader
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: jmp LBB16_1
|
|
; ENABLE-NEXT: LBB16_2: ## %split
|
|
; ENABLE-NEXT: movq _irreducibleCFGb@{{.*}}(%rip), %rax
|
|
; ENABLE-NEXT: cmpl $0, (%rax)
|
|
; ENABLE-NEXT: je LBB16_3
|
|
; ENABLE-NEXT: ## %bb.4: ## %for.body4.i
|
|
; ENABLE-NEXT: movq _irreducibleCFGa@{{.*}}(%rip), %rax
|
|
; ENABLE-NEXT: movl (%rax), %edi
|
|
; ENABLE-NEXT: xorl %ebx, %ebx
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: callq _something
|
|
; ENABLE-NEXT: jmp LBB16_5
|
|
; ENABLE-NEXT: LBB16_3:
|
|
; ENABLE-NEXT: xorl %ebx, %ebx
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB16_5: ## %for.inc
|
|
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: incl %ebx
|
|
; ENABLE-NEXT: cmpl $7, %ebx
|
|
; ENABLE-NEXT: jl LBB16_5
|
|
; ENABLE-NEXT: ## %bb.6: ## %fn1.exit
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: addq $8, %rsp
|
|
; ENABLE-NEXT: popq %rbx
|
|
; ENABLE-NEXT: popq %rbp
|
|
; ENABLE-NEXT: retq
|
|
;
|
|
; DISABLE-LABEL: irreducibleCFG:
|
|
; DISABLE: ## %bb.0: ## %entry
|
|
; DISABLE-NEXT: pushq %rbp
|
|
; DISABLE-NEXT: .cfi_def_cfa_offset 16
|
|
; DISABLE-NEXT: .cfi_offset %rbp, -16
|
|
; DISABLE-NEXT: movq %rsp, %rbp
|
|
; DISABLE-NEXT: .cfi_def_cfa_register %rbp
|
|
; DISABLE-NEXT: pushq %rbx
|
|
; DISABLE-NEXT: pushq %rax
|
|
; DISABLE-NEXT: .cfi_offset %rbx, -24
|
|
; DISABLE-NEXT: movq _irreducibleCFGf@{{.*}}(%rip), %rax
|
|
; DISABLE-NEXT: cmpb $0, (%rax)
|
|
; DISABLE-NEXT: je LBB16_2
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB16_1: ## %preheader
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: jmp LBB16_1
|
|
; DISABLE-NEXT: LBB16_2: ## %split
|
|
; DISABLE-NEXT: movq _irreducibleCFGb@{{.*}}(%rip), %rax
|
|
; DISABLE-NEXT: cmpl $0, (%rax)
|
|
; DISABLE-NEXT: je LBB16_3
|
|
; DISABLE-NEXT: ## %bb.4: ## %for.body4.i
|
|
; DISABLE-NEXT: movq _irreducibleCFGa@{{.*}}(%rip), %rax
|
|
; DISABLE-NEXT: movl (%rax), %edi
|
|
; DISABLE-NEXT: xorl %ebx, %ebx
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: callq _something
|
|
; DISABLE-NEXT: jmp LBB16_5
|
|
; DISABLE-NEXT: LBB16_3:
|
|
; DISABLE-NEXT: xorl %ebx, %ebx
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB16_5: ## %for.inc
|
|
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: incl %ebx
|
|
; DISABLE-NEXT: cmpl $7, %ebx
|
|
; DISABLE-NEXT: jl LBB16_5
|
|
; DISABLE-NEXT: ## %bb.6: ## %fn1.exit
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: addq $8, %rsp
|
|
; DISABLE-NEXT: popq %rbx
|
|
; DISABLE-NEXT: popq %rbp
|
|
; DISABLE-NEXT: retq
|
|
entry:
|
|
%i0 = load i32, i32* @irreducibleCFGa, align 4
|
|
%.pr = load i8, i8* @irreducibleCFGf, align 1
|
|
%bool = icmp eq i8 %.pr, 0
|
|
br i1 %bool, label %split, label %preheader
|
|
|
|
preheader:
|
|
br label %preheader
|
|
|
|
split:
|
|
%i1 = load i32, i32* @irreducibleCFGb, align 4
|
|
%tobool1.i = icmp ne i32 %i1, 0
|
|
br i1 %tobool1.i, label %for.body4.i, label %for.cond8.i.preheader
|
|
|
|
for.body4.i:
|
|
%call.i = tail call i32 (...) @something(i32 %i0)
|
|
br label %for.cond8
|
|
|
|
for.cond8:
|
|
%p1 = phi i32 [ %inc18.i, %for.inc ], [ 0, %for.body4.i ]
|
|
%.pr1.pr = load i32, i32* @irreducibleCFGb, align 4
|
|
br label %for.cond8.i.preheader
|
|
|
|
for.cond8.i.preheader:
|
|
%.pr1 = phi i32 [ %.pr1.pr, %for.cond8 ], [ %i1, %split ]
|
|
%p13 = phi i32 [ %p1, %for.cond8 ], [ 0, %split ]
|
|
br label %for.inc
|
|
|
|
fn1.exit:
|
|
ret i32 0
|
|
|
|
for.inc:
|
|
%inc18.i = add nuw nsw i32 %p13, 1
|
|
%cmp = icmp slt i32 %inc18.i, 7
|
|
br i1 %cmp, label %for.cond8, label %fn1.exit
|
|
}
|
|
|
|
attributes #4 = { "frame-pointer"="all" }
|
|
|
|
@x = external global i32, align 4
|
|
@y = external global i32, align 4
|
|
|
|
; The post-dominator tree does not include the branch containing the infinite
; loop, which can result in a misplacement of the restore block if we're
; looking for the nearest common post-dominator of an "unreachable" block.

define void @infiniteLoopNoSuccessor() #5 {
|
|
; ENABLE-LABEL: infiniteLoopNoSuccessor:
|
|
; ENABLE: ## %bb.0:
|
|
; ENABLE-NEXT: pushq %rbp
|
|
; ENABLE-NEXT: movq %rsp, %rbp
|
|
; ENABLE-NEXT: movq _x@{{.*}}(%rip), %rax
|
|
; ENABLE-NEXT: cmpl $0, (%rax)
|
|
; ENABLE-NEXT: je LBB17_2
|
|
; ENABLE-NEXT: ## %bb.1:
|
|
; ENABLE-NEXT: movl $0, (%rax)
|
|
; ENABLE-NEXT: LBB17_2:
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: callq _somethingElse
|
|
; ENABLE-NEXT: movq _y@{{.*}}(%rip), %rax
|
|
; ENABLE-NEXT: cmpl $0, (%rax)
|
|
; ENABLE-NEXT: je LBB17_3
|
|
; ENABLE-NEXT: ## %bb.5:
|
|
; ENABLE-NEXT: popq %rbp
|
|
; ENABLE-NEXT: retq
|
|
; ENABLE-NEXT: LBB17_3:
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: callq _something
|
|
; ENABLE-NEXT: .p2align 4, 0x90
|
|
; ENABLE-NEXT: LBB17_4: ## =>This Inner Loop Header: Depth=1
|
|
; ENABLE-NEXT: xorl %eax, %eax
|
|
; ENABLE-NEXT: callq _somethingElse
|
|
; ENABLE-NEXT: jmp LBB17_4
|
|
;
|
|
; DISABLE-LABEL: infiniteLoopNoSuccessor:
|
|
; DISABLE: ## %bb.0:
|
|
; DISABLE-NEXT: pushq %rbp
|
|
; DISABLE-NEXT: movq %rsp, %rbp
|
|
; DISABLE-NEXT: movq _x@{{.*}}(%rip), %rax
|
|
; DISABLE-NEXT: cmpl $0, (%rax)
|
|
; DISABLE-NEXT: je LBB17_2
|
|
; DISABLE-NEXT: ## %bb.1:
|
|
; DISABLE-NEXT: movl $0, (%rax)
|
|
; DISABLE-NEXT: LBB17_2:
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: callq _somethingElse
|
|
; DISABLE-NEXT: movq _y@{{.*}}(%rip), %rax
|
|
; DISABLE-NEXT: cmpl $0, (%rax)
|
|
; DISABLE-NEXT: je LBB17_3
|
|
; DISABLE-NEXT: ## %bb.5:
|
|
; DISABLE-NEXT: popq %rbp
|
|
; DISABLE-NEXT: retq
|
|
; DISABLE-NEXT: LBB17_3:
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: callq _something
|
|
; DISABLE-NEXT: .p2align 4, 0x90
|
|
; DISABLE-NEXT: LBB17_4: ## =>This Inner Loop Header: Depth=1
|
|
; DISABLE-NEXT: xorl %eax, %eax
|
|
; DISABLE-NEXT: callq _somethingElse
|
|
; DISABLE-NEXT: jmp LBB17_4
|
|
%1 = load i32, i32* @x, align 4
|
|
%2 = icmp ne i32 %1, 0
|
|
br i1 %2, label %3, label %4
|
|
|
|
; <label>:3:
|
|
store i32 0, i32* @x, align 4
|
|
br label %4
|
|
|
|
; <label>:4:
|
|
call void (...) @somethingElse()
|
|
%5 = load i32, i32* @y, align 4
|
|
%6 = icmp ne i32 %5, 0
|
|
br i1 %6, label %10, label %7
|
|
|
|
; <label>:7:
|
|
%8 = call i32 (...) @something()
|
|
br label %9
|
|
|
|
; <label>:9:
|
|
call void (...) @somethingElse()
|
|
br label %9
|
|
|
|
; <label>:10:
|
|
ret void
|
|
}
|
|
|
|
declare void @somethingElse(...)
|
|
|
|
attributes #5 = { nounwind "frame-pointer"="non-leaf" }
|