; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -stackrealign -mtriple i386-apple-darwin -mcpu=i486 | FileCheck %s

%struct.foo = type { [88 x i8] }

declare void @bar(i8* nocapture, %struct.foo* align 4 byval(%struct.foo)) nounwind
declare void @baz(i8*) nounwind

; PR15249
; We can't use rep;movsl here because it clobbers the base pointer in %esi.
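; With -stackrealign and a variable-sized alloca, the i386 backend establishes
; %esi as a base pointer to address locals once %esp moves, so the byval copy
; is lowered to individual loads/stores instead of a rep;movsl that would
; clobber %esi.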
define void @test1(%struct.foo* nocapture %x, i32 %y) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## %bb.0:
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: subl $80, %esp
; CHECK-NEXT: movl %esp, %esi
; CHECK-NEXT: movl 8(%ebp), %ecx
; CHECK-NEXT: movl 12(%ebp), %edx
; CHECK-NEXT: movl %esp, %eax
; CHECK-NEXT: addl $15, %edx
; CHECK-NEXT: andl $-16, %edx
; CHECK-NEXT: subl %edx, %eax
; CHECK-NEXT: movl %eax, %esp
; CHECK-NEXT: subl $4, %esp
; CHECK-NEXT: movl 84(%ecx), %edx
; CHECK-NEXT: movl %edx, 68(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 80(%ecx), %edi
; CHECK-NEXT: movl 76(%ecx), %ebx
; CHECK-NEXT: movl 72(%ecx), %edx
; CHECK-NEXT: movl %edx, 64(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 68(%ecx), %edx
; CHECK-NEXT: movl %edx, 60(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 64(%ecx), %edx
; CHECK-NEXT: movl %edx, 56(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 60(%ecx), %edx
; CHECK-NEXT: movl %edx, 52(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 56(%ecx), %edx
; CHECK-NEXT: movl %edx, 48(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 52(%ecx), %edx
; CHECK-NEXT: movl %edx, 44(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 48(%ecx), %edx
; CHECK-NEXT: movl %edx, 40(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 44(%ecx), %edx
; CHECK-NEXT: movl %edx, 36(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 40(%ecx), %edx
; CHECK-NEXT: movl %edx, 32(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 36(%ecx), %edx
; CHECK-NEXT: movl %edx, 28(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 32(%ecx), %edx
; CHECK-NEXT: movl %edx, 24(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 28(%ecx), %edx
; CHECK-NEXT: movl %edx, 20(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 24(%ecx), %edx
; CHECK-NEXT: movl %edx, 16(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 20(%ecx), %edx
; CHECK-NEXT: movl %edx, 12(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 16(%ecx), %edx
; CHECK-NEXT: movl %edx, 8(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 12(%ecx), %edx
; CHECK-NEXT: movl %edx, 4(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 8(%ecx), %edx
; CHECK-NEXT: movl %edx, (%esi) ## 4-byte Spill
; CHECK-NEXT: movl (%ecx), %edx
; CHECK-NEXT: movl %edx, 72(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 4(%ecx), %ecx
; CHECK-NEXT: pushl 68(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl 64(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 60(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 56(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 52(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 48(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 44(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 40(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 36(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 32(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 28(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 24(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 20(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 16(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 12(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 8(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 4(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl (%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl %ecx
; CHECK-NEXT: pushl 72(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: calll _bar
; CHECK-NEXT: leal -12(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebx
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
  %dynalloc = alloca i8, i32 %y, align 1
  call void @bar(i8* %dynalloc, %struct.foo* align 4 byval(%struct.foo) %x)
  ret void
}

; PR19012
; Also don't clobber %esi if the dynamic alloca comes after the memcpy.
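; %esi is still set up as the base pointer for the whole function, so the byval
; copy ahead of the alloca must avoid rep;movsl as well.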
define void @test2(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
; CHECK-LABEL: test2:
; CHECK: ## %bb.0:
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: subl $80, %esp
; CHECK-NEXT: movl %esp, %esi
; CHECK-NEXT: movl 12(%ebp), %edi
; CHECK-NEXT: movl 8(%ebp), %eax
; CHECK-NEXT: subl $4, %esp
; CHECK-NEXT: movl 84(%eax), %ecx
; CHECK-NEXT: movl %ecx, 68(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 80(%eax), %edx
; CHECK-NEXT: movl 76(%eax), %ebx
; CHECK-NEXT: movl 72(%eax), %ecx
; CHECK-NEXT: movl %ecx, 64(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 68(%eax), %ecx
; CHECK-NEXT: movl %ecx, 60(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 64(%eax), %ecx
; CHECK-NEXT: movl %ecx, 56(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 60(%eax), %ecx
; CHECK-NEXT: movl %ecx, 52(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 56(%eax), %ecx
; CHECK-NEXT: movl %ecx, 48(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 52(%eax), %ecx
; CHECK-NEXT: movl %ecx, 44(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 48(%eax), %ecx
; CHECK-NEXT: movl %ecx, 40(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 44(%eax), %ecx
; CHECK-NEXT: movl %ecx, 36(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 40(%eax), %ecx
; CHECK-NEXT: movl %ecx, 32(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 36(%eax), %ecx
; CHECK-NEXT: movl %ecx, 28(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 32(%eax), %ecx
; CHECK-NEXT: movl %ecx, 24(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 28(%eax), %ecx
; CHECK-NEXT: movl %ecx, 20(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 24(%eax), %ecx
; CHECK-NEXT: movl %ecx, 16(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 20(%eax), %ecx
; CHECK-NEXT: movl %ecx, 12(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 16(%eax), %ecx
; CHECK-NEXT: movl %ecx, 8(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 12(%eax), %ecx
; CHECK-NEXT: movl %ecx, 4(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 8(%eax), %ecx
; CHECK-NEXT: movl %ecx, (%esi) ## 4-byte Spill
; CHECK-NEXT: movl (%eax), %ecx
; CHECK-NEXT: movl %ecx, 72(%esi) ## 4-byte Spill
; CHECK-NEXT: movl 4(%eax), %eax
; CHECK-NEXT: pushl 68(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl %edx
; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl 64(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 60(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 56(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 52(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 48(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 44(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 40(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 36(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 32(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 28(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 24(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 20(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 16(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 12(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 8(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 4(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl (%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: pushl 72(%esi) ## 4-byte Folded Reload
; CHECK-NEXT: pushl 16(%ebp)
; CHECK-NEXT: calll _bar
; CHECK-NEXT: addl $96, %esp
; CHECK-NEXT: movl %esp, %eax
; CHECK-NEXT: addl $15, %edi
; CHECK-NEXT: andl $-16, %edi
; CHECK-NEXT: subl %edi, %eax
; CHECK-NEXT: movl %eax, %esp
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: calll _baz
; CHECK-NEXT: leal -12(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebx
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
  call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x)
  %dynalloc = alloca i8, i32 %y, align 1
  call void @baz(i8* %dynalloc)
  ret void
}

; Check that we do use rep movs if we make the alloca static.
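; Without a variable-sized alloca no base pointer is needed, so the 88-byte
; byval argument can be copied with rep;movsl (22 dwords, %ecx = 22).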
define void @test3(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
; CHECK-LABEL: test3:
; CHECK: ## %bb.0:
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: andl $-16, %esp
; CHECK-NEXT: subl $112, %esp
; CHECK-NEXT: movl 16(%ebp), %eax
; CHECK-NEXT: movl 8(%ebp), %esi
; CHECK-NEXT: leal {{[0-9]+}}(%esp), %edi
; CHECK-NEXT: movl $22, %ecx
; CHECK-NEXT: rep;movsl (%esi), %es:(%edi)
; CHECK-NEXT: movl %eax, (%esp)
; CHECK-NEXT: calll _bar
; CHECK-NEXT: leal {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, (%esp)
; CHECK-NEXT: calll _baz
; CHECK-NEXT: leal -8(%ebp), %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
  call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x)
  %statalloc = alloca i8, i32 8, align 1
  call void @baz(i8* %statalloc)
  ret void
}