; Source: llvm-for-llvmta/test/CodeGen/X86/x86-64-double-precision-shi... (filename truncated in capture)

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver1 | FileCheck %s
; Verify that, for the architectures known to have poor-latency
; double-precision shift instructions, we generate an alternative sequence
; of instructions with lower latency instead of the shld instruction.
;uint64_t lshift1(uint64_t a, uint64_t b)
;{
; return (a << 1) | (b >> 63);
;}
define i64 @lshift1(i64 %a, i64 %b) nounwind readnone uwtable {
; CHECK-LABEL: lshift1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrq $63, %rsi
; CHECK-NEXT: leaq (%rsi,%rdi,2), %rax
; CHECK-NEXT: retq
; Shift amount 1: the (a << 1) half is absorbed into the LEA's *2 index
; scale, so no shldq (and no separate shlq) is emitted for bdver1.
entry:
%shl = shl i64 %a, 1
%shr = lshr i64 %b, 63
%or = or i64 %shr, %shl
ret i64 %or
}
;uint64_t lshift2(uint64_t a, uint64_t b)
;{
; return (a << 2) | (b >> 62);
;}
define i64 @lshift2(i64 %a, i64 %b) nounwind readnone uwtable {
; CHECK-LABEL: lshift2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrq $62, %rsi
; CHECK-NEXT: leaq (%rsi,%rdi,4), %rax
; CHECK-NEXT: retq
; Shift amount 2: the (a << 2) half is absorbed into the LEA's *4 index
; scale, so the whole funnel shift lowers to shrq + leaq, with no shldq.
entry:
%shl = shl i64 %a, 2
%shr = lshr i64 %b, 62
%or = or i64 %shr, %shl
ret i64 %or
}
;uint64_t lshift7(uint64_t a, uint64_t b)
;{
; return (a << 7) | (b >> 57);
;}
define i64 @lshift7(i64 %a, i64 %b) nounwind readnone uwtable {
; CHECK-LABEL: lshift7:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrq $57, %rsi
; CHECK-NEXT: shlq $7, %rdi
; CHECK-NEXT: leaq (%rdi,%rsi), %rax
; CHECK-NEXT: retq
; Shift amount 7: too large for an LEA scale (max *8 would only cover
; shifts up to 3), so an explicit shlq is emitted and LEA just performs
; the final add — still avoiding the slow shldq on this CPU.
entry:
%shl = shl i64 %a, 7
%shr = lshr i64 %b, 57
%or = or i64 %shr, %shl
ret i64 %or
}
;uint64_t lshift63(uint64_t a, uint64_t b)
;{
; return (a << 63) | (b >> 1);
;}
define i64 @lshift63(i64 %a, i64 %b) nounwind readnone uwtable {
; CHECK-LABEL: lshift63:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: shrq %rsi
; CHECK-NEXT: shlq $63, %rdi
; CHECK-NEXT: leaq (%rdi,%rsi), %rax
; CHECK-NEXT: retq
; Boundary case (shift amount 63): the complementary right shift is by 1,
; which x86 encodes with the implicit shift-by-one form of shrq (no
; immediate operand); the combine is again shr + shl + leaq, not shldq.
entry:
%shl = shl i64 %a, 63
%shr = lshr i64 %b, 1
%or = or i64 %shr, %shl
ret i64 %or
}