; llvm-for-llvmta/test/CodeGen/PowerPC/f128-conv.ll
;
; 2210 lines
; 69 KiB
; LLVM
; Raw Normal View History
;
; 2022-04-25 10:02:23 +02:00
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -relocation-model=pic -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-vsr-nums-as-vr -verify-machineinstrs -ppc-asm-full-reg-names < %s \
; RUN: | FileCheck %s
; RUN: llc -relocation-model=pic -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-vsr-nums-as-vr -verify-machineinstrs -ppc-asm-full-reg-names < %s \
; RUN: -enable-soft-fp128 | FileCheck %s -check-prefix=CHECK-P8
@mem = global [5 x i64] [i64 56, i64 63, i64 3, i64 5, i64 6], align 8
@umem = global [5 x i64] [i64 560, i64 100, i64 34, i64 2, i64 5], align 8
@swMem = global [5 x i32] [i32 5, i32 2, i32 3, i32 4, i32 0], align 4
@uwMem = global [5 x i32] [i32 5, i32 2, i32 3, i32 4, i32 0], align 4
@uhwMem = local_unnamed_addr global [5 x i16] [i16 5, i16 2, i16 3, i16 4, i16 0], align 2
@ubMem = local_unnamed_addr global [5 x i8] c"\05\02\03\04\00", align 1
; Function Attrs: norecurse nounwind
; sitofp i64 -> fp128: pwr9 moves the GPR to a VSR (mtvsrd) and converts with
; xscvsdqp; the soft-fp128 pwr8 path calls the runtime routine __floatdikf.
define void @sdwConv2qp(fp128* nocapture %a, i64 %b) {
; CHECK-LABEL: sdwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrd v2, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: sdwConv2qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatdikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i64 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp i128 -> fp128: no hardware conversion exists for i128, so both pwr9
; and pwr8 lower this to a call to the runtime routine __floattikf.
define void @sdwConv2qp_01(fp128* nocapture %a, i128 %b) {
; CHECK-LABEL: sdwConv2qp_01:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: .cfi_offset r30, -16
; CHECK-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r0, 16(r1)
; CHECK-NEXT: stdu r1, -48(r1)
; CHECK-NEXT: mr r30, r3
; CHECK-NEXT: mr r3, r4
; CHECK-NEXT: mr r4, r5
; CHECK-NEXT: bl __floattikf
; CHECK-NEXT: nop
; CHECK-NEXT: stxv v2, 0(r30)
; CHECK-NEXT: addi r1, r1, 48
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: sdwConv2qp_01:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: mr r4, r5
; CHECK-P8-NEXT: bl __floattikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i128 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp of an i64 loaded from global @mem, element 2 (byte offset 16): pwr9
; folds the offset into lxsd 16(r4) and converts with xscvsdqp; pwr8 loads the
; value into a GPR and calls __floatdikf.
define void @sdwConv2qp_02(fp128* nocapture %a) {
; CHECK-LABEL: sdwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxsd v2, 16(r4)
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: sdwConv2qp_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: ld r4, 16(r4)
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatdikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i64, i64* getelementptr inbounds
([5 x i64], [5 x i64]* @mem, i64 0, i64 2), align 8
%conv = sitofp i64 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp of an i64 loaded through a pointer argument: pwr9 loads directly into
; a VSR (lxsd) and converts with xscvsdqp, avoiding a GPR round trip; pwr8
; calls __floatdikf.
define void @sdwConv2qp_03(fp128* nocapture %a, i64* nocapture readonly %b) {
; CHECK-LABEL: sdwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsd v2, 0(r4)
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: sdwConv2qp_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: ld r4, 0(r4)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatdikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i64, i64* %b, align 8
%conv = sitofp i64 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp i1 -> fp128: the i1 is first materialized as 0 or -1 (andi. + isel)
; and then converted as a signed integer; pwr8 calls __floatsikf.
define void @sdwConv2qp_04(fp128* nocapture %a, i1 %b) {
; CHECK-LABEL: sdwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: andi. r4, r4, 1
; CHECK-NEXT: li r4, 0
; CHECK-NEXT: li r5, -1
; CHECK-NEXT: iselgt r4, r5, r4
; CHECK-NEXT: mtvsrwa v2, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: sdwConv2qp_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: andi. r3, r4, 1
; CHECK-P8-NEXT: li r4, -1
; CHECK-P8-NEXT: li r3, 0
; CHECK-P8-NEXT: iselgt r3, r4, r3
; CHECK-P8-NEXT: bl __floatsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i1 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp i64 -> fp128: unsigned counterpart of sdwConv2qp; pwr9 uses
; xscvudqp, pwr8 calls __floatundikf.
define void @udwConv2qp(fp128* nocapture %a, i64 %b) {
; CHECK-LABEL: udwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrd v2, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: udwConv2qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatundikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i64 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp i128 -> fp128: both subtargets lower to a call to __floatuntikf
; (no hardware i128 conversion on either).
define void @udwConv2qp_01(fp128* nocapture %a, i128 %b) {
; CHECK-LABEL: udwConv2qp_01:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mflr r0
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: .cfi_offset r30, -16
; CHECK-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r0, 16(r1)
; CHECK-NEXT: stdu r1, -48(r1)
; CHECK-NEXT: mr r30, r3
; CHECK-NEXT: mr r3, r4
; CHECK-NEXT: mr r4, r5
; CHECK-NEXT: bl __floatuntikf
; CHECK-NEXT: nop
; CHECK-NEXT: stxv v2, 0(r30)
; CHECK-NEXT: addi r1, r1, 48
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: udwConv2qp_01:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: mr r4, r5
; CHECK-P8-NEXT: bl __floatuntikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i128 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i64 loaded from global @umem, element 4 (byte offset 32):
; pwr9 folds the offset into lxsd 32(r4) + xscvudqp; pwr8 calls __floatundikf.
define void @udwConv2qp_02(fp128* nocapture %a) {
; CHECK-LABEL: udwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC1@toc@ha
; CHECK-NEXT: ld r4, .LC1@toc@l(r4)
; CHECK-NEXT: lxsd v2, 32(r4)
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: udwConv2qp_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC1@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC1@toc@l(r4)
; CHECK-P8-NEXT: ld r4, 32(r4)
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatundikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i64, i64* getelementptr inbounds
([5 x i64], [5 x i64]* @umem, i64 0, i64 4), align 8
%conv = uitofp i64 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i64 loaded through a pointer argument: pwr9 uses a direct
; VSR load (lxsd) + xscvudqp; pwr8 calls __floatundikf.
define void @udwConv2qp_03(fp128* nocapture %a, i64* nocapture readonly %b) {
; CHECK-LABEL: udwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsd v2, 0(r4)
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: udwConv2qp_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: ld r4, 0(r4)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatundikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i64, i64* %b, align 8
%conv = uitofp i64 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp i1 -> fp128: the i1 is masked to 0/1 (clrlwi / clrldi) first. pwr9
; then uses the signed convert xscvsdqp, which is safe because the value is
; known non-negative; pwr8 likewise calls the signed routine __floatsikf.
define void @udwConv2qp_04(fp128* nocapture %a, i1 %b) {
; CHECK-LABEL: udwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrlwi r4, r4, 31
; CHECK-NEXT: mtvsrwa v2, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: udwConv2qp_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: clrldi r3, r4, 63
; CHECK-P8-NEXT: bl __floatsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i1 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Tests that a large constant byte offset (73333 = 1<<16 + 7797, too big for a
; D-form displacement) selects the X-form indexed load lxsdx on pwr9 before
; the signed convert; pwr8 uses ldx and calls __floatdikf.
define fp128* @sdwConv2qp_testXForm(fp128* returned %sink,
; CHECK-LABEL: sdwConv2qp_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis r5, 1
; CHECK-NEXT: ori r5, r5, 7797
; CHECK-NEXT: lxsdx v2, r4, r5
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: sdwConv2qp_testXForm:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lis r5, 1
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ori r5, r5, 7797
; CHECK-P8-NEXT: ldx r4, r4, r5
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatdikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: mr r3, r30
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
i8* nocapture readonly %a) {
entry:
%add.ptr = getelementptr inbounds i8, i8* %a, i64 73333
%0 = bitcast i8* %add.ptr to i64*
%1 = load i64, i64* %0, align 8
%conv = sitofp i64 %1 to fp128
store fp128 %conv, fp128* %sink, align 16
ret fp128* %sink
}
; Function Attrs: norecurse nounwind
; Unsigned counterpart of sdwConv2qp_testXForm: same large-offset X-form load
; (lxsdx), converted with xscvudqp on pwr9; pwr8 calls __floatundikf.
define fp128* @udwConv2qp_testXForm(fp128* returned %sink,
; CHECK-LABEL: udwConv2qp_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lis r5, 1
; CHECK-NEXT: ori r5, r5, 7797
; CHECK-NEXT: lxsdx v2, r4, r5
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: udwConv2qp_testXForm:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lis r5, 1
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ori r5, r5, 7797
; CHECK-P8-NEXT: ldx r4, r4, r5
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatundikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: mr r3, r30
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
i8* nocapture readonly %a) {
entry:
%add.ptr = getelementptr inbounds i8, i8* %a, i64 73333
%0 = bitcast i8* %add.ptr to i64*
%1 = load i64, i64* %0, align 8
%conv = uitofp i64 %1 to fp128
store fp128 %conv, fp128* %sink, align 16
ret fp128* %sink
}
; Function Attrs: norecurse nounwind
; sitofp i32 -> fp128: pwr9 sign-extends into a VSR (mtvsrwa) and converts
; with xscvsdqp; pwr8 calls __floatsikf.
define void @swConv2qp(fp128* nocapture %a, i32 signext %b) {
; CHECK-LABEL: swConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwa v2, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: swConv2qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = sitofp i32 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp of an i32 loaded through a pointer: pwr9 uses the sign-extending
; word load lxsiwax straight into a VSR; pwr8 uses lwa + __floatsikf.
define void @swConv2qp_02(fp128* nocapture %a, i32* nocapture readonly %b) {
; CHECK-LABEL: swConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsiwax v2, 0, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: swConv2qp_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lwa r4, 0(r4)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i32, i32* %b, align 4
%conv = sitofp i32 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp of an i32 loaded from global @swMem, element 3 (byte offset 12):
; lxsiwax has no D-form, so pwr9 computes the address with addi first;
; pwr8 uses lwa 12(r4) and calls __floatsikf.
define void @swConv2qp_03(fp128* nocapture %a) {
; CHECK-LABEL: swConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC2@toc@ha
; CHECK-NEXT: ld r4, .LC2@toc@l(r4)
; CHECK-NEXT: addi r4, r4, 12
; CHECK-NEXT: lxsiwax v2, 0, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: swConv2qp_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC2@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC2@toc@l(r4)
; CHECK-P8-NEXT: lwa r4, 12(r4)
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i32, i32* getelementptr inbounds
([5 x i32], [5 x i32]* @swMem, i64 0, i64 3), align 4
%conv = sitofp i32 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp i32 -> fp128: pwr9 zero-extends into a VSR (mtvsrwz) and converts
; with xscvudqp; pwr8 calls __floatunsikf.
define void @uwConv2qp(fp128* nocapture %a, i32 zeroext %b) {
; CHECK-LABEL: uwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwz v2, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uwConv2qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i32 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i32 loaded through a pointer: pwr9 uses the zero-extending
; word load lxsiwzx straight into a VSR; pwr8 uses lwz + __floatunsikf.
define void @uwConv2qp_02(fp128* nocapture %a, i32* nocapture readonly %b) {
; CHECK-LABEL: uwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsiwzx v2, 0, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uwConv2qp_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lwz r4, 0(r4)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i32, i32* %b, align 4
%conv = uitofp i32 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i32 loaded from global @uwMem, element 3 (byte offset 12):
; pwr9 computes the address with addi and uses lxsiwzx; pwr8 uses
; lwz 12(r4) and calls __floatunsikf.
define void @uwConv2qp_03(fp128* nocapture %a) {
; CHECK-LABEL: uwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC3@toc@ha
; CHECK-NEXT: ld r4, .LC3@toc@l(r4)
; CHECK-NEXT: addi r4, r4, 12
; CHECK-NEXT: lxsiwzx v2, 0, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uwConv2qp_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC3@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC3@toc@l(r4)
; CHECK-P8-NEXT: lwz r4, 12(r4)
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i32, i32* getelementptr inbounds
([5 x i32], [5 x i32]* @uwMem, i64 0, i64 3), align 4
%conv = uitofp i32 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
define void @uwConv2qp_04(fp128* nocapture %a,
; CHECK-LABEL: uwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lwz r5, 0(r5)
; CHECK-NEXT: add r4, r5, r4
; CHECK-NEXT: mtvsrwz v2, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uwConv2qp_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: lwz r3, 0(r5)
; CHECK-P8-NEXT: add r3, r3, r4
; CHECK-P8-NEXT: clrldi r3, r3, 32
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
i32 zeroext %b, i32* nocapture readonly %c) {
entry:
%0 = load i32, i32* %c, align 4
%add = add i32 %0, %b
%conv = uitofp i32 %add to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp i16 (zeroext) -> fp128: the argument arrives already zero-extended,
; so pwr9 treats it like an unsigned word (mtvsrwz + xscvudqp); pwr8 calls
; __floatunsikf.
define void @uhwConv2qp(fp128* nocapture %a, i16 zeroext %b) {
; CHECK-LABEL: uhwConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwz v2, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uhwConv2qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i16 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i16 loaded through a pointer: pwr9 uses the zero-extending
; halfword load lxsihzx straight into a VSR; pwr8 uses lhz + __floatunsikf.
define void @uhwConv2qp_02(fp128* nocapture %a, i16* nocapture readonly %b) {
; CHECK-LABEL: uhwConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsihzx v2, 0, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uhwConv2qp_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lhz r4, 0(r4)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i16, i16* %b, align 2
%conv = uitofp i16 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i16 loaded from global @uhwMem, element 3 (byte offset 6):
; pwr9 computes the address with addi and uses lxsihzx; pwr8 uses lhz 6(r4)
; and calls __floatunsikf.
define void @uhwConv2qp_03(fp128* nocapture %a) {
; CHECK-LABEL: uhwConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC4@toc@ha
; CHECK-NEXT: ld r4, .LC4@toc@l(r4)
; CHECK-NEXT: addi r4, r4, 6
; CHECK-NEXT: lxsihzx v2, 0, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uhwConv2qp_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC4@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC4@toc@l(r4)
; CHECK-P8-NEXT: lhz r4, 6(r4)
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i16, i16* getelementptr inbounds
([5 x i16], [5 x i16]* @uhwMem, i64 0, i64 3), align 2
%conv = uitofp i16 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp of an i32 formed by zext i16 + zext i16 add: since both operands are
; zero-extended halfwords, the sum is non-negative and the signed convert
; (mtvsrwa + xscvsdqp on pwr9, __floatsikf on pwr8) is used.
define void @uhwConv2qp_04(fp128* nocapture %a, i16 zeroext %b,
; CHECK-LABEL: uhwConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lhz r5, 0(r5)
; CHECK-NEXT: add r4, r5, r4
; CHECK-NEXT: mtvsrwa v2, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: uhwConv2qp_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: lhz r3, 0(r5)
; CHECK-P8-NEXT: add r3, r3, r4
; CHECK-P8-NEXT: clrldi r3, r3, 32
; CHECK-P8-NEXT: bl __floatsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
i16* nocapture readonly %c) {
entry:
%conv = zext i16 %b to i32
%0 = load i16, i16* %c, align 2
%conv1 = zext i16 %0 to i32
%add = add nuw nsw i32 %conv1, %conv
%conv2 = sitofp i32 %add to fp128
store fp128 %conv2, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp i8 (zeroext) -> fp128: same pattern as the i16/i32 unsigned cases,
; mtvsrwz + xscvudqp on pwr9; __floatunsikf on pwr8.
define void @ubConv2qp(fp128* nocapture %a, i8 zeroext %b) {
; CHECK-LABEL: ubConv2qp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtvsrwz v2, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: ubConv2qp:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%conv = uitofp i8 %b to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i8 loaded through a pointer: pwr9 uses the zero-extending
; byte load lxsibzx straight into a VSR; pwr8 uses lbz + __floatunsikf.
define void @ubConv2qp_02(fp128* nocapture %a, i8* nocapture readonly %b) {
; CHECK-LABEL: ubConv2qp_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxsibzx v2, 0, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: ubConv2qp_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lbz r4, 0(r4)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i8, i8* %b, align 1
%conv = uitofp i8 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; uitofp of an i8 loaded from global @ubMem, element 2 (byte offset 2):
; pwr9 computes the address with addi and uses lxsibzx; pwr8 uses lbz 2(r4)
; and calls __floatunsikf.
define void @ubConv2qp_03(fp128* nocapture %a) {
; CHECK-LABEL: ubConv2qp_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC5@toc@ha
; CHECK-NEXT: ld r4, .LC5@toc@l(r4)
; CHECK-NEXT: addi r4, r4, 2
; CHECK-NEXT: lxsibzx v2, 0, r4
; CHECK-NEXT: xscvudqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: ubConv2qp_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC5@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC5@toc@l(r4)
; CHECK-P8-NEXT: lbz r4, 2(r4)
; CHECK-P8-NEXT: mr r3, r4
; CHECK-P8-NEXT: bl __floatunsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load i8, i8* getelementptr inbounds
([5 x i8], [5 x i8]* @ubMem, i64 0, i64 2), align 1
%conv = uitofp i8 %0 to fp128
store fp128 %conv, fp128* %a, align 16
ret void
}
; Function Attrs: norecurse nounwind
; sitofp of an i32 formed by zext i8 + zext i8 add: the sum of two
; zero-extended bytes is non-negative, so the signed convert path is used
; (mtvsrwa + xscvsdqp on pwr9, __floatsikf on pwr8).
define void @ubConv2qp_04(fp128* nocapture %a, i8 zeroext %b,
; CHECK-LABEL: ubConv2qp_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lbz r5, 0(r5)
; CHECK-NEXT: add r4, r5, r4
; CHECK-NEXT: mtvsrwa v2, r4
; CHECK-NEXT: xscvsdqp v2, v2
; CHECK-NEXT: stxv v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: ubConv2qp_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: lbz r3, 0(r5)
; CHECK-P8-NEXT: add r3, r3, r4
; CHECK-P8-NEXT: clrldi r3, r3, 32
; CHECK-P8-NEXT: bl __floatsikf
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stvx v2, 0, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
i8* nocapture readonly %c) {
entry:
%conv = zext i8 %b to i32
%0 = load i8, i8* %c, align 1
%conv1 = zext i8 %0 to i32
%add = add nuw nsw i32 %conv1, %conv
%conv2 = sitofp i32 %add to fp128
store fp128 %conv2, fp128* %a, align 16
ret void
}
; Convert QP to DP
; Global fp128 constants used by the QP->DP / QP->SP truncation tests.
; 0xL... literals are IEEE-754 binary128 (fp128) constants.
@f128Array = global [4 x fp128]
[fp128 0xL00000000000000004004C00000000000,
fp128 0xLF000000000000000400808AB851EB851,
fp128 0xL5000000000000000400E0C26324C8366,
fp128 0xL8000000000000000400A24E2E147AE14], align 16
@f128global = global fp128 0xL300000000000000040089CA8F5C28F5C, align 16
; Function Attrs: norecurse nounwind readonly
; fptrunc fp128 -> double on a loaded value. PWR9 uses the hardware
; xscvqpdp instruction; PWR8 (-enable-soft-fp128) calls __trunckfdf2.
define double @qpConv2dp(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2dp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    xscvqpdp v2, v2
; CHECK-NEXT:    xscpsgndp f1, v2, v2
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2dp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    bl __trunckfdf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptrunc fp128 %0 to double
ret double %conv
}
; Function Attrs: norecurse nounwind
; Same fptrunc fp128 -> double, but the source is the TOC-addressed global
; @f128global and the result is stored through a pointer (stxsd / stfdx).
define void @qpConv2dp_02(double* nocapture %res) {
; CHECK-LABEL: qpConv2dp_02:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r4, r2, .LC6@toc@ha
; CHECK-NEXT:    ld r4, .LC6@toc@l(r4)
; CHECK-NEXT:    lxvx v2, 0, r4
; CHECK-NEXT:    xscvqpdp v2, v2
; CHECK-NEXT:    stxsd v2, 0(r3)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2dp_02:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    addis r4, r2, .LC6@toc@ha
; CHECK-P8-NEXT:    mr r30, r3
; CHECK-P8-NEXT:    ld r4, .LC6@toc@l(r4)
; CHECK-P8-NEXT:    lvx v2, 0, r4
; CHECK-P8-NEXT:    bl __trunckfdf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stfdx f1, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* @f128global, align 16
%conv = fptrunc fp128 %0 to double
store double %conv, double* %res, align 8
ret void
}
; Function Attrs: norecurse nounwind
; fptrunc of @f128Array[0] stored to an indexed destination: checks the
; converted double is stored with an x-form store (stxsdx / stfdx) using
; the sign-extended index scaled by 8.
define void @qpConv2dp_03(double* nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2dp_03:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r5, r2, .LC7@toc@ha
; CHECK-NEXT:    sldi r4, r4, 3
; CHECK-NEXT:    ld r5, .LC7@toc@l(r5)
; CHECK-NEXT:    lxvx v2, 0, r5
; CHECK-NEXT:    xscvqpdp v2, v2
; CHECK-NEXT:    stxsdx v2, r3, r4
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2dp_03:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r29, -24
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -64(r1)
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    addis r4, r2, .LC7@toc@ha
; CHECK-P8-NEXT:    mr r29, r3
; CHECK-P8-NEXT:    ld r4, .LC7@toc@l(r4)
; CHECK-P8-NEXT:    lvx v2, 0, r4
; CHECK-P8-NEXT:    bl __trunckfdf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    sldi r3, r30, 3
; CHECK-P8-NEXT:    stfdx f1, r29, r3
; CHECK-P8-NEXT:    addi r1, r1, 64
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 0), align 16
%conv = fptrunc fp128 %0 to double
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds double, double* %res, i64 %idxprom
store double %conv, double* %arrayidx, align 8
ret void
}
; Function Attrs: norecurse nounwind
; fadd fp128 followed by fptrunc to double. PWR9: xsaddqp + xscvqpdp;
; PWR8 soft-fp128 chains the libcalls __addkf3 then __trunckfdf2.
define void @qpConv2dp_04(fp128* nocapture readonly %a, fp128* nocapture readonly %b, double* nocapture %res) {
; CHECK-LABEL: qpConv2dp_04:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    lxv v3, 0(r4)
; CHECK-NEXT:    xsaddqp v2, v2, v3
; CHECK-NEXT:    xscvqpdp v2, v2
; CHECK-NEXT:    stxsd v2, 0(r5)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2dp_04:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    lvx v3, 0, r4
; CHECK-P8-NEXT:    mr r30, r5
; CHECK-P8-NEXT:    bl __addkf3
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    bl __trunckfdf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stfdx f1, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptrunc fp128 %add to double
store double %conv, double* %res, align 8
ret void
}
; Convert QP to SP
; Function Attrs: norecurse nounwind readonly
; fptrunc fp128 -> float. PWR9 truncates via the round-to-odd form
; xscvqpdpo followed by xsrsp (double rounding avoidance); PWR8
; soft-fp128 calls __trunckfsf2.
define float @qpConv2sp(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2sp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    xscvqpdpo v2, v2
; CHECK-NEXT:    xsrsp f1, v2
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2sp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    bl __trunckfsf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptrunc fp128 %0 to float
ret float %conv
}
; Function Attrs: norecurse nounwind
; fptrunc fp128 -> float from the TOC-addressed global @f128global,
; storing the single-precision result through a pointer.
define void @qpConv2sp_02(float* nocapture %res) {
; CHECK-LABEL: qpConv2sp_02:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r4, r2, .LC6@toc@ha
; CHECK-NEXT:    ld r4, .LC6@toc@l(r4)
; CHECK-NEXT:    lxvx v2, 0, r4
; CHECK-NEXT:    xscvqpdpo v2, v2
; CHECK-NEXT:    xsrsp f0, v2
; CHECK-NEXT:    stfs f0, 0(r3)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2sp_02:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    addis r4, r2, .LC6@toc@ha
; CHECK-P8-NEXT:    mr r30, r3
; CHECK-P8-NEXT:    ld r4, .LC6@toc@l(r4)
; CHECK-P8-NEXT:    lvx v2, 0, r4
; CHECK-P8-NEXT:    bl __trunckfsf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stfsx f1, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* @f128global, align 16
%conv = fptrunc fp128 %0 to float
store float %conv, float* %res, align 4
ret void
}
; Function Attrs: norecurse nounwind
; fptrunc of @f128Array[3] (offset 48) to float, stored at an indexed
; destination (index scaled by 4 for float elements).
define void @qpConv2sp_03(float* nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2sp_03:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r5, r2, .LC7@toc@ha
; CHECK-NEXT:    sldi r4, r4, 2
; CHECK-NEXT:    ld r5, .LC7@toc@l(r5)
; CHECK-NEXT:    lxv v2, 48(r5)
; CHECK-NEXT:    xscvqpdpo v2, v2
; CHECK-NEXT:    xsrsp f0, v2
; CHECK-NEXT:    stfsx f0, r3, r4
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2sp_03:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r29, -24
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -64(r1)
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    addis r4, r2, .LC7@toc@ha
; CHECK-P8-NEXT:    mr r29, r3
; CHECK-P8-NEXT:    ld r4, .LC7@toc@l(r4)
; CHECK-P8-NEXT:    addi r4, r4, 48
; CHECK-P8-NEXT:    lvx v2, 0, r4
; CHECK-P8-NEXT:    bl __trunckfsf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    sldi r3, r30, 2
; CHECK-P8-NEXT:    stfsx f1, r29, r3
; CHECK-P8-NEXT:    addi r1, r1, 64
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 3), align 16
%conv = fptrunc fp128 %0 to float
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds float, float* %res, i64 %idxprom
store float %conv, float* %arrayidx, align 4
ret void
}
; Function Attrs: norecurse nounwind
; fadd fp128 then fptrunc to float. PWR9: xsaddqp + xscvqpdpo/xsrsp;
; PWR8 soft-fp128 chains __addkf3 then __trunckfsf2.
define void @qpConv2sp_04(fp128* nocapture readonly %a, fp128* nocapture readonly %b, float* nocapture %res) {
; CHECK-LABEL: qpConv2sp_04:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    lxv v3, 0(r4)
; CHECK-NEXT:    xsaddqp v2, v2, v3
; CHECK-NEXT:    xscvqpdpo v2, v2
; CHECK-NEXT:    xsrsp f0, v2
; CHECK-NEXT:    stfs f0, 0(r5)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2sp_04:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    lvx v3, 0, r4
; CHECK-P8-NEXT:    mr r30, r5
; CHECK-P8-NEXT:    bl __addkf3
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    bl __trunckfsf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stfsx f1, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptrunc fp128 %add to float
store float %conv, float* %res, align 4
ret void
}
; Zero-initialized fp128 global used as the store target of the fpext tests.
@f128Glob = common global fp128 0xL00000000000000000000000000000000, align 16
; Function Attrs: norecurse nounwind readnone
; fpext double -> fp128 of an argument. PWR9 lowers to xscvdpqp (after
; moving f1 into a vector register); PWR8 calls __extenddfkf2.
define fp128 @dpConv2qp(double %a) {
; CHECK-LABEL: dpConv2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscpsgndp v2, f1, f1
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: dpConv2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    bl __extenddfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fpext double %a to fp128
ret fp128 %conv
}
; Function Attrs: norecurse nounwind
; fpext of a loaded double, result stored to the global @f128Glob
; (addressed through the TOC entry .LC8).
define void @dpConv2qp_02(double* nocapture readonly %a) {
; CHECK-LABEL: dpConv2qp_02:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxsd v2, 0(r3)
; CHECK-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxvx v2, 0, r3
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: dpConv2qp_02:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lfdx f1, 0, r3
; CHECK-P8-NEXT:    bl __extenddfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-P8-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-P8-NEXT:    stvx v2, 0, r3
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load double, double* %a, align 8
%conv = fpext double %0 to fp128
store fp128 %conv, fp128* @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Indexed-load variant of dpConv2qp_02: the double source comes from
; a[idx] (x-form load lxsdx/lfdx with index scaled by 8).
define void @dpConv2qp_02b(double* nocapture readonly %a, i32 signext %idx) {
; CHECK-LABEL: dpConv2qp_02b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    sldi r4, r4, 3
; CHECK-NEXT:    lxsdx v2, r3, r4
; CHECK-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxvx v2, 0, r3
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: dpConv2qp_02b:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    sldi r4, r4, 3
; CHECK-P8-NEXT:    lfdx f1, r3, r4
; CHECK-P8-NEXT:    bl __extenddfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-P8-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-P8-NEXT:    stvx v2, 0, r3
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom
%0 = load double, double* %arrayidx, align 8
%conv = fpext double %0 to fp128
store fp128 %conv, fp128* @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
; fpext double -> fp128 stored to res[idx] (fp128 elements, so the
; index is scaled by 16 and stored with an x-form vector store).
define void @dpConv2qp_03(fp128* nocapture %res, i32 signext %idx, double %a) {
; CHECK-LABEL: dpConv2qp_03:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscpsgndp v2, f1, f1
; CHECK-NEXT:    sldi r4, r4, 4
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxvx v2, r3, r4
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: dpConv2qp_03:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r29, -24
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -64(r1)
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mr r29, r3
; CHECK-P8-NEXT:    bl __extenddfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    sldi r3, r30, 4
; CHECK-P8-NEXT:    stvx v2, r29, r3
; CHECK-P8-NEXT:    addi r1, r1, 64
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fpext double %a to fp128
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds fp128, fp128* %res, i64 %idxprom
store fp128 %conv, fp128* %arrayidx, align 16
ret void
}
; Function Attrs: norecurse nounwind
; fpext double -> fp128 with the result stored through a pointer argument.
define void @dpConv2qp_04(double %a, fp128* nocapture %res) {
; CHECK-LABEL: dpConv2qp_04:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscpsgndp v2, f1, f1
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: dpConv2qp_04:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    bl __extenddfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fpext double %a to fp128
store fp128 %conv, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind readnone
; fpext float -> fp128. PWR9 uses the same xscvdpqp path as double
; (float is already held widened in f1); PWR8 calls __extendsfkf2.
define fp128 @spConv2qp(float %a) {
; CHECK-LABEL: spConv2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscpsgndp v2, f1, f1
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: spConv2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    bl __extendsfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fpext float %a to fp128
ret fp128 %conv
}
; Function Attrs: norecurse nounwind
; fpext of a loaded float, stored to the global @f128Glob.
define void @spConv2qp_02(float* nocapture readonly %a) {
; CHECK-LABEL: spConv2qp_02:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxssp v2, 0(r3)
; CHECK-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxvx v2, 0, r3
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: spConv2qp_02:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lfsx f1, 0, r3
; CHECK-P8-NEXT:    bl __extendsfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-P8-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-P8-NEXT:    stvx v2, 0, r3
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load float, float* %a, align 4
%conv = fpext float %0 to fp128
store fp128 %conv, fp128* @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Indexed-load variant of spConv2qp_02: float source is a[idx]
; (x-form load lxsspx/lfsx with index scaled by 4).
define void @spConv2qp_02b(float* nocapture readonly %a, i32 signext %idx) {
; CHECK-LABEL: spConv2qp_02b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    sldi r4, r4, 2
; CHECK-NEXT:    lxsspx v2, r3, r4
; CHECK-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxvx v2, 0, r3
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: spConv2qp_02b:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    sldi r4, r4, 2
; CHECK-P8-NEXT:    lfsx f1, r3, r4
; CHECK-P8-NEXT:    bl __extendsfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addis r3, r2, .LC8@toc@ha
; CHECK-P8-NEXT:    ld r3, .LC8@toc@l(r3)
; CHECK-P8-NEXT:    stvx v2, 0, r3
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
%0 = load float, float* %arrayidx, align 4
%conv = fpext float %0 to fp128
store fp128 %conv, fp128* @f128Glob, align 16
ret void
}
; Function Attrs: norecurse nounwind
; fpext float -> fp128 stored to res[idx] (fp128 elements, index
; scaled by 16).
define void @spConv2qp_03(fp128* nocapture %res, i32 signext %idx, float %a) {
; CHECK-LABEL: spConv2qp_03:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscpsgndp v2, f1, f1
; CHECK-NEXT:    sldi r4, r4, 4
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxvx v2, r3, r4
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: spConv2qp_03:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 64
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r29, -24
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -64(r1)
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mr r29, r3
; CHECK-P8-NEXT:    bl __extendsfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    sldi r3, r30, 4
; CHECK-P8-NEXT:    stvx v2, r29, r3
; CHECK-P8-NEXT:    addi r1, r1, 64
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fpext float %a to fp128
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds fp128, fp128* %res, i64 %idxprom
store fp128 %conv, fp128* %arrayidx, align 16
ret void
}
; Function Attrs: norecurse nounwind
; fpext float -> fp128 with the result stored through a pointer argument.
define void @spConv2qp_04(float %a, fp128* nocapture %res) {
; CHECK-LABEL: spConv2qp_04:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscpsgndp v2, f1, f1
; CHECK-NEXT:    xscvdpqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: spConv2qp_04:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    bl __extendsfkf2
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fpext float %a to fp128
store fp128 %conv, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip double -> i32 (fptosi) -> fp128 (sitofp). PWR9 keeps the
; value in VSRs (xscvdpsxws + vextsw2d + xscvsdqp); PWR8 moves the i32
; to a GPR, sign-extends, and calls __floatsikf.
define void @cvdp2sw2qp(double %val, fp128* nocapture %res) {
; CHECK-LABEL: cvdp2sw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws v2, f1
; CHECK-NEXT:    vextsw2d v2, v2
; CHECK-NEXT:    xscvsdqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvdp2sw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpsxws f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprwz r3, f0
; CHECK-P8-NEXT:    extsw r3, r3
; CHECK-P8-NEXT:    bl __floatsikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptosi double %val to i32
%conv1 = sitofp i32 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip double -> i64 (fptosi) -> fp128 (sitofp). PWR9:
; xscvdpsxds + xscvsdqp; PWR8 calls __floatdikf on the extracted i64.
define void @cvdp2sdw2qp(double %val, fp128* nocapture %res) {
; CHECK-LABEL: cvdp2sdw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds v2, f1
; CHECK-NEXT:    xscvsdqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvdp2sdw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpsxds f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprd r3, f0
; CHECK-P8-NEXT:    bl __floatdikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptosi double %val to i64
%conv1 = sitofp i64 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip float -> i32 (fptosi) -> fp128 (sitofp); same lowering as
; the double source case since f1 holds the value in double format.
define void @cvsp2sw2qp(float %val, fp128* nocapture %res) {
; CHECK-LABEL: cvsp2sw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws v2, f1
; CHECK-NEXT:    vextsw2d v2, v2
; CHECK-NEXT:    xscvsdqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvsp2sw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpsxws f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprwz r3, f0
; CHECK-P8-NEXT:    extsw r3, r3
; CHECK-P8-NEXT:    bl __floatsikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptosi float %val to i32
%conv1 = sitofp i32 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip float -> i64 (fptosi) -> fp128 (sitofp). PWR9:
; xscvdpsxds + xscvsdqp; PWR8 calls __floatdikf.
define void @cvsp2sdw2qp(float %val, fp128* nocapture %res) {
; CHECK-LABEL: cvsp2sdw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds v2, f1
; CHECK-NEXT:    xscvsdqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvsp2sdw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpsxds f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprd r3, f0
; CHECK-P8-NEXT:    bl __floatdikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptosi float %val to i64
%conv1 = sitofp i64 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip double -> u32 (fptoui) -> fp128 (uitofp). PWR9 extracts
; the unsigned word with xxextractuw then converts with xscvudqp;
; PWR8 zero-extends in a GPR (clrldi) and calls __floatunsikf.
define void @cvdp2uw2qp(double %val, fp128* nocapture %res) {
; CHECK-LABEL: cvdp2uw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    xxextractuw v2, vs0, 8
; CHECK-NEXT:    xscvudqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvdp2uw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpuxws f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprwz r3, f0
; CHECK-P8-NEXT:    clrldi r3, r3, 32
; CHECK-P8-NEXT:    bl __floatunsikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptoui double %val to i32
%conv1 = uitofp i32 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip double -> u64 (fptoui) -> fp128 (uitofp). PWR9:
; xscvdpuxds + xscvudqp; PWR8 calls __floatundikf.
define void @cvdp2udw2qp(double %val, fp128* nocapture %res) {
; CHECK-LABEL: cvdp2udw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds v2, f1
; CHECK-NEXT:    xscvudqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvdp2udw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpuxds f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprd r3, f0
; CHECK-P8-NEXT:    bl __floatundikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptoui double %val to i64
%conv1 = uitofp i64 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip float -> u32 (fptoui) -> fp128 (uitofp); same lowering as
; the double source case.
define void @cvsp2uw2qp(float %val, fp128* nocapture %res) {
; CHECK-LABEL: cvsp2uw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    xxextractuw v2, vs0, 8
; CHECK-NEXT:    xscvudqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvsp2uw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpuxws f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprwz r3, f0
; CHECK-P8-NEXT:    clrldi r3, r3, 32
; CHECK-P8-NEXT:    bl __floatunsikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptoui float %val to i32
%conv1 = uitofp i32 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind
; Round-trip float -> u64 (fptoui) -> fp128 (uitofp). PWR9:
; xscvdpuxds + xscvudqp; PWR8 calls __floatundikf.
define void @cvsp2udw2qp(float %val, fp128* nocapture %res) {
; CHECK-LABEL: cvsp2udw2qp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds v2, f1
; CHECK-NEXT:    xscvudqp v2, v2
; CHECK-NEXT:    stxv v2, 0(r4)
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: cvsp2udw2qp:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 48
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    .cfi_offset r30, -16
; CHECK-P8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -48(r1)
; CHECK-P8-NEXT:    xscvdpuxds f0, f1
; CHECK-P8-NEXT:    mr r30, r4
; CHECK-P8-NEXT:    mffprd r3, f0
; CHECK-P8-NEXT:    bl __floatundikf
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    stvx v2, 0, r30
; CHECK-P8-NEXT:    addi r1, r1, 48
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%conv = fptoui float %val to i64
%conv1 = uitofp i64 %conv to fp128
store fp128 %conv1, fp128* %res, align 16
ret void
}
; Function Attrs: norecurse nounwind readonly
; fptosi fp128 -> i128: no hardware support on either subtarget, so
; both emit a call to the compiler-rt routine __fixkfti.
define i128 @qpConv2i128(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2i128:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mflr r0
; CHECK-NEXT:    std r0, 16(r1)
; CHECK-NEXT:    stdu r1, -32(r1)
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    .cfi_offset lr, 16
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    bl __fixkfti
; CHECK-NEXT:    nop
; CHECK-NEXT:    addi r1, r1, 32
; CHECK-NEXT:    ld r0, 16(r1)
; CHECK-NEXT:    mtlr r0
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2i128:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    bl __fixkfti
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptosi fp128 %0 to i128
ret i128 %conv
}
; Function Attrs: norecurse nounwind readonly
; fptoui fp128 -> u128: both subtargets call __fixunskfti.
define i128 @qpConv2ui128(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2ui128:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mflr r0
; CHECK-NEXT:    std r0, 16(r1)
; CHECK-NEXT:    stdu r1, -32(r1)
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    .cfi_offset lr, 16
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    bl __fixunskfti
; CHECK-NEXT:    nop
; CHECK-NEXT:    addi r1, r1, 32
; CHECK-NEXT:    ld r0, 16(r1)
; CHECK-NEXT:    mtlr r0
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2ui128:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    bl __fixunskfti
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptoui fp128 %0 to i128
ret i128 %conv
}
; Function Attrs: norecurse nounwind readonly
; fptoui fp128 -> i1: legalized through an i32 conversion, so PWR9 uses
; the signed-word form xscvqpswz and PWR8 calls __fixkfsi.
define i1 @qpConv2ui1(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2ui1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    xscvqpswz v2, v2
; CHECK-NEXT:    mfvsrwz r3, v2
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2ui1:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    bl __fixkfsi
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptoui fp128 %0 to i1
ret i1 %conv
}
; Function Attrs: norecurse nounwind readonly
; fptosi fp128 -> i1: identical lowering to the unsigned i1 case
; (xscvqpswz on PWR9, __fixkfsi on PWR8).
define i1 @qpConv2si1(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2si1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxv v2, 0(r3)
; CHECK-NEXT:    xscvqpswz v2, v2
; CHECK-NEXT:    mfvsrwz r3, v2
; CHECK-NEXT:    blr
;
; CHECK-P8-LABEL: qpConv2si1:
; CHECK-P8:       # %bb.0: # %entry
; CHECK-P8-NEXT:    mflr r0
; CHECK-P8-NEXT:    std r0, 16(r1)
; CHECK-P8-NEXT:    stdu r1, -32(r1)
; CHECK-P8-NEXT:    .cfi_def_cfa_offset 32
; CHECK-P8-NEXT:    .cfi_offset lr, 16
; CHECK-P8-NEXT:    lvx v2, 0, r3
; CHECK-P8-NEXT:    bl __fixkfsi
; CHECK-P8-NEXT:    nop
; CHECK-P8-NEXT:    addi r1, r1, 32
; CHECK-P8-NEXT:    ld r0, 16(r1)
; CHECK-P8-NEXT:    mtlr r0
; CHECK-P8-NEXT:    blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptosi fp128 %0 to i1
ret i1 %conv
}