# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
#
# Test widening and narrowing on test bit operations using subregister copies
# or SUBREG_TO_REG.
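#
# TBNZW/TBZW test a bit of a 32-bit register, while TBNZX/TBZX test a bit of a
# 64-bit register. When the value carrying the condition is not already in a
# register of the right width, it is widened with SUBREG_TO_REG or narrowed
# with a sub_32 subregister copy.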
--- |
  @glob = external dso_local unnamed_addr global i1, align 4
  define void @s1_no_copy() { ret void }
  define void @s16_no_copy() { ret void }
  define void @p0_no_copy() { ret void }
  define void @widen_s32_to_s64() { ret void }
  define void @widen_s16_to_s64() { ret void }
  define void @narrow_s64_to_s32() { ret void }

...
---
name: s1_no_copy
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: s1_no_copy
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %narrow:gpr32 = IMPLICIT_DEF
  ; CHECK: TBNZW %narrow, 0, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    %narrow:gpr(s1) = G_IMPLICIT_DEF

    ; There should be no copy here, because the s1 can be selected to a GPR32.
    G_BRCOND %narrow(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: s16_no_copy
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: s16_no_copy
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %narrow:gpr32 = IMPLICIT_DEF
  ; CHECK: TBNZW %narrow, 0, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    %narrow:gpr(s16) = G_IMPLICIT_DEF
    %trunc:gpr(s1) = G_TRUNC %narrow(s16)

    ; Look through the G_TRUNC to get the G_IMPLICIT_DEF. We don't need a
    ; SUBREG_TO_REG here, because the s16 will end up on a 32-bit register.
    G_BRCOND %trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: p0_no_copy
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: p0_no_copy
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %glob:gpr64common = MOVaddr target-flags(aarch64-page) @glob, target-flags(aarch64-pageoff, aarch64-nc) @glob
  ; CHECK: %load:gpr32 = LDRBBui %glob, 0 :: (dereferenceable load 1 from @glob, align 4)
  ; CHECK: TBNZW %load, 0, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    %glob:gpr(p0) = G_GLOBAL_VALUE @glob
    %load:gpr(s8) = G_LOAD %glob(p0) :: (dereferenceable load 1 from @glob, align 4)
    %trunc:gpr(s1) = G_TRUNC %load(s8)

    ; Look through the G_TRUNC to get the load. The load is into an s8, which
    ; will be selected to a GPR32, so we don't need a copy.
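    ; LDRBBui zero-extends the loaded byte into a W register, so TBNZW can
    ; test bit 0 of %load directly.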
    G_BRCOND %trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: widen_s32_to_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: widen_s32_to_s64
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: liveins: $w0
  ; CHECK: %reg:gpr32all = COPY $w0
  ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, %reg, %subreg.sub_32
  ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
  ; CHECK: TBZX [[COPY]], 33, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $w0
    %reg:gpr(s32) = COPY $w0
    %zext:gpr(s64) = G_ZEXT %reg(s32)
    %bit:gpr(s64) = G_CONSTANT i64 8589934592
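    ; 8589934592 is 1 << 33, so the G_AND/G_ICMP below test bit 33 of %zext.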
    %zero:gpr(s64) = G_CONSTANT i64 0
    %and:gpr(s64) = G_AND %zext, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero

    ; We should widen using a SUBREG_TO_REG here, because we need a TBZX to get
    ; bit 33. The subregister should be sub_32.
    %trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: widen_s16_to_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: widen_s16_to_s64
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %reg:gpr32 = IMPLICIT_DEF
  ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, %reg, %subreg.sub_32
  ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
  ; CHECK: TBZX [[COPY]], 33, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    %reg:gpr(s16) = G_IMPLICIT_DEF
    %zext:gpr(s64) = G_ZEXT %reg(s16)
    %bit:gpr(s64) = G_CONSTANT i64 8589934592
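    ; 8589934592 is 1 << 33, so the G_AND/G_ICMP below test bit 33 of %zext.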
    %zero:gpr(s64) = G_CONSTANT i64 0
    %and:gpr(s64) = G_AND %zext, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero

    ; We should widen using a SUBREG_TO_REG here, because we need a TBZX to get
    ; bit 33. The subregister should be sub_32, because s16 will end up on a
    ; GPR32.
    %trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: narrow_s64_to_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: narrow_s64_to_s32
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: liveins: $x0
  ; CHECK: %wide:gpr64all = COPY $x0
  ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %wide.sub_32
  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK: TBNZW [[COPY1]], 0, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %wide:gpr(s64) = COPY $x0

    ; We should narrow using a subregister copy here.
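    ; Only bit 0 of %wide is tested, so a copy of the sub_32 subregister
    ; feeding a TBNZW is enough.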
    %trunc:gpr(s1) = G_TRUNC %wide(s64)
    G_BRCOND %trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...