# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
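# Check that when a G_XOR with a constant feeds the G_AND of an
# icmp-with-zero branch, selection folds the G_XOR away and emits a
# TBNZW/TBZW on the original value, flipping the branch's sense only when
# the XOR constant actually touches the tested bit.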
...
---
name: flip_eq
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: flip_eq
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %copy:gpr64all = COPY $x0
  ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK: TBNZW [[COPY1]], 3, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0

    ; Check bit 3.
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0

    ; 8 has the third bit set.
    %fold_cst:gpr(s64) = G_CONSTANT i64 8

    ; This only has the third bit set if %copy does not. So, to walk through
    ; this, we want to use a TBNZW on %copy.
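    ; Concretely: (%copy ^ 8) & 8 is 0 exactly when bit 3 of %copy is 1, so
    ; the eq-with-zero branch to %bb.1 fires when bit 3 of %copy is set,
    ; which is a TBNZW on %copy.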
    %fold_me:gpr(s64) = G_XOR %copy, %fold_cst

    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: flip_ne
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: flip_ne
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %copy:gpr64all = COPY $x0
  ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK: TBZW [[COPY1]], 3, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0

    ; Same as eq case, but we should get a TBZW instead.
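    ; With intpred(ne), the branch fires when (%copy ^ 8) & 8 is nonzero,
    ; i.e. when bit 3 of %copy is clear, so the test-bit branch becomes a TBZW.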
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 8
    %fold_me:gpr(s64) = G_XOR %copy, %fold_cst
    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: dont_flip_eq
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: dont_flip_eq
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %copy:gpr64all = COPY $x0
  ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK: TBZW [[COPY1]], 3, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0

    ; Check bit 3.
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0

    ; 7 does not have the third bit set.
    %fold_cst:gpr(s64) = G_CONSTANT i64 7

    ; This only has the third bit set if %copy does. So, to walk through this,
    ; we should have a TBZW on %copy.
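    ; Concretely: XOR with 7 leaves bit 3 of %copy untouched, so the
    ; eq-with-zero branch fires when bit 3 of %copy is clear, which is a
    ; plain TBZW on %copy with no opcode flip.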
    %fold_me:gpr(s64) = G_XOR %fold_cst, %copy

    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: dont_flip_ne
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: dont_flip_ne
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %copy:gpr64all = COPY $x0
  ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK: TBNZW [[COPY1]], 3, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0

    ; Same as eq case, but we should get a TBNZW instead.
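    ; With intpred(ne), the branch fires when bit 3 of %copy is set, so this
    ; should select to a TBNZW on %copy with no opcode flip.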
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 7
    %fold_me:gpr(s64) = G_XOR %fold_cst, %copy
    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name: xor_chain
alignment: 4
legalized: true
regBankSelected: true
body: |
  ; CHECK-LABEL: name: xor_chain
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK: %copy:gpr64all = COPY $x0
  ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK: TBZW [[COPY1]], 3, %bb.1
  ; CHECK: B %bb.0
  ; CHECK: bb.1:
  ; CHECK: RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 8

    ; The G_XORs cancel each other out, so we should get a TBZW.
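    ; (%copy ^ 8) ^ 8 is just %copy, so the branch tests bit 3 of %copy
    ; directly and the eq-with-zero compare selects to a TBZW.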
    %xor1:gpr(s64) = G_XOR %copy, %fold_cst
    %xor2:gpr(s64) = G_XOR %xor1, %fold_cst

    %and:gpr(s64) = G_AND %xor2, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
    G_BRCOND %cmp_trunc(s1), %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...