llvm-for-llvmta/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-rev.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s
#
# Test producing a G_REV from an appropriate G_SHUFFLE_VECTOR.
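#
# The lowering fires when the shuffle mask reverses the elements within
# each block. Below is a minimal sketch of the mask predicate, modeled on
# AArch64's isREVMask; the name and signature here are illustrative, not
# the exact in-tree API:
#
#   // Returns true if M reverses lanes within blocks of BlockElts
#   // elements. Undef lanes (M[i] < 0) place no constraint on the match.
#   static bool isREVMask(ArrayRef<int> M, unsigned BlockElts) {
#     for (unsigned i = 0; i < M.size(); ++i) {
#       if (M[i] < 0)
#         continue; // undef lane
#       if ((unsigned)M[i] !=
#           (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
#         return false;
#     }
#     return true;
#   }
#
# For <2 x s32> and BlockElts = 2, mask (1, 0) passes at every index,
# while mask (1, 3) fails at i = 1; the tests below exercise both cases.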
...
---
name:            rev64_mask_1_0
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1
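
    ; The mask (1, 0) reverses both lanes of a block of two 32-bit
    ; elements (BlockElts = 2), satisfying
    ;
    ;   M[i] == (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)
    ;
    ; at every index: M[0] = 1 == (0 - 0) + (2 - 1 - 0), and
    ; M[1] = 0 == (1 - 1) + (2 - 1 - 1).
    ;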
    ; CHECK-LABEL: name: rev64_mask_1_0
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
    ; CHECK: $d0 = COPY [[REV64_]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, 0)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0
...
---
name:            rev64_mask_1_undef
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1
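
    ; Same as above, but with an undef mask element. Undef lanes place no
    ; constraint on the match, so (1, undef) is still treated as a lane
    ; reversal and lowered to G_REV64.
    ;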
    ; CHECK-LABEL: name: rev64_mask_1_undef
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
    ; CHECK: $d0 = COPY [[REV64_]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, undef)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0
...
---
name:            no_rev64_mask_1
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1

    ; Verify that we don't produce a G_REV64 when
    ;
    ;   M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)
    ;
    ; In this example, BlockElts = 2.
    ;
    ; At i = 1:
    ;
    ;   M[i] = 3
    ;   i % BlockElts = 1 % 2 = 1
    ;
    ; So
    ;
    ;   3 != (1 - 1) + (2 - 1 - 1)
    ;   3 != 0
    ;
    ; and so we should not produce a G_REV64. The mask (1, 3) instead takes
    ; the high lane of each input, which matches G_ZIP2.
    ;
    ; CHECK-LABEL: name: no_rev64_mask_1
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK: [[ZIP2_:%[0-9]+]]:_(<2 x s32>) = G_ZIP2 [[COPY]], [[COPY1]]
    ; CHECK: $d0 = COPY [[ZIP2_]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, 3)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0
...