# Source: llvm-for-llvmta/test/CodeGen/AArch64/GlobalISel/legalizer-combiner.mir
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple aarch64-- -run-pass=legalizer %s -o - | FileCheck %s
# Check that a G_UNMERGE_VALUES fed by a G_BUILD_VECTOR of the same
# scalar folds away entirely: the CHECK lines expect only the copy of
# $w0 to survive, with no vector instructions remaining.
---
name: test_unmerge
body: |
  bb.1:
    liveins: $w0
    ; CHECK-LABEL: name: test_unmerge
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: $w0 = COPY [[COPY]](s32)
    %0:_(s32) = COPY $w0
    %1:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %0(s32), %0(s32), %0(s32)
    %2:_(s32), %3:_(s32), %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<4 x s32>)
    $w0 = COPY %2(s32)
...
# Check legalization of an s1 add formed from a trunc and an s1
# constant: the CHECK lines expect the operation widened to s32,
# with the i1 constant legalized to a G_CONSTANT i32 0.
---
name: test_legal_const_ext
body: |
  bb.1:
    liveins: $w0
    ; CHECK-LABEL: name: test_legal_const_ext
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[C]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
    ; CHECK: $w0 = COPY [[COPY2]](s32)
    %0:_(s32) = COPY $w0
    %1:_(s1) = G_TRUNC %0(s32)
    %2:_(s1) = G_CONSTANT i1 2
    %3:_(s1) = G_ADD %1(s1), %2(s1)
    %4:_(s32) = G_ANYEXT %3(s1)
    $w0 = COPY %4(s32)
...
# Check that the artifact combiner can get rid of the big
# vector type (4 x s64) by combining the G_UNMERGE_VALUES
# with the G_CONCAT_VECTORS and turning that into bitcast.
---
name: concat_vectors_unmerge_to_bitcast
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; CHECK-LABEL: name: concat_vectors_unmerge_to_bitcast
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[COPY]](<2 x s64>)
    ; CHECK: [[BITCAST1:%[0-9]+]]:_(s128) = G_BITCAST [[COPY1]](<2 x s64>)
    ; CHECK: $q0 = COPY [[BITCAST]](s128)
    ; CHECK: $q1 = COPY [[BITCAST1]](s128)
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
    %3:_(s128), %4:_(s128) = G_UNMERGE_VALUES %2(<4 x s64>)
    $q0 = COPY %3(s128)
    $q1 = COPY %4(s128)
...
# Check that the artifact combiner can get rid of the big
# vector type (4 x s64) by combining the G_UNMERGE_VALUES
# with the G_CONCAT_VECTORS and turning that into smaller
# 2x64-bit G_UNMERGE_VALUES.
---
name: concat_vectors_unmerge_to_unmerge
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; CHECK-LABEL: name: concat_vectors_unmerge_to_unmerge
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
    ; CHECK: $x0 = COPY [[UV]](s64)
    ; CHECK: $x1 = COPY [[UV1]](s64)
    ; CHECK: $x2 = COPY [[UV2]](s64)
    ; CHECK: $x3 = COPY [[UV3]](s64)
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
    %3:_(s64), %4:_(s64), %5:_(s64), %6:_(s64) = G_UNMERGE_VALUES %2(<4 x s64>)
    $x0 = COPY %3(s64)
    $x1 = COPY %4(s64)
    $x2 = COPY %5(s64)
    $x3 = COPY %6(s64)
...
# Check that legalizing the s128 G_MUL does not leave an unmerge of a
# merge behind: the CHECK lines expect the widened zextload's halves
# (the 8-byte load and the constant 0) to feed the 64-bit
# mul/umulh expansion directly, with a single final G_MERGE_VALUES.
---
name: unmerge_merge_combine
tracksRegLiveness: true
body: |
  bb.1:
    liveins: $x0
    ; CHECK-LABEL: name: unmerge_merge_combine
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16)
    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD1]](s128)
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV]]
    ; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[UV]]
    ; CHECK: [[MUL2:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV1]]
    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[UV]]
    ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
    ; CHECK: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MUL]](s64), [[ADD1]](s64)
    ; CHECK: $q0 = COPY [[MV]](s128)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s128) = G_ZEXTLOAD %0:_(p0) :: (load 8)
    %2:_(s128) = G_LOAD %0:_(p0) :: (load 16)
    %3:_(s128) = G_MUL %1:_, %2:_
    $q0 = COPY %3
    RET_ReallyLR
...