# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck %s
---
name: test_sext_trunc_v2s32_to_v2s16_to_v2s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

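    ; Sext of a trunc back to the original <2 x s32> type is combined into a
    ; 16-bit G_SEXT_INREG on each unmerged element.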
    ; CHECK-LABEL: name: test_sext_trunc_v2s32_to_v2s16_to_v2s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY [[COPY]](<2 x s32>)
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s16>) = G_TRUNC %0
    %2:_(<2 x s32>) = G_SEXT %1
    $vgpr0_vgpr1 = COPY %2
...
---
name: test_sext_trunc_v2s32_to_v2s16_to_v2s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

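    ; The result elements are wider than the source, so each element is
    ; any-extended to s64 before the 16-bit G_SEXT_INREG.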
    ; CHECK-LABEL: name: test_sext_trunc_v2s32_to_v2s16_to_v2s64
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ANYEXT]], 16
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ANYEXT1]], 16
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SEXT_INREG]](s64), [[SEXT_INREG1]](s64)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s16>) = G_TRUNC %0
    %2:_(<2 x s64>) = G_SEXT %1
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
...
---
name: test_sext_trunc_v2s32_to_v2s8_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; The G_SEXT_INREG does not lower here because the G_TRUNC is illegal and
    ; cannot be legalized, which prevents any further legalization.
    ; CHECK-LABEL: name: test_sext_trunc_v2s32_to_v2s8_to_v2s16
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY2]], 8
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SEXT_INREG]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SEXT_INREG1]](s32)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
    ; CHECK: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s8>) = G_TRUNC %0
    %2:_(<2 x s16>) = G_SEXT %1
    $vgpr0 = COPY %2
...
---
name: test_sext_trunc_v3s32_to_v3s16_to_v3s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2

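    ; Same combine as the <2 x s32> case, applied to an odd-element
    ; <3 x s32> vector.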
    ; CHECK-LABEL: name: test_sext_trunc_v3s32_to_v3s16_to_v3s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16
    ; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 16
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32)
    ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(<3 x s16>) = G_TRUNC %0
    %2:_(<3 x s32>) = G_SEXT %1
    $vgpr0_vgpr1_vgpr2 = COPY %2
...