llvm-for-llvmta/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck -check-prefix=SI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck -check-prefix=VI %s
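# These cases cover legalization of G_STORE to global memory (addrspace 1) for
# scalar, pointer, and vector types, with separate check prefixes for the SI
# (tahiti) and VI (fiji) subtargets.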
---
name: test_store_global_i32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_i32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; SI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
; VI-LABEL: name: test_store_global_i32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; VI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
G_STORE %1, %0 :: (store 4, addrspace 1)
...
---
name: test_store_global_i64
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_i64
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](s64), [[COPY]](p1) :: (store 8, addrspace 1)
; VI-LABEL: name: test_store_global_i64
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](s64), [[COPY]](p1) :: (store 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store 8, addrspace 1)
...
---
name: test_store_global_p1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_p1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](p1), [[COPY]](p1) :: (store 8, addrspace 1)
; VI-LABEL: name: test_store_global_p1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](p1), [[COPY]](p1) :: (store 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(p1) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store 8, addrspace 1)
...
---
name: test_store_global_p4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_p4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(p4) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](p4), [[COPY]](p1) :: (store 8, addrspace 1)
; VI-LABEL: name: test_store_global_p4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(p4) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](p4), [[COPY]](p1) :: (store 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(p4) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store 8, addrspace 1)
...
---
name: test_store_global_p3
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_p3
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2
; SI: G_STORE [[COPY1]](p3), [[COPY]](p1) :: (store 4, addrspace 1)
; VI-LABEL: name: test_store_global_p3
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2
; VI: G_STORE [[COPY1]](p3), [[COPY]](p1) :: (store 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(p3) = COPY $vgpr2
G_STORE %1, %0 :: (store 4, addrspace 1)
...
---
name: test_store_global_v2s32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_v2s32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](<2 x s32>), [[COPY]](p1) :: (store 8, addrspace 1)
; VI-LABEL: name: test_store_global_v2s32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](<2 x s32>), [[COPY]](p1) :: (store 8, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store 8, addrspace 1)
...
---
name: test_store_global_v2s16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_v2s16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
; SI: G_STORE [[COPY1]](<2 x s16>), [[COPY]](p1) :: (store 4, addrspace 1)
; VI-LABEL: name: test_store_global_v2s16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
; VI: G_STORE [[COPY1]](<2 x s16>), [[COPY]](p1) :: (store 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s16>) = COPY $vgpr2
G_STORE %1, %0 :: (store 4, addrspace 1)
...
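
# A 12-byte <3 x s32> store is split on SI into an 8-byte store plus a 4-byte
# store at offset 8; VI keeps it as a single 12-byte store.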
---
name: test_store_global_v3s32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[COPY1]](<3 x s32>), 0
; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<3 x s32>), 64
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, align 4, addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, addrspace 1)
; VI-LABEL: name: test_store_global_v3s32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: G_STORE [[COPY1]](<3 x s32>), [[COPY]](p1) :: (store 12, align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
G_STORE %1, %0 :: (store 12, align 4, addrspace 1)
...
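
# Truncating scalar stores legalize the source down to s32 with G_TRUNC and
# keep the narrow memory size from the memory operand.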
---
name: test_truncstore_global_s64_to_s8
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s8
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s8
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store 1, addrspace 1)
...
---
name: test_truncstore_global_s64_to_s16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
    G_STORE %1, %0 :: (store 2, addrspace 1)
...
---
name: test_truncstore_global_s64_to_s32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store 4, addrspace 1)
...
---
name: test_truncstore_global_s128_to_s16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_s128_to_s16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s128_to_s16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store 2, addrspace 1)
...
---
name: test_truncstore_global_s128_to_s8
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_s128_to_s8
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s128_to_s8
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store 1, addrspace 1)
...
---
name: test_store_global_i1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s1) = G_TRUNC %1
G_STORE %2, %0 :: (store 1, addrspace 1)
...
---
name: test_store_global_i8
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_i8
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI-LABEL: name: test_store_global_i8
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s8) = G_TRUNC %1
G_STORE %2, %0 :: (store 1, addrspace 1)
...
---
name: test_store_global_i16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_i16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI-LABEL: name: test_store_global_i16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s16) = G_TRUNC %1
G_STORE %2, %0 :: (store 2, addrspace 1)
...
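
# An s96 store is bitcast to <3 x s32>; SI then splits it as for the vector
# case above, while VI emits a single 12-byte store.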
---
name: test_store_global_96
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4
; SI-LABEL: name: test_store_global_96
; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; SI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
; SI: [[BITCAST:%[0-9]+]]:_(<3 x s32>) = G_BITCAST [[COPY]](s96)
; SI: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[BITCAST]](<3 x s32>), 0
; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[BITCAST]](<3 x s32>), 64
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY1]](p1) :: (store 8, align 16, addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, align 8, addrspace 1)
; VI-LABEL: name: test_store_global_96
; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; VI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
; VI: [[BITCAST:%[0-9]+]]:_(<3 x s32>) = G_BITCAST [[COPY]](s96)
; VI: G_STORE [[BITCAST]](<3 x s32>), [[COPY1]](p1) :: (store 12, align 16, addrspace 1)
%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
%1:_(p1) = COPY $vgpr3_vgpr4
G_STORE %0, %1 :: (store 12, addrspace 1, align 16)
...
---
name: test_store_global_i128
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_i128
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](s128)
; SI: G_STORE [[BITCAST]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
; VI-LABEL: name: test_store_global_i128
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](s128)
; VI: G_STORE [[BITCAST]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store 16, addrspace 1)
...
---
name: test_store_global_v2s64
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s64
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
; VI-LABEL: name: test_store_global_v2s64
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store 16, addrspace 1)
...
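
# <2 x s8> stores: with 1-byte alignment the two lanes become separate byte
# stores; with 2- or 4-byte alignment they are packed into an s16 and written
# with a single 2-byte access.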
---
name: test_store_global_v2s8_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s8_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; SI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: G_STORE [[COPY2]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
; VI-LABEL: name: test_store_global_v2s8_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; VI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI: G_STORE [[COPY2]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store 2, addrspace 1, align 1)
...
---
name: test_store_global_v2s8_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s8_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI-LABEL: name: test_store_global_v2s8_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store 2, addrspace 1, align 2)
...
---
name: test_store_global_v2s8_align4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s8_align4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1)
; VI-LABEL: name: test_store_global_v2s8_align4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store 2, addrspace 1, align 4)
...
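
# <3 x s8> stores pack the first two lanes into an s16 and then emit byte or
# halfword accesses for the packed pair and the remaining lane, depending on
# the requested alignment.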
---
name: test_store_global_v3s8_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s8_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC2]]
; SI: [[COPY4:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY4]](s16)
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[C1]](s32)
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY4]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1)
; VI-LABEL: name: test_store_global_v3s8_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[COPY2]], [[C1]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 3, addrspace 1, align 1)
...
---
name: test_store_global_v3s8_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s8_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC2]]
; SI: [[COPY3:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY3]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1)
; VI-LABEL: name: test_store_global_v3s8_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 3, addrspace 1, align 2)
...
---
name: test_store_global_v3s8_align4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s8_align4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
; SI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY5]], [[C3]](s32)
; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
; SI: G_STORE [[COPY5]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1)
; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1)
; VI-LABEL: name: test_store_global_v3s8_align4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32)
; VI: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; VI: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C1]](s16)
; VI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
; VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; VI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C2]](s32)
; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1)
; VI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 3, addrspace 1, align 4)
...
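
# <4 x s8> stores: 1-byte alignment emits four byte stores, 2-byte alignment
# packs lane pairs into halfwords, and 4-byte alignment packs all four lanes
# into a single 32-bit store.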
---
name: test_store_global_v4s8_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v4s8_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1)
; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1)
; VI-LABEL: name: test_store_global_v4s8_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 4, addrspace 1, align 1)
...
---
name: test_store_global_v4s8_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v4s8_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
; SI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[OR1]](s16)
; SI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1)
; VI-LABEL: name: test_store_global_v4s8_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[UV3]](s32)
; VI: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; VI: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C1]](s16)
; VI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[OR1]](s16)
; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 4, addrspace 1, align 2)
...
---
name: test_store_global_v4s8_align4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v4s8_align4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C2]](s32)
; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C3]](s32)
; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; SI: G_STORE [[OR2]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
; VI-LABEL: name: test_store_global_v4s8_align4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C2]](s32)
; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; VI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C3]](s32)
; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; VI: G_STORE [[OR2]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 4, addrspace 1, align 4)
...
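
# Truncating vector stores write only the number of bytes requested by the
# memory operand, packing or bitcasting the lanes as needed.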
---
name: test_truncstore_global_v2s8_to_1_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_v2s8_to_1_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI-LABEL: name: test_truncstore_global_v2s8_to_1_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store 1, addrspace 1, align 1)
...
---
name: test_truncstore_global_v3s8_to_1_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_truncstore_global_v3s8_to_1_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
; SI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI: G_STORE [[COPY5]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI-LABEL: name: test_truncstore_global_v3s8_to_1_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32)
; VI: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; VI: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C1]](s16)
; VI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
; VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; VI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C2]](s32)
; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 1, addrspace 1, align 1)
...
---
name: test_truncstore_global_v3s8_to_2_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_truncstore_global_v3s8_to_2_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
; SI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI: G_STORE [[COPY5]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI-LABEL: name: test_truncstore_global_v3s8_to_2_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32)
; VI: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; VI: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C1]](s16)
; VI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
; VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; VI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C2]](s32)
; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 2, addrspace 1, align 2)
...
---
name: test_truncstore_global_v4s8_to_3_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_v4s8_to_3_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC2]]
; SI: [[COPY4:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY4]](s16)
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[C1]](s32)
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY4]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1)
; VI-LABEL: name: test_truncstore_global_v4s8_to_3_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[COPY2]], [[C1]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1)
; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 3, addrspace 1, align 1)
...
---
name: test_truncstore_global_v4s8_to_3_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_v4s8_to_3_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC2]]
; SI: [[COPY3:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY3]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; SI: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1)
; VI-LABEL: name: test_truncstore_global_v4s8_to_3_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 3, addrspace 1, align 2)
...
---
name: test_truncstore_global_v4s8_to_3_align4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_v4s8_to_3_align4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[TRUNC:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[COPY1]](<4 x s32>)
; SI: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[TRUNC]](<4 x s8>)
; SI: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store 3, align 4, addrspace 1)
; VI-LABEL: name: test_truncstore_global_v4s8_to_3_align4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[TRUNC:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[COPY1]](<4 x s32>)
; VI: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[TRUNC]](<4 x s8>)
; VI: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store 3, align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store 3, addrspace 1, align 4)
...