# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -debugify-and-strip-all-safe -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=NOT_STRICT
# RUN: llc -debugify-and-strip-all-safe -mattr=+strict-align -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=STRICT
# REQUIRES: asserts
# Check that the load-or combine respects alignment requirements.
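# The combine recognizes or(zextload %ptr, shl(zextload %ptr + 1, 16)) and folds it
# into a single wider load. With +strict-align, the wider load may only be formed
# when it would be sufficiently aligned; the two functions below cover both cases.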
...
---
name: misaligned
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; NOT_STRICT-LABEL: name: misaligned
    ; NOT_STRICT: liveins: $x0, $x1
    ; NOT_STRICT: %ptr:_(p0) = COPY $x1
    ; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
    ; NOT_STRICT: $w1 = COPY %full_load(s32)
    ; NOT_STRICT: RET_ReallyLR implicit $w1
    ; STRICT-LABEL: name: misaligned
    ; STRICT: liveins: $x0, $x1
    ; STRICT: %cst_1:_(s64) = G_CONSTANT i64 1
    ; STRICT: %cst_16:_(s32) = G_CONSTANT i32 16
    ; STRICT: %ptr:_(p0) = COPY $x1
    ; STRICT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
    ; STRICT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
    ; STRICT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
    ; STRICT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
    ; STRICT: %full_load:_(s32) = G_OR %low_half, %high_half
    ; STRICT: $w1 = COPY %full_load(s32)
    ; STRICT: RET_ReallyLR implicit $w1
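    ; Input: two 2-byte-aligned zextloads merged via shift+or into one 32-bit value.
    ; A single 4-byte load here would be underaligned, so the fold only fires when
    ; unaligned accesses are allowed (i.e. without +strict-align).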
    %cst_1:_(s64) = G_CONSTANT i64 1
    %cst_16:_(s32) = G_CONSTANT i32 16
    %ptr:_(p0) = COPY $x1
    %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
    %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2, align 2)
    %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2, align 2)
    %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
    %full_load:_(s32) = G_OR %low_half, %high_half
    $w1 = COPY %full_load(s32)
    RET_ReallyLR implicit $w1
...
---
name: aligned
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1

    ; NOT_STRICT-LABEL: name: aligned
    ; NOT_STRICT: liveins: $x0, $x1
    ; NOT_STRICT: %ptr:_(p0) = COPY $x1
    ; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4)
    ; NOT_STRICT: $w1 = COPY %full_load(s32)
    ; NOT_STRICT: RET_ReallyLR implicit $w1
    ; STRICT-LABEL: name: aligned
    ; STRICT: liveins: $x0, $x1
    ; STRICT: %ptr:_(p0) = COPY $x1
    ; STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4)
    ; STRICT: $w1 = COPY %full_load(s32)
    ; STRICT: RET_ReallyLR implicit $w1
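    ; Input: the same shift+or pattern, but the loads are 4-byte aligned, so the
    ; combined 4-byte load is naturally aligned and the fold fires even with
    ; +strict-align.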
    %cst_1:_(s64) = G_CONSTANT i64 1
    %cst_16:_(s32) = G_CONSTANT i32 16
    %ptr:_(p0) = COPY $x1
    %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
    %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2, align 4)
    %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2, align 4)
    %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
    %full_load:_(s32) = G_OR %low_half, %high_half
    $w1 = COPY %full_load(s32)
    RET_ReallyLR implicit $w1