llvm-for-llvmta/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=aarch64 -run-pass=aarch64-prelegalizer-combiner %s -o - -verify-machineinstrs | FileCheck %s
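# The CHECK lines below expect the s24 load and its zext to stay as separate
# G_LOAD and G_ZEXT instructions, i.e. (per the file name) the prelegalizer
# combiner should not fold the extend into a G_ZEXTLOAD for a
# non-power-of-2 type.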
--- |
  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
  target triple = "aarch64"

  define i32 @ld_zext_i24(i24* %ptr, i24* %ptr2) {
    %load = load i24, i24* %ptr, align 1
    %ext = zext i24 %load to i32
    ret i32 %ext
  }
...
---
name: ld_zext_i24
alignment: 4
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
  bb.1 (%ir-block.0):
    liveins: $x0, $x1

    ; CHECK-LABEL: name: ld_zext_i24
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[COPY]](p0) :: (load 3 from %ir.ptr, align 1)
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
    ; CHECK: $w0 = COPY [[ZEXT]](s32)
    ; CHECK: RET_ReallyLR implicit $w0
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s24) = G_LOAD %0(p0) :: (load 3 from %ir.ptr, align 1)
    %3:_(s32) = G_ZEXT %2(s24)
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...