; llvm-for-llvmta/test/Transforms/InstCombine/ARM/vmldava.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S -mtriple=arm -o - %s | FileCheck %s
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
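
; InstCombine should fold the scalar add of the accumulator %z into the vmldava
; intrinsic when the intrinsic's accumulator operand is zero:
;   add (vmldava(..., i32 0, %x, %y)), %z  -->  vmldava(..., i32 %z, %x, %y)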
define arm_aapcs_vfpcc i32 @test_vmladavaq_s32(i32 %z, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_vmladavaq_s32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 %z, <4 x i32> %x, <4 x i32> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 0, <4 x i32> %x, <4 x i32> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

define arm_aapcs_vfpcc i32 @test_vmladavaq_s16(i32 %z, <8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @test_vmladavaq_s16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %z, <8 x i16> %x, <8 x i16> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 0, <8 x i16> %x, <8 x i16> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

define arm_aapcs_vfpcc i32 @test_vmladavaq_s8(i32 %z, <16 x i8> %x, <16 x i8> %y) {
; CHECK-LABEL: @test_vmladavaq_s8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 %z, <16 x i8> %x, <16 x i8> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 0, <16 x i8> %x, <16 x i8> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

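; Unsigned variants: the first immediate operand of the intrinsic is 1 instead
; of 0; the same fold into the accumulator operand applies.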
define arm_aapcs_vfpcc i32 @test_vmladavaq_u32(i32 %z, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_vmladavaq_u32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 %z, <4 x i32> %x, <4 x i32> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 0, <4 x i32> %x, <4 x i32> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

define arm_aapcs_vfpcc i32 @test_vmladavaq_u16(i32 %z, <8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @test_vmladavaq_u16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 %z, <8 x i16> %x, <8 x i16> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 0, <8 x i16> %x, <8 x i16> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

define arm_aapcs_vfpcc i32 @test_vmladavaq_u8(i32 %z, <16 x i8> %x, <16 x i8> %y) {
; CHECK-LABEL: @test_vmladavaq_u8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 %z, <16 x i8> %x, <16 x i8> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 0, <16 x i8> %x, <16 x i8> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

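; VMLSDAV (multiply-subtract) variants: the second immediate operand of the
; intrinsic is 1; the same fold into the accumulator operand applies.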
define arm_aapcs_vfpcc i32 @test_vmlsdavaq_s32(i32 %z, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_vmlsdavaq_s32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 %z, <4 x i32> %x, <4 x i32> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 0, <4 x i32> %x, <4 x i32> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

define arm_aapcs_vfpcc i32 @test_vmlsdavaq_s16(i32 %z, <8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: @test_vmlsdavaq_s16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 %z, <8 x i16> %x, <8 x i16> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 0, <8 x i16> %x, <8 x i16> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

define arm_aapcs_vfpcc i32 @test_vmlsdavaq_s8(i32 %z, <16 x i8> %x, <16 x i8> %y) {
; CHECK-LABEL: @test_vmlsdavaq_s8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    %0 = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 %z, <16 x i8> %x, <16 x i8> %y)
; CHECK-NEXT:    ret i32 %0
entry:
  %0 = tail call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 0, <16 x i8> %x, <16 x i8> %y)
  %1 = add nsw i32 %0, %z
  ret i32 %1
}

declare i32 @llvm.arm.mve.vmldava.v4i32(i32, i32, i32, i32, <4 x i32>, <4 x i32>)
declare i32 @llvm.arm.mve.vmldava.v8i16(i32, i32, i32, i32, <8 x i16>, <8 x i16>)
declare i32 @llvm.arm.mve.vmldava.v16i8(i32, i32, i32, i32, <16 x i8>, <16 x i8>)