// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s

#include <arm_mve.h>
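
// Each test below checks the IR that Clang emits for one MVE long
// multiply-accumulate reduction intrinsic. When POLYMORPHIC is defined, the
// overloaded intrinsic name (e.g. vmlaldavaq) is used; otherwise the
// type-suffixed name (e.g. vmlaldavaq_s16) is called directly. Both RUN
// lines must produce the same IR, so a single set of CHECK lines covers both.
//
// The accumulating forms take a 64-bit accumulator, which the IR splits into
// two i32 halves (lshr + trunc for the high half, trunc for the low half),
// passes to the target intrinsic, and then reassembles from the returned
// { i32, i32 } pair with zext, shl and or.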

// CHECK-LABEL: @test_vmlaldavaq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlaldavaq_s16(int64_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmlaldavaq(a, b, c);
#else
  return vmlaldavaq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlaldavaq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlaldavaq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmlaldavaq(a, b, c);
#else
  return vmlaldavaq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlaldavaq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 1, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
uint64_t test_vmlaldavaq_u16(uint64_t a, uint16x8_t b, uint16x8_t c) {
#ifdef POLYMORPHIC
  return vmlaldavaq(a, b, c);
#else
  return vmlaldavaq_u16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlaldavaq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 1, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
uint64_t test_vmlaldavaq_u32(uint64_t a, uint32x4_t b, uint32x4_t c) {
#ifdef POLYMORPHIC
  return vmlaldavaq(a, b, c);
#else
  return vmlaldavaq_u32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlaldavaxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 1, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlaldavaxq_s16(int64_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmlaldavaxq(a, b, c);
#else
  return vmlaldavaxq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlaldavaxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 0, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlaldavaxq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmlaldavaxq(a, b, c);
#else
  return vmlaldavaxq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsldavaq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 1, i32 0, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlsldavaq_s16(int64_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmlsldavaq(a, b, c);
#else
  return vmlsldavaq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsldavaq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 1, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlsldavaq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmlsldavaq(a, b, c);
#else
  return vmlsldavaq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsldavaxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 1, i32 1, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlsldavaxq_s16(int64_t a, int16x8_t b, int16x8_t c) {
#ifdef POLYMORPHIC
  return vmlsldavaxq(a, b, c);
#else
  return vmlsldavaxq_s16(a, b, c);
#endif
}

// CHECK-LABEL: @test_vmlsldavaxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 1, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vmlsldavaxq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vmlsldavaxq(a, b, c);
#else
  return vmlsldavaxq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhaq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vrmlaldavhaq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vrmlaldavhaq(a, b, c);
#else
  return vrmlaldavhaq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhaq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 1, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
uint64_t test_vrmlaldavhaq_u32(uint64_t a, uint32x4_t b, uint32x4_t c) {
#ifdef POLYMORPHIC
  return vrmlaldavhaq(a, b, c);
#else
  return vrmlaldavhaq_u32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhaxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 0, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vrmlaldavhaxq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vrmlaldavhaxq(a, b, c);
#else
  return vrmlaldavhaxq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhaq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 1, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vrmlsldavhaq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vrmlsldavhaq(a, b, c);
#else
  return vrmlsldavhaq_s32(a, b, c);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhaxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 1, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP3]], 1
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32
// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { i32, i32 } [[TMP3]], 0
// CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP6]], [[TMP8]]
// CHECK-NEXT: ret i64 [[TMP9]]
//
int64_t test_vrmlsldavhaxq_s32(int64_t a, int32x4_t b, int32x4_t c) {
#ifdef POLYMORPHIC
  return vrmlsldavhaxq(a, b, c);
#else
  return vrmlsldavhaxq_s32(a, b, c);
#endif
}
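
// The _p ("predicated") variants below additionally take an mve_pred16_t.
// In the IR the 16-bit predicate is zero-extended to i32 and converted to a
// vector of i1 lanes with @llvm.arm.mve.pred.i2v, which is passed as the
// final operand of the .predicated form of the target intrinsic.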

// CHECK-LABEL: @test_vmlaldavaq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlaldavaq_p_s16(int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavaq_p(a, b, c, p);
#else
  return vmlaldavaq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavaq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlaldavaq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavaq_p(a, b, c, p);
#else
  return vmlaldavaq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavaq_p_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 1, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
uint64_t test_vmlaldavaq_p_u16(uint64_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavaq_p(a, b, c, p);
#else
  return vmlaldavaq_p_u16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavaq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
uint64_t test_vmlaldavaq_p_u32(uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavaq_p(a, b, c, p);
#else
  return vmlaldavaq_p_u32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavaxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 1, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlaldavaxq_p_s16(int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavaxq_p(a, b, c, p);
#else
  return vmlaldavaxq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavaxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlaldavaxq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavaxq_p(a, b, c, p);
#else
  return vmlaldavaxq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavaq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 0, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlsldavaq_p_s16(int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavaq_p(a, b, c, p);
#else
  return vmlsldavaq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavaq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlsldavaq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavaq_p(a, b, c, p);
#else
  return vmlsldavaq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavaxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 1, i32 [[TMP2]], i32 [[TMP1]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlsldavaxq_p_s16(int64_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavaxq_p(a, b, c, p);
#else
  return vmlsldavaxq_p_s16(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavaxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vmlsldavaxq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavaxq_p(a, b, c, p);
#else
  return vmlsldavaxq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhaq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vrmlaldavhaq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlaldavhaq_p(a, b, c, p);
#else
  return vrmlaldavhaq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhaq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
uint64_t test_vrmlaldavhaq_p_u32(uint64_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlaldavhaq_p(a, b, c, p);
#else
  return vrmlaldavhaq_p_u32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhaxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vrmlaldavhaxq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlaldavhaxq_p(a, b, c, p);
#else
  return vrmlaldavhaxq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhaq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vrmlsldavhaq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlsldavhaq_p(a, b, c, p);
#else
  return vrmlsldavhaq_p_s32(a, b, c, p);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhaxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
// CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[A]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 [[TMP2]], i32 [[TMP1]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP4]])
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP5]], 1
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 32
// CHECK-NEXT: [[TMP9:%.*]] = extractvalue { i32, i32 } [[TMP5]], 0
// CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]]
// CHECK-NEXT: ret i64 [[TMP11]]
//
int64_t test_vrmlsldavhaxq_p_s32(int64_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlsldavhaxq_p(a, b, c, p);
#else
  return vrmlsldavhaxq_p_s32(a, b, c, p);
#endif
}
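
// The remaining tests cover the non-accumulating forms (no 64-bit "a"
// argument). These lower to the same target intrinsics, with both i32
// accumulator-half operands set to zero, so only the reassembly of the
// { i32, i32 } result into an i64 remains.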

// CHECK-LABEL: @test_vmlaldavq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlaldavq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmlaldavq(a, b);
#else
  return vmlaldavq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmlaldavq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlaldavq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmlaldavq(a, b);
#else
  return vmlaldavq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmlaldavq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 1, i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
uint64_t test_vmlaldavq_u16(uint16x8_t a, uint16x8_t b) {
#ifdef POLYMORPHIC
  return vmlaldavq(a, b);
#else
  return vmlaldavq_u16(a, b);
#endif
}

// CHECK-LABEL: @test_vmlaldavq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 1, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
uint64_t test_vmlaldavq_u32(uint32x4_t a, uint32x4_t b) {
#ifdef POLYMORPHIC
  return vmlaldavq(a, b);
#else
  return vmlaldavq_u32(a, b);
#endif
}

// CHECK-LABEL: @test_vmlaldavxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlaldavxq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmlaldavxq(a, b);
#else
  return vmlaldavxq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmlaldavxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlaldavxq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmlaldavxq(a, b);
#else
  return vmlaldavxq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsldavq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 1, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlsldavq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmlsldavq(a, b);
#else
  return vmlsldavq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsldavq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlsldavq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmlsldavq(a, b);
#else
  return vmlsldavq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsldavxq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 1, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlsldavxq_s16(int16x8_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmlsldavxq(a, b);
#else
  return vmlsldavxq_s16(a, b);
#endif
}

// CHECK-LABEL: @test_vmlsldavxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.v4i32(i32 0, i32 1, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vmlsldavxq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmlsldavxq(a, b);
#else
  return vmlsldavxq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vrmlaldavhq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vrmlaldavhq(a, b);
#else
  return vrmlaldavhq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 1, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
uint64_t test_vrmlaldavhq_u32(uint32x4_t a, uint32x4_t b) {
#ifdef POLYMORPHIC
  return vrmlaldavhq(a, b);
#else
  return vrmlaldavhq_u32(a, b);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vrmlaldavhxq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vrmlaldavhxq(a, b);
#else
  return vrmlaldavhxq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vrmlsldavhq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vrmlsldavhq(a, b);
#else
  return vrmlsldavhq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhxq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.v4i32(i32 0, i32 1, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
// CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 32
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
// CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = or i64 [[TMP3]], [[TMP5]]
// CHECK-NEXT: ret i64 [[TMP6]]
//
int64_t test_vrmlsldavhxq_s32(int32x4_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vrmlsldavhxq(a, b);
#else
  return vrmlsldavhxq_s32(a, b);
#endif
}

// CHECK-LABEL: @test_vmlaldavq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlaldavq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavq_p(a, b, p);
#else
  return vmlaldavq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlaldavq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavq_p(a, b, p);
#else
  return vmlaldavq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavq_p_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 1, i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
uint64_t test_vmlaldavq_p_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavq_p(a, b, p);
#else
  return vmlaldavq_p_u16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
uint64_t test_vmlaldavq_p_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavq_p(a, b, p);
#else
  return vmlaldavq_p_u32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlaldavxq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavxq_p(a, b, p);
#else
  return vmlaldavxq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlaldavxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlaldavxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlaldavxq_p(a, b, p);
#else
  return vmlaldavxq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlsldavq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavq_p(a, b, p);
#else
  return vmlsldavq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlsldavq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavq_p(a, b, p);
#else
  return vmlsldavq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavxq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlsldavxq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavxq_p(a, b, p);
#else
  return vmlsldavxq_p_s16(a, b, p);
#endif
}

// CHECK-LABEL: @test_vmlsldavxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vmlldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vmlsldavxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmlsldavxq_p(a, b, p);
#else
  return vmlsldavxq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vrmlaldavhq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlaldavhq_p(a, b, p);
#else
  return vrmlaldavhq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
uint64_t test_vrmlaldavhq_p_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlaldavhq_p(a, b, p);
#else
  return vrmlaldavhq_p_u32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vrmlaldavhxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vrmlaldavhxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlaldavhxq_p(a, b, p);
#else
  return vrmlaldavhxq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vrmlsldavhq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlsldavhq_p(a, b, p);
#else
  return vrmlsldavhq_p_s32(a, b, p);
#endif
}

// CHECK-LABEL: @test_vrmlsldavhxq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call { i32, i32 } @llvm.arm.mve.vrmlldavha.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32 } [[TMP2]], 1
// CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 32
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i32 } [[TMP2]], 0
// CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
// CHECK-NEXT: [[TMP8:%.*]] = or i64 [[TMP5]], [[TMP7]]
// CHECK-NEXT: ret i64 [[TMP8]]
//
int64_t test_vrmlsldavhxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vrmlsldavhxq_p(a, b, p);
#else
  return vrmlsldavhxq_p_s32(a, b, p);
#endif
}