// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg -sroa | FileCheck %s
#include <arm_mve.h>
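// Each test below returns an mve_pred16_t produced by a vector compare. With
// -DPOLYMORPHIC the overloaded intrinsic name (e.g. vcmpeqq) is used;
// otherwise the explicitly suffixed form (e.g. vcmpeqq_f16) is called. Both
// RUN lines share the same CHECK prefix, so the two spellings are expected to
// lower to identical IR.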
// CHECK-LABEL: @test_vcmpeqq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_f16(float16x8_t a, float16x8_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_f16(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_f32(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_s8(int8x16_t a, int8x16_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_s8(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_s16(int16x8_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_s16(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_s32(int32x4_t a, int32x4_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_s32(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_u8(uint8x16_t a, uint8x16_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_u8(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_u16(uint16x8_t a, uint16x8_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_u16(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpeqq_u32(uint32x4_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
return vcmpeqq(a, b);
#else /* POLYMORPHIC */
return vcmpeqq_u32(a, b);
#endif /* POLYMORPHIC */
}
// CHECK-LABEL: @test_vcmpeqq_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_f16(float16x8_t a, float16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_f32(float32x4_t a, float32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_s8(int8x16_t a, int8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_s16(int16x8_t a, int16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_s32(int32x4_t a, int32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_u8(uint8x16_t a, uint8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_u8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_u16(uint16x8_t a, uint16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_u16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_n_u32(uint32x4_t a, uint32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_n_u32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpeqq_m_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpeqq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpeqq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpeqq_m_n_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp une <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_f16(float16x8_t a, float16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp une <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_f32(float32x4_t a, float32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_s8(int8x16_t a, int8x16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_s16(int16x8_t a, int16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_s32(int32x4_t a, int32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_u8(uint8x16_t a, uint8x16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_u8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_u16(uint16x8_t a, uint16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_u16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_u32(uint32x4_t a, uint32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_u32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp une <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_f16(float16x8_t a, float16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp une <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_f32(float32x4_t a, float32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_s8(int8x16_t a, int8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_s16(int16x8_t a, int16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_s32(int32x4_t a, int32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_u8(uint8x16_t a, uint8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_u8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_u16(uint16x8_t a, uint16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_u16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_n_u32(uint32x4_t a, uint32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_n_u32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp une <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp une <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp une <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp une <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpneq_m_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpneq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpneq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpneq_m_n_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_f16(float16x8_t a, float16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_f32(float32x4_t a, float32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_s8(int8x16_t a, int8x16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_s16(int16x8_t a, int16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_s32(int32x4_t a, int32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_u8(uint8x16_t a, uint8x16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_u8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_u16(uint16x8_t a, uint16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_u16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_u32(uint32x4_t a, uint32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_u32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_n_f16(float16x8_t a, float16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_n_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_n_f32(float32x4_t a, float32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_n_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpgeq_n_s8(int8x16_t a, int8_t b)
{
#ifdef POLYMORPHIC
return vcmpgeq(a, b);
#else /* POLYMORPHIC */
return vcmpgeq_n_s8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpgeq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpgeq_n_s16(int16x8_t a, int16_t b)
{
#ifdef POLYMORPHIC
return vcmpgeq(a, b);
#else /* POLYMORPHIC */
return vcmpgeq_n_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpgeq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpgeq_n_s32(int32x4_t a, int32_t b)
{
#ifdef POLYMORPHIC
return vcmpgeq(a, b);
#else /* POLYMORPHIC */
return vcmpgeq_n_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpcsq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpcsq_n_u8(uint8x16_t a, uint8_t b)
{
#ifdef POLYMORPHIC
return vcmpcsq(a, b);
#else /* POLYMORPHIC */
return vcmpcsq_n_u8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpcsq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpcsq_n_u16(uint16x8_t a, uint16_t b)
{
#ifdef POLYMORPHIC
return vcmpcsq(a, b);
#else /* POLYMORPHIC */
return vcmpcsq_n_u16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpcsq_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpcsq_n_u32(uint32x4_t a, uint32_t b)
{
#ifdef POLYMORPHIC
return vcmpcsq(a, b);
#else /* POLYMORPHIC */
return vcmpcsq_n_u32(a, b);
#endif /* POLYMORPHIC */
}

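// Note: the _m (predicated) variants below expand the mve_pred16_t argument to
// a vector predicate via llvm.arm.mve.pred.i2v and AND it with the comparison
// result, so lanes that are masked off yield a 0 bit in the returned predicate.
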
// CHECK-LABEL: @test_vcmpgeq_m_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpgeq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vcmpgeq_m(a, b, p);
#else /* POLYMORPHIC */
return vcmpgeq_m_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpgeq_m_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpgeq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vcmpgeq_m(a, b, p);
#else /* POLYMORPHIC */
return vcmpgeq_m_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpgeq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpgeq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vcmpgeq_m(a, b, p);
#else /* POLYMORPHIC */
return vcmpgeq_m_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpgeq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpgeq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vcmpgeq_m(a, b, p);
#else /* POLYMORPHIC */
return vcmpgeq_m_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpgeq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpgeq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
return vcmpgeq_m(a, b, p);
#else /* POLYMORPHIC */
return vcmpgeq_m_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpcsq_m_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_m_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_m_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_m_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_m_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_m_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_m_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_m_n_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_m_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_m_n_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_m_n_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_m_n_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgeq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgeq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgeq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgeq_m_n_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_m_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_m_n_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_m_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_m_n_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpcsq_m_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpcsq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpcsq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpcsq_m_n_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_f16(float16x8_t a, float16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_f32(float32x4_t a, float32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_s8(int8x16_t a, int8x16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_s16(int16x8_t a, int16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_s32(int32x4_t a, int32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_u8(uint8x16_t a, uint8x16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_u8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_u16(uint16x8_t a, uint16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_u16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_u32(uint32x4_t a, uint32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_u32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_n_f16(float16x8_t a, float16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_n_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_n_f32(float32x4_t a, float32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_n_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_n_s8(int8x16_t a, int8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_n_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_n_s16(int16x8_t a, int16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_n_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_n_s32(int32x4_t a, int32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_n_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_n_u8(uint8x16_t a, uint8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_n_u8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_n_u16(uint16x8_t a, uint16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_n_u16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_n_u32(uint32x4_t a, uint32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_n_u32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_m_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_m_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_m_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_m_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_m_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_m_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_m_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_m_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_m_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_n_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_n_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_n_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_n_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpgtq_m_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpgtq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpgtq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpgtq_m_n_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_m_n_u8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_m_n_u8(uint8x16_t a, uint8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_m_n_u8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_m_n_u16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_m_n_u16(uint16x8_t a, uint16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_m_n_u16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmphiq_m_n_u32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmphiq_m_n_u32(uint32x4_t a, uint32_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmphiq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmphiq_m_n_u32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_f16(float16x8_t a, float16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_f32(float32x4_t a, float32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_s8(int8x16_t a, int8x16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_s16(int16x8_t a, int16x8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_s32(int32x4_t a, int32x4_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_n_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_n_f16(float16x8_t a, float16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_n_f16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_n_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_n_f32(float32x4_t a, float32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_n_f32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_n_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_n_s8(int8x16_t a, int8_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_n_s8(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_n_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_n_s16(int16x8_t a, int16_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_n_s16(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_n_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
|
|
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[DOTSPLAT]]
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP2]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_n_s32(int32x4_t a, int32_t b)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq(a, b);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_n_s32(a, b);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_m_f16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_m_f16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_m_f32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_m_f32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_m_s8(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_m_s8(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_m_s16(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_m_s16(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_m_s32(
|
|
// CHECK-NEXT: entry:
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
|
|
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[B:%.*]]
|
|
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
|
|
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
|
|
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
|
|
// CHECK-NEXT: ret i16 [[TMP5]]
|
|
//
|
|
mve_pred16_t test_vcmpleq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
|
|
{
|
|
#ifdef POLYMORPHIC
|
|
return vcmpleq_m(a, b, p);
|
|
#else /* POLYMORPHIC */
|
|
return vcmpleq_m_s32(a, b, p);
|
|
#endif /* POLYMORPHIC */
|
|
}
|
|
|
|
// CHECK-LABEL: @test_vcmpleq_m_n_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpleq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpleq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpleq_m_n_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpleq_m_n_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpleq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpleq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpleq_m_n_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpleq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpleq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpleq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpleq_m_n_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpleq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpleq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpleq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpleq_m_n_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpleq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpleq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpleq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpleq_m_n_s32(a, b, p);
#endif /* POLYMORPHIC */
}

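/*
 * The _m_n_ tests above show that the scalar operand b is first splatted with
 * an insertelement/shufflevector pair and then fed to the same predicated
 * compare pattern as the full-vector forms.
 *
 * Illustrative sketch (not part of the autogenerated checks): one plausible
 * way to combine a predicated compare-with-scalar with a lane select, assuming
 * vpselq_s32 and vdupq_n_s32 from <arm_mve.h>. Lanes where p is set and
 * a <= limit keep a; all other lanes take the splatted limit.
 */
static inline int32x4_t clamp_to_limit_sketch(int32x4_t a, int32_t limit,
                                              mve_pred16_t p)
{
  mve_pred16_t le = vcmpleq_m_n_s32(a, limit, p); /* predicated a <= limit */
  return vpselq_s32(a, vdupq_n_s32(limit), le);   /* per-lane select */
}
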
// CHECK-LABEL: @test_vcmpltq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_f16(float16x8_t a, float16x8_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_f16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_f32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_s8(int8x16_t a, int8x16_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_s8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_s16(int16x8_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_s32(int32x4_t a, int32x4_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_s32(a, b);
#endif /* POLYMORPHIC */
}

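/*
 * The unpredicated vcmpltq_n_* tests below check the same splat-then-compare
 * lowering (insertelement/shufflevector followed by fcmp olt or icmp slt)
 * without an input predicate: the i1 vector is converted straight to the
 * 16-bit mve_pred16_t result.
 */
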
// CHECK-LABEL: @test_vcmpltq_n_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_n_f16(float16x8_t a, float16_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_n_f16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_n_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_n_f32(float32x4_t a, float32_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_n_f32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_n_s8(int8x16_t a, int8_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_n_s8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_n_s16(int16x8_t a, int16_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_n_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
mve_pred16_t test_vcmpltq_n_s32(int32x4_t a, int32_t b)
{
#ifdef POLYMORPHIC
  return vcmpltq(a, b);
#else /* POLYMORPHIC */
  return vcmpltq_n_s32(a, b);
#endif /* POLYMORPHIC */
}

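/*
 * The vcmpltq_m_* tests below follow the same AND-with-input-predicate
 * pattern as the vcmpleq_m_* tests earlier, with fcmp olt / icmp slt as the
 * element-wise comparison.
 */
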
// CHECK-LABEL: @test_vcmpltq_m_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_n_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_n_f16(float16x8_t a, float16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_n_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_n_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_n_f32(float32x4_t a, float32_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_n_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_n_s8(int8x16_t a, int8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_n_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_n_s16(int16x8_t a, int16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_n_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vcmpltq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i32 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
// CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT: ret i16 [[TMP5]]
//
mve_pred16_t test_vcmpltq_m_n_s32(int32x4_t a, int32_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vcmpltq_m(a, b, p);
#else /* POLYMORPHIC */
  return vcmpltq_m_n_s32(a, b, p);
#endif /* POLYMORPHIC */
}

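/*
 * Illustrative sketch (not part of the autogenerated checks): a plausible
 * predicated per-lane minimum built on vcmpltq_m_s16, assuming vpselq_s16
 * from <arm_mve.h>. Where p is set and a < b the lane takes a; otherwise it
 * takes b.
 */
static inline int16x8_t min_where_sketch(int16x8_t a, int16x8_t b,
                                         mve_pred16_t p)
{
  mve_pred16_t lt = vcmpltq_m_s16(a, b, p); /* predicated a < b */
  return vpselq_s16(a, b, lt);              /* select a or b per lane */
}
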