// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_mve.h>

// CHECK-LABEL: @test_vshlq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <16 x i8> [[A:%.*]], <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vshlq_n_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 5);
#else /* POLYMORPHIC */
  return vshlq_n_s8(a, 5);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <8 x i16> [[A:%.*]], <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vshlq_n_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 5);
#else /* POLYMORPHIC */
  return vshlq_n_s16(a, 5);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <4 x i32> [[A:%.*]], <i32 18, i32 18, i32 18, i32 18>
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vshlq_n_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 18);
#else /* POLYMORPHIC */
  return vshlq_n_s32(a, 18);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_s8_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vshlq_n_s8_trivial(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 0);
#else /* POLYMORPHIC */
  return vshlq_n_s8(a, 0);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_s16_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <8 x i16> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vshlq_n_s16_trivial(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 0);
#else /* POLYMORPHIC */
  return vshlq_n_s16(a, 0);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_s32_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <4 x i32> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vshlq_n_s32_trivial(int32x4_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 0);
#else /* POLYMORPHIC */
  return vshlq_n_s32(a, 0);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <16 x i8> [[A:%.*]], <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vshlq_n_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 3);
#else /* POLYMORPHIC */
  return vshlq_n_u8(a, 3);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <8 x i16> [[A:%.*]], <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vshlq_n_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 11);
#else /* POLYMORPHIC */
  return vshlq_n_u16(a, 11);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <4 x i32> [[A:%.*]], <i32 7, i32 7, i32 7, i32 7>
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vshlq_n_u32(uint32x4_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 7);
#else /* POLYMORPHIC */
  return vshlq_n_u32(a, 7);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_u8_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vshlq_n_u8_trivial(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 0);
#else /* POLYMORPHIC */
  return vshlq_n_u8(a, 0);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_u16_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <8 x i16> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vshlq_n_u16_trivial(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 0);
#else /* POLYMORPHIC */
  return vshlq_n_u16(a, 0);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_n_u32_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shl <4 x i32> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vshlq_n_u32_trivial(uint32x4_t a)
{
#ifdef POLYMORPHIC
  return vshlq_n(a, 0);
#else /* POLYMORPHIC */
  return vshlq_n_u32(a, 0);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = ashr <16 x i8> [[A:%.*]], <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vshrq_n_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 4);
#else /* POLYMORPHIC */
  return vshrq_n_s8(a, 4);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = ashr <8 x i16> [[A:%.*]], <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vshrq_n_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 10);
#else /* POLYMORPHIC */
  return vshrq_n_s16(a, 10);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = ashr <4 x i32> [[A:%.*]], <i32 19, i32 19, i32 19, i32 19>
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vshrq_n_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 19);
#else /* POLYMORPHIC */
  return vshrq_n_s32(a, 19);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_s8_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = ashr <16 x i8> [[A:%.*]], <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vshrq_n_s8_trivial(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 8);
#else /* POLYMORPHIC */
  return vshrq_n_s8(a, 8);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_s16_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = ashr <8 x i16> [[A:%.*]], <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vshrq_n_s16_trivial(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 16);
#else /* POLYMORPHIC */
  return vshrq_n_s16(a, 16);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_s32_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = ashr <4 x i32> [[A:%.*]], <i32 31, i32 31, i32 31, i32 31>
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vshrq_n_s32_trivial(int32x4_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 32);
#else /* POLYMORPHIC */
  return vshrq_n_s32(a, 32);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr <16 x i8> [[A:%.*]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vshrq_n_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 1);
#else /* POLYMORPHIC */
  return vshrq_n_u8(a, 1);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr <8 x i16> [[A:%.*]], <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vshrq_n_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 10);
#else /* POLYMORPHIC */
  return vshrq_n_u16(a, 10);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = lshr <4 x i32> [[A:%.*]], <i32 10, i32 10, i32 10, i32 10>
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vshrq_n_u32(uint32x4_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 10);
#else /* POLYMORPHIC */
  return vshrq_n_u32(a, 10);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_u8_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <16 x i8> zeroinitializer
//
uint8x16_t test_vshrq_n_u8_trivial(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 8);
#else /* POLYMORPHIC */
  return vshrq_n_u8(a, 8);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_u16_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <8 x i16> zeroinitializer
//
uint16x8_t test_vshrq_n_u16_trivial(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 16);
#else /* POLYMORPHIC */
  return vshrq_n_u16(a, 16);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_n_u32_trivial(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <4 x i32> zeroinitializer
//
uint32x4_t test_vshrq_n_u32_trivial(uint32x4_t a)
{
#ifdef POLYMORPHIC
  return vshrq(a, 32);
#else /* POLYMORPHIC */
  return vshrq_n_u32(a, 32);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shl.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 6, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vshlq_m_n_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_m_n(inactive, a, 6, p);
#else /* POLYMORPHIC */
  return vshlq_m_n_s8(inactive, a, 6, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shl.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 13, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshlq_m_n_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_m_n(inactive, a, 13, p);
#else /* POLYMORPHIC */
  return vshlq_m_n_s16(inactive, a, 13, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shl.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshlq_m_n_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_m_n(inactive, a, 0, p);
#else /* POLYMORPHIC */
  return vshlq_m_n_s32(inactive, a, 0, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_m_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shl.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 3, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vshlq_m_n_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_m_n(inactive, a, 3, p);
#else /* POLYMORPHIC */
  return vshlq_m_n_u8(inactive, a, 3, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_m_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shl.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshlq_m_n_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_m_n(inactive, a, 1, p);
#else /* POLYMORPHIC */
  return vshlq_m_n_u16(inactive, a, 1, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_m_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shl.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 24, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshlq_m_n_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_m_n(inactive, a, 24, p);
#else /* POLYMORPHIC */
  return vshlq_m_n_u32(inactive, a, 24, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 2, i32 0, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vshrq_m_n_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_m(inactive, a, 2, p);
#else /* POLYMORPHIC */
  return vshrq_m_n_s8(inactive, a, 2, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 3, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshrq_m_n_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_m(inactive, a, 3, p);
#else /* POLYMORPHIC */
  return vshrq_m_n_s16(inactive, a, 3, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 13, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshrq_m_n_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_m(inactive, a, 13, p);
#else /* POLYMORPHIC */
  return vshrq_m_n_s32(inactive, a, 13, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_m_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 4, i32 1, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vshrq_m_n_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_m(inactive, a, 4, p);
#else /* POLYMORPHIC */
  return vshrq_m_n_u8(inactive, a, 4, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_m_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 14, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshrq_m_n_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_m(inactive, a, 14, p);
#else /* POLYMORPHIC */
  return vshrq_m_n_u16(inactive, a, 14, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_m_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 21, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshrq_m_n_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_m(inactive, a, 21, p);
#else /* POLYMORPHIC */
  return vshrq_m_n_u32(inactive, a, 21, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_x_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shl.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vshlq_x_n_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_x_n(a, 1, p);
#else /* POLYMORPHIC */
  return vshlq_x_n_s8(a, 1, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_x_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shl.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 15, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshlq_x_n_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_x_n(a, 15, p);
#else /* POLYMORPHIC */
  return vshlq_x_n_s16(a, 15, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_x_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shl.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 13, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshlq_x_n_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_x_n(a, 13, p);
#else /* POLYMORPHIC */
  return vshlq_x_n_s32(a, 13, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_x_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shl.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 4, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vshlq_x_n_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_x_n(a, 4, p);
#else /* POLYMORPHIC */
  return vshlq_x_n_u8(a, 4, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_x_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shl.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 10, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshlq_x_n_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_x_n(a, 10, p);
#else /* POLYMORPHIC */
  return vshlq_x_n_u16(a, 10, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlq_x_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shl.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 30, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshlq_x_n_u32(uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlq_x_n(a, 30, p);
#else /* POLYMORPHIC */
  return vshlq_x_n_u32(a, 30, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_x_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 4, i32 0, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vshrq_x_n_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_x(a, 4, p);
#else /* POLYMORPHIC */
  return vshrq_x_n_s8(a, 4, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_x_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 10, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshrq_x_n_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_x(a, 10, p);
#else /* POLYMORPHIC */
  return vshrq_x_n_s16(a, 10, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_x_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 7, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshrq_x_n_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_x(a, 7, p);
#else /* POLYMORPHIC */
  return vshrq_x_n_s32(a, 7, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_x_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.shr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 7, i32 1, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vshrq_x_n_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_x(a, 7, p);
#else /* POLYMORPHIC */
  return vshrq_x_n_u8(a, 7, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_x_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.shr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 7, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshrq_x_n_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_x(a, 7, p);
#else /* POLYMORPHIC */
  return vshrq_x_n_u16(a, 7, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshrq_x_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.shr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 6, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshrq_x_n_u32(uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshrq_x(a, 6, p);
#else /* POLYMORPHIC */
  return vshrq_x_n_u32(a, 6, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqshl.imm.v16i8(<16 x i8> [[A:%.*]], i32 3, i32 0)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vqshlq_n_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vqshlq_n(a, 3);
#else /* POLYMORPHIC */
  return vqshlq_n_s8(a, 3);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqshl.imm.v8i16(<8 x i16> [[A:%.*]], i32 4, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vqshlq_n_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vqshlq_n(a, 4);
#else /* POLYMORPHIC */
  return vqshlq_n_s16(a, 4);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqshl.imm.v4i32(<4 x i32> [[A:%.*]], i32 4, i32 0)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vqshlq_n_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
  return vqshlq_n(a, 4);
#else /* POLYMORPHIC */
  return vqshlq_n_s32(a, 4);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqshl.imm.v16i8(<16 x i8> [[A:%.*]], i32 0, i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqshlq_n_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vqshlq_n(a, 0);
#else /* POLYMORPHIC */
  return vqshlq_n_u8(a, 0);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqshl.imm.v8i16(<8 x i16> [[A:%.*]], i32 13, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqshlq_n_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vqshlq_n(a, 13);
#else /* POLYMORPHIC */
  return vqshlq_n_u16(a, 13);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqshl.imm.v4i32(<4 x i32> [[A:%.*]], i32 6, i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vqshlq_n_u32(uint32x4_t a)
{
#ifdef POLYMORPHIC
  return vqshlq_n(a, 6);
#else /* POLYMORPHIC */
  return vqshlq_n_u32(a, 6);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshluq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vqshlu.imm.v16i8(<16 x i8> [[A:%.*]], i32 5)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqshluq_n_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vqshluq(a, 5);
#else /* POLYMORPHIC */
  return vqshluq_n_s8(a, 5);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshluq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vqshlu.imm.v8i16(<8 x i16> [[A:%.*]], i32 5)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqshluq_n_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vqshluq(a, 5);
#else /* POLYMORPHIC */
  return vqshluq_n_s16(a, 5);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshluq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vqshlu.imm.v4i32(<4 x i32> [[A:%.*]], i32 4)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vqshluq_n_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
  return vqshluq(a, 4);
#else /* POLYMORPHIC */
  return vqshluq_n_s32(a, 4);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vrshr.imm.v16i8(<16 x i8> [[A:%.*]], i32 4, i32 0)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vrshrq_n_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vrshrq(a, 4);
#else /* POLYMORPHIC */
  return vrshrq_n_s8(a, 4);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vrshr.imm.v8i16(<8 x i16> [[A:%.*]], i32 12, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vrshrq_n_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vrshrq(a, 12);
#else /* POLYMORPHIC */
  return vrshrq_n_s16(a, 12);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vrshr.imm.v4i32(<4 x i32> [[A:%.*]], i32 30, i32 0)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vrshrq_n_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
  return vrshrq(a, 30);
#else /* POLYMORPHIC */
  return vrshrq_n_s32(a, 30);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vrshr.imm.v16i8(<16 x i8> [[A:%.*]], i32 1, i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vrshrq_n_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vrshrq(a, 1);
#else /* POLYMORPHIC */
  return vrshrq_n_u8(a, 1);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vrshr.imm.v8i16(<8 x i16> [[A:%.*]], i32 15, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vrshrq_n_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vrshrq(a, 15);
#else /* POLYMORPHIC */
  return vrshrq_n_u16(a, 15);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vrshr.imm.v4i32(<4 x i32> [[A:%.*]], i32 20, i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vrshrq_n_u32(uint32x4_t a)
{
#ifdef POLYMORPHIC
  return vrshrq(a, 20);
#else /* POLYMORPHIC */
  return vrshrq_n_u32(a, 20);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqshl.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 6, i32 0, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqshlq_m_n_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshlq_m_n(inactive, a, 6, p);
#else /* POLYMORPHIC */
  return vqshlq_m_n_s8(inactive, a, 6, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqshl.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 13, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqshlq_m_n_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshlq_m_n(inactive, a, 13, p);
#else /* POLYMORPHIC */
  return vqshlq_m_n_s16(inactive, a, 13, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqshl.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 14, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqshlq_m_n_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshlq_m_n(inactive, a, 14, p);
#else /* POLYMORPHIC */
  return vqshlq_m_n_s32(inactive, a, 14, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_m_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqshl.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 4, i32 1, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqshlq_m_n_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshlq_m_n(inactive, a, 4, p);
#else /* POLYMORPHIC */
  return vqshlq_m_n_u8(inactive, a, 4, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_m_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqshl.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 9, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqshlq_m_n_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshlq_m_n(inactive, a, 9, p);
#else /* POLYMORPHIC */
  return vqshlq_m_n_u16(inactive, a, 9, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshlq_m_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqshl.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 25, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vqshlq_m_n_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshlq_m_n(inactive, a, 25, p);
#else /* POLYMORPHIC */
  return vqshlq_m_n_u32(inactive, a, 25, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshluq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vqshlu.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 2, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqshluq_m_n_s8(uint8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshluq_m(inactive, a, 2, p);
#else /* POLYMORPHIC */
  return vqshluq_m_n_s8(inactive, a, 2, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshluq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vqshlu.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 12, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqshluq_m_n_s16(uint16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshluq_m(inactive, a, 12, p);
#else /* POLYMORPHIC */
  return vqshluq_m_n_s16(inactive, a, 12, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqshluq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vqshlu.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 24, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vqshluq_m_n_s32(uint32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vqshluq_m(inactive, a, 24, p);
#else /* POLYMORPHIC */
  return vqshluq_m_n_s32(inactive, a, 24, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrshr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 2, i32 0, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrshrq_m_n_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_m(inactive, a, 2, p);
#else /* POLYMORPHIC */
  return vrshrq_m_n_s8(inactive, a, 2, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrshr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 11, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vrshrq_m_n_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_m(inactive, a, 11, p);
#else /* POLYMORPHIC */
  return vrshrq_m_n_s16(inactive, a, 11, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_m_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrshr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 24, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vrshrq_m_n_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_m(inactive, a, 24, p);
#else /* POLYMORPHIC */
  return vrshrq_m_n_s32(inactive, a, 24, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_m_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrshr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 7, i32 1, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrshrq_m_n_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_m(inactive, a, 7, p);
#else /* POLYMORPHIC */
  return vrshrq_m_n_u8(inactive, a, 7, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_m_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrshr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 4, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vrshrq_m_n_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_m(inactive, a, 4, p);
#else /* POLYMORPHIC */
  return vrshrq_m_n_u16(inactive, a, 4, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_m_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrshr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 27, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vrshrq_m_n_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_m(inactive, a, 27, p);
#else /* POLYMORPHIC */
  return vrshrq_m_n_u32(inactive, a, 27, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_x_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrshr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 3, i32 0, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrshrq_x_n_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_x(a, 3, p);
#else /* POLYMORPHIC */
  return vrshrq_x_n_s8(a, 3, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_x_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrshr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 12, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vrshrq_x_n_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_x(a, 12, p);
#else /* POLYMORPHIC */
  return vrshrq_x_n_s16(a, 12, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_x_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrshr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 20, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vrshrq_x_n_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_x(a, 20, p);
#else /* POLYMORPHIC */
  return vrshrq_x_n_s32(a, 20, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_x_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrshr.imm.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 1, i32 1, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrshrq_x_n_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_x(a, 1, p);
#else /* POLYMORPHIC */
  return vrshrq_x_n_u8(a, 1, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_x_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrshr.imm.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 13, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vrshrq_x_n_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_x(a, 13, p);
#else /* POLYMORPHIC */
  return vrshrq_x_n_u16(a, 13, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrshrq_x_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrshr.imm.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 6, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vrshrq_x_n_u32(uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vrshrq_x(a, 6, p);
#else /* POLYMORPHIC */
  return vrshrq_x_n_u32(a, 6, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.v8i16.v16i8(<16 x i8> [[A:%.*]], i32 2, i32 0, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vshllbq_n_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vshllbq(a, 2);
#else /* POLYMORPHIC */
  return vshllbq_n_s8(a, 2);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.v4i32.v8i16(<8 x i16> [[A:%.*]], i32 13, i32 0, i32 0)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vshllbq_n_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vshllbq(a, 13);
#else /* POLYMORPHIC */
  return vshllbq_n_s16(a, 13);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.v8i16.v16i8(<16 x i8> [[A:%.*]], i32 5, i32 1, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vshllbq_n_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vshllbq(a, 5);
#else /* POLYMORPHIC */
  return vshllbq_n_u8(a, 5);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.v4i32.v8i16(<8 x i16> [[A:%.*]], i32 6, i32 1, i32 0)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vshllbq_n_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vshllbq(a, 6);
#else /* POLYMORPHIC */
  return vshllbq_n_u16(a, 6);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.v8i16.v16i8(<16 x i8> [[A:%.*]], i32 7, i32 0, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vshlltq_n_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
  return vshlltq(a, 7);
#else /* POLYMORPHIC */
  return vshlltq_n_s8(a, 7);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.v4i32.v8i16(<8 x i16> [[A:%.*]], i32 2, i32 0, i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vshlltq_n_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
  return vshlltq(a, 2);
#else /* POLYMORPHIC */
  return vshlltq_n_s16(a, 2);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.v8i16.v16i8(<16 x i8> [[A:%.*]], i32 7, i32 1, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vshlltq_n_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
  return vshlltq(a, 7);
#else /* POLYMORPHIC */
  return vshlltq_n_u8(a, 7);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.v4i32.v8i16(<8 x i16> [[A:%.*]], i32 14, i32 1, i32 1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vshlltq_n_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
  return vshlltq(a, 14);
#else /* POLYMORPHIC */
  return vshlltq_n_u16(a, 14);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 6, i32 0, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshllbq_m_n_s8(int16x8_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_m(inactive, a, 6, p);
#else /* POLYMORPHIC */
  return vshllbq_m_n_s8(inactive, a, 6, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 10, i32 0, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshllbq_m_n_s16(int32x4_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_m(inactive, a, 10, p);
#else /* POLYMORPHIC */
  return vshllbq_m_n_s16(inactive, a, 10, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_m_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 3, i32 1, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshllbq_m_n_u8(uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_m(inactive, a, 3, p);
#else /* POLYMORPHIC */
  return vshllbq_m_n_u8(inactive, a, 3, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_m_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 14, i32 1, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshllbq_m_n_u16(uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_m(inactive, a, 14, p);
#else /* POLYMORPHIC */
  return vshllbq_m_n_u16(inactive, a, 14, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_m_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 4, i32 0, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshlltq_m_n_s8(int16x8_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_m(inactive, a, 4, p);
#else /* POLYMORPHIC */
  return vshlltq_m_n_s8(inactive, a, 4, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_m_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 12, i32 0, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshlltq_m_n_s16(int32x4_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_m(inactive, a, 12, p);
#else /* POLYMORPHIC */
  return vshlltq_m_n_s16(inactive, a, 12, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_m_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 2, i32 1, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshlltq_m_n_u8(uint16x8_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_m(inactive, a, 2, p);
#else /* POLYMORPHIC */
  return vshlltq_m_n_u8(inactive, a, 2, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_m_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 9, i32 1, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshlltq_m_n_u16(uint32x4_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_m(inactive, a, 9, p);
#else /* POLYMORPHIC */
  return vshlltq_m_n_u16(inactive, a, 9, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_x_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 1, i32 0, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshllbq_x_n_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_x(a, 1, p);
#else /* POLYMORPHIC */
  return vshllbq_x_n_s8(a, 1, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_x_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 10, i32 0, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshllbq_x_n_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_x(a, 10, p);
#else /* POLYMORPHIC */
  return vshllbq_x_n_s16(a, 10, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_x_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 6, i32 1, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshllbq_x_n_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_x(a, 6, p);
#else /* POLYMORPHIC */
  return vshllbq_x_n_u8(a, 6, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshllbq_x_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 10, i32 1, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshllbq_x_n_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshllbq_x(a, 10, p);
#else /* POLYMORPHIC */
  return vshllbq_x_n_u16(a, 10, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_x_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 2, i32 0, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vshlltq_x_n_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_x(a, 2, p);
#else /* POLYMORPHIC */
  return vshlltq_x_n_s8(a, 2, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_x_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 6, i32 0, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vshlltq_x_n_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_x(a, 6, p);
#else /* POLYMORPHIC */
  return vshlltq_x_n_s16(a, 6, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_x_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshll.imm.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 5, i32 1, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vshlltq_x_n_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_x(a, 5, p);
#else /* POLYMORPHIC */
  return vshlltq_x_n_u8(a, 5, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vshlltq_x_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshll.imm.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 3, i32 1, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vshlltq_x_n_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
  return vshlltq_x(a, 3, p);
#else /* POLYMORPHIC */
  return vshlltq_x_n_u16(a, 3, p);
#endif /* POLYMORPHIC */
}