; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning
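; The tests below check that each llvm.aarch64.sve.* intrinsic is lowered to the
; corresponding predicated SVE instruction.

;
; MUL (predicated integer multiply)
;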
define <vscale x 16 x i8> @mul_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: mul_i8:
; CHECK: mul z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @mul_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: mul_i16:
; CHECK: mul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @mul_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: mul_i32:
; CHECK: mul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @mul_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: mul_i64:
; CHECK: mul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
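;
; SMULH (predicated signed multiply, returning the high half of the result)
;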
define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smulh_i8:
; CHECK: smulh z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smulh_i16:
; CHECK: smulh z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smulh_i32:
; CHECK: smulh z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smulh_i64:
; CHECK: smulh z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
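;
; UMULH (predicated unsigned multiply, returning the high half of the result)
;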
define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umulh_i8:
; CHECK: umulh z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umulh_i16:
; CHECK: umulh z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umulh_i32:
; CHECK: umulh z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umulh_i64:
; CHECK: umulh z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
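; Intrinsic declarations.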
declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)