; llvm-for-llvmta/test/CodeGen/VE/VELIntrinsics/vdiv.ll
; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
;;; Test vector divide intrinsic instructions
;;;
;;; Note:
;;; We test VDIV*vvl, VDIV*vvl_v, VDIV*rvl, VDIV*rvl_v, VDIV*ivl,
;;; VDIV*ivl_v, VDIV*vvml_v, VDIV*rvml_v, VDIV*ivml_v, VDIV*vrl,
;;; VDIV*vrl_v, VDIV*vil, VDIV*vil_v, VDIV*vrml_v, and VDIV*viml_v
;;; instructions.
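;;;
;;; Each routine below calls a single llvm.ve.vl.vdiv* intrinsic with an
;;; explicit vector length and checks the selected VE code: the length is
;;; materialized with lea/lvl, the divide takes vector, scalar (%s0), or
;;; immediate (8) operands, masked forms add %vm1, and variants that carry a
;;; pass-through vector copy the result back with "vor %v0, (0)1, ..." at
;;; full vector length.
;;; vdivu.l (unsigned 64-bit divide) tests.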
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivul_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vvvl(<256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivul_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vsvl(i64 %0, <256 x double> %1) {
; CHECK-LABEL: vdivul_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.l %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vsvl(i64 %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vsvl(i64, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivul_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.l %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vsvvl(i64, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivul_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vsvl(i64 8, <256 x double> %0, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivul_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivul_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivul_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.l %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivul_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: vdivul_vvsl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.l %v0, %v0, %s0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvsl(<256 x double> %0, i64 %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vvsl(<256 x double>, i64, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: vdivul_vvsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.l %v1, %v0, %s0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vvsvl(<256 x double>, i64, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivul_vvsl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v0, %v0, 8
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvsl(<256 x double> %0, i64 8, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivul_vvsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v1, %v0, 8
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivul_vvsmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.l %v1, %v0, %s0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivul.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivul_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivul_vvsmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.l %v1, %v0, 8, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivul.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
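;;; vdivu.w (unsigned 32-bit divide) tests; 32-bit scalar operands are first
;;; zero-extended with "and %s0, %s0, (32)0".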
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivuw_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vvvl(<256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivuw_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vdivuw_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.w %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vsvl(i32 %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vsvl(i32, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivuw_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.w %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vsvvl(i32, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivuw_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vsvl(i32 8, <256 x double> %0, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivuw_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivuw_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivuw_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.w %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivuw_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvsl(<256 x double> %0, i32 signext %1) {
; CHECK-LABEL: vdivuw_vvsl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.w %v0, %v0, %s0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvsl(<256 x double> %0, i32 %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vvsl(<256 x double>, i32, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
; CHECK-LABEL: vdivuw_vvsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.w %v1, %v0, %s0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vvsvl(<256 x double>, i32, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivuw_vvsl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v0, %v0, 8
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvsl(<256 x double> %0, i32 8, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivuw_vvsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v1, %v0, 8
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivuw_vvsmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivu.w %v1, %v0, %s0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivuw.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivuw_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivuw_vvsmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivu.w %v1, %v0, 8, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivuw.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
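;;; vdivs.w.sx (signed 32-bit divide, sign-extending the 32-bit result) tests.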
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivswsx_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vvvl(<256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswsx_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vdivswsx_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.sx %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vsvl(i32, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswsx_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.sx %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivswsx_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vsvl(i32 8, <256 x double> %0, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivswsx_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivswsx_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivswsx_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.sx %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswsx_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvsl(<256 x double> %0, i32 signext %1) {
; CHECK-LABEL: vdivswsx_vvsl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.sx %v0, %v0, %s0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvsl(<256 x double> %0, i32 %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vvsl(<256 x double>, i32, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
; CHECK-LABEL: vdivswsx_vvsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.sx %v1, %v0, %s0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vvsvl(<256 x double>, i32, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivswsx_vvsl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v0, %v0, 8
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvsl(<256 x double> %0, i32 8, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivswsx_vvsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v1, %v0, 8
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivswsx_vvsmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.sx %v1, %v0, %s0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswsx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswsx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswsx_vvsmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.sx %v1, %v0, 8, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswsx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
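;;; vdivs.w.zx (signed 32-bit divide, zero-extending the 32-bit result) tests.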
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivswzx_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vvvl(<256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswzx_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vsvl(i32 signext %0, <256 x double> %1) {
; CHECK-LABEL: vdivswzx_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.zx %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vsvl(i32 %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vsvl(i32, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswzx_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.zx %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vsvvl(i32, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivswzx_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vsvl(i32 8, <256 x double> %0, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivswzx_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivswzx_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivswzx_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.zx %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswzx_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvsl(<256 x double> %0, i32 signext %1) {
; CHECK-LABEL: vdivswzx_vvsl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.zx %v0, %v0, %s0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvsl(<256 x double> %0, i32 %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vvsl(<256 x double>, i32, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
; CHECK-LABEL: vdivswzx_vvsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.zx %v1, %v0, %s0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vvsvl(<256 x double>, i32, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivswzx_vvsl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v0, %v0, 8
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvsl(<256 x double> %0, i32 8, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivswzx_vvsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v1, %v0, 8
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivswzx_vvsmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.w.zx %v1, %v0, %s0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivswzx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivswzx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivswzx_vvsmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.w.zx %v1, %v0, 8, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivswzx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
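;;; vdivs.l (signed 64-bit divide) tests.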
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivsl_vvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v0, %v0, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vvvl(<256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivsl_vvvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v2, %v0, %v1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vsvl(i64 %0, <256 x double> %1) {
; CHECK-LABEL: vdivsl_vsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.l %v0, %s0, %v0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vsvl(i64 %0, <256 x double> %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vsvl(i64, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vdivsl_vsvvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.l %v1, %s0, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vsvvl(i64, <256 x double>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vsvl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivsl_vsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v0, 8, %v0
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vsvl(i64 8, <256 x double> %0, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivsl_vsvvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v1, 8, %v0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivsl_vvvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v2, %v0, %v1, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v2
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivsl_vsvmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.l %v1, %s0, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivsl_vsvmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v1, 8, %v0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: vdivsl_vvsl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 256
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.l %v0, %v0, %s0
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvsl(<256 x double> %0, i64 %1, i32 256)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vvsl(<256 x double>, i64, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: vdivsl_vvsvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.l %v1, %v0, %s0
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vvsvl(<256 x double>, i64, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vdivsl_vvsl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 256
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v0, %v0, 8
; CHECK-NEXT: b.l.t (, %s10)
%2 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvsl(<256 x double> %0, i64 8, i32 256)
ret <256 x double> %2
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vdivsl_vvsvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v1, %v0, 8
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%3 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
ret <256 x double> %3
}
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vdivsl_vvsmvl:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s1, 128
; CHECK-NEXT: lvl %s1
; CHECK-NEXT: vdivs.l %v1, %v0, %s0, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%5 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
ret <256 x double> %5
}
; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vdivsl.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)
; Function Attrs: nounwind readnone
define fastcc <256 x double> @vdivsl_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vdivsl_vvsmvl_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: lea %s0, 128
; CHECK-NEXT: lvl %s0
; CHECK-NEXT: vdivs.l %v1, %v0, 8, %vm1
; CHECK-NEXT: lea %s16, 256
; CHECK-NEXT: lvl %s16
; CHECK-NEXT: vor %v0, (0)1, %v1
; CHECK-NEXT: b.l.t (, %s10)
%4 = tail call fast <256 x double> @llvm.ve.vl.vdivsl.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
ret <256 x double> %4
}