; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector xor intrinsic instructions
;;;
;;; Note:
;;;   We test VXOR*vvl, VXOR*vvl_v, VXOR*rvl, VXOR*rvl_v, VXOR*vvml_v,
;;;   VXOR*rvml_v, PVXOR*vvl, PVXOR*vvl_v, PVXOR*rvl, PVXOR*rvl_v, PVXOR*vvml_v,
;;;   and PVXOR*rvml_v instructions.

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vxor_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vxor_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vxor %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vxor.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vxor.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vxor_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vxor_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vxor %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vxor.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vxor.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vxor_vsvl(i64 %0, <256 x double> %1) {
; CHECK-LABEL: vxor_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vxor %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vxor.vsvl(i64 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vxor.vsvl(i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vxor_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vxor_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vxor %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vxor.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vxor.vsvvl(i64, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vxor_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vxor_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vxor %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vxor.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vxor.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vxor_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vxor_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vxor %v1, %s0, %v0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vxor.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vxor.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvxor_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: pvxor_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvxor %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvxor.vvvl(<256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvxor_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: pvxor_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvxor %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvxor.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvxor_vsvl(i64 %0, <256 x double> %1) {
; CHECK-LABEL: pvxor_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvxor %v0, %s0, %v0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vsvl(i64 %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvxor.vsvl(i64, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvxor_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: pvxor_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvxor %v1, %s0, %v0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvxor.vsvvl(i64, <256 x double>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvxor_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvxor_vvvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvxor %v2, %v0, %v1, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvxor.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvxor_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvxor_vsvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvxor %v1, %s0, %v0, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.pvxor.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32)
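
;;; Note:
;;;   A reading of the pass-through variants above, inferred from the CHECK
;;;   lines rather than asserted by the test itself: when the intrinsic takes
;;;   an extra pass-through vector and a VL of 128, codegen allocates the
;;;   pass-through value to the instruction's destination register, so
;;;   elements at or beyond the vector length keep their pass-through
;;;   contents.  The trailing "vor %v0, (0)1, %vN" at VL 256 then acts as a
;;;   full-width register move (or with zero) into the return register %v0.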