; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -tail-predication=force-enabled-no-reductions -o - %s | FileCheck %s

define arm_aapcs_vfpcc <16 x i8> @vcmp_vpst_combination(<16 x i8>* %pSrc, i16 zeroext %blockSize, i8* nocapture %pResult, i32* nocapture %pIndex) {
; CHECK-LABEL: vcmp_vpst_combination:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    vmov.i8 q0, #0x7f
; CHECK-NEXT:    dlstp.8 lr, r1
; CHECK-NEXT:  .LBB0_1: @ %do.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.u8 q1, [r0]
; CHECK-NEXT:    vpt.s8 ge, q0, q1
; CHECK-NEXT:    vmovt q0, q1
; CHECK-NEXT:    letp lr, .LBB0_1
; CHECK-NEXT:  @ %bb.2: @ %do.end
; CHECK-NEXT:    pop {r7, pc}
entry:
  %conv = zext i16 %blockSize to i32
  %0 = tail call { <16 x i8>, i32 } @llvm.arm.mve.vidup.v16i8(i32 0, i32 1)
  %1 = extractvalue { <16 x i8>, i32 } %0, 0
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  %indexVec.0 = phi <16 x i8> [ %1, %entry ], [ %add, %do.body ]
  %curExtremIdxVec.0 = phi <16 x i8> [ zeroinitializer, %entry ], [ %6, %do.body ]
  %curExtremValVec.0 = phi <16 x i8> [ <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>, %entry ], [ %6, %do.body ]
  %blkCnt.0 = phi i32 [ %conv, %entry ], [ %sub2, %do.body ]
  %2 = tail call <16 x i1> @llvm.arm.mve.vctp8(i32 %blkCnt.0)
  %3 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %pSrc, i32 1, <16 x i1> %2, <16 x i8> zeroinitializer)
  %4 = icmp sle <16 x i8> %3, %curExtremValVec.0
  %5 = and <16 x i1> %4, %2
  %6 = tail call <16 x i8> @llvm.arm.mve.orr.predicated.v16i8.v16i1(<16 x i8> %3, <16 x i8> %3, <16 x i1> %5, <16 x i8> %curExtremValVec.0)
  %add = add <16 x i8> %indexVec.0, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
  %sub2 = add nsw i32 %blkCnt.0, -16
  %cmp = icmp sgt i32 %blkCnt.0, 16
  br i1 %cmp, label %do.body, label %do.end

do.end:                                           ; preds = %do.body
  ret <16 x i8> %6
}

define i32 @vcmp_new_vpst_combination(i32 %len, i32* nocapture readonly %arr) {
; CHECK-LABEL: vcmp_new_vpst_combination:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    cmp r0, #1
; CHECK-NEXT:    blt .LBB1_4
; CHECK-NEXT:  @ %bb.1: @ %vector.ph
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    vmov.i32 q1, #0x1
; CHECK-NEXT:    movs r2, #0
; CHECK-NEXT:    dlstp.32 lr, r0
; CHECK-NEXT:  .LBB1_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q2, [r1], #16
; CHECK-NEXT:    vcmp.i32 ne, q2, zr
; CHECK-NEXT:    vmov q2, q0
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vmovt q2, q1
; CHECK-NEXT:    vaddva.u32 r2, q2
; CHECK-NEXT:    letp lr, .LBB1_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    pop {r7, pc}
; CHECK-NEXT:  .LBB1_4:
; CHECK-NEXT:    movs r2, #0
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp7 = icmp sgt i32 %len, 0
  br i1 %cmp7, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %len, 3
  %n.vec = and i32 %n.rnd.up, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi i32 [ 0, %vector.ph ], [ %5, %vector.body ]
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %len)
  %0 = getelementptr inbounds i32, i32* %arr, i32 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
  %2 = icmp ne <4 x i32> %wide.masked.load, zeroinitializer
  %narrow = and <4 x i1> %active.lane.mask, %2
  %3 = zext <4 x i1> %narrow to <4 x i32>
  %4 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %3)
  %5 = add i32 %4, %vec.phi
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  %count.0.lcssa = phi i32 [ 0, %entry ], [ %5, %vector.body ]
  ret i32 %count.0.lcssa
}

declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)

declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)

declare { <16 x i8>, i32 } @llvm.arm.mve.vidup.v16i8(i32, i32)

declare <16 x i1> @llvm.arm.mve.vctp8(i32)

declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)

declare <16 x i8> @llvm.arm.mve.orr.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>)