; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst %s -o - | FileCheck %s

define arm_aapcs_vfpcc <4 x i32> @gather_inc_mini_4i32(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, <4 x i32> %offs) {
; CHECK-LABEL: gather_inc_mini_4i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 q1, #0x4
; CHECK-NEXT:    vadd.i32 q1, q0, q1
; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
; CHECK-NEXT:    bx lr
  %1 = add <4 x i32> %offs, <i32 4, i32 4, i32 4, i32 4>
  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  ret <4 x i32> %wide.masked.gather
}

define arm_aapcs_vfpcc <4 x i32> @gather_inc_minipred_4i32(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, <4 x i32> %offs) {
; CHECK-LABEL: gather_inc_minipred_4i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 q1, #0x4
; CHECK-NEXT:    movw r1, #3855
; CHECK-NEXT:    vadd.i32 q1, q0, q1
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrwt.u32 q0, [r0, q1, uxtw #2]
; CHECK-NEXT:    bx lr
  %1 = add <4 x i32> %offs, <i32 4, i32 4, i32 4, i32 4>
  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> undef)
  ret <4 x i32> %wide.masked.gather
}

define arm_aapcs_vfpcc <8 x i16> @gather_inc_mini_8i16(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, <8 x i32> %offs) {
; CHECK-LABEL: gather_inc_mini_8i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vmov.i32 q2, #0x10
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vshl.i32 q1, q1, #1
; CHECK-NEXT:    vadd.i32 q0, q0, q2
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    vadd.i32 q1, q1, q2
; CHECK-NEXT:    vmov r2, s3
; CHECK-NEXT:    vmov r3, s0
; CHECK-NEXT:    vmov r5, s1
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r4, s7
; CHECK-NEXT:    ldrh.w r12, [r1]
; CHECK-NEXT:    vmov r1, s5
; CHECK-NEXT:    ldrh.w lr, [r2]
; CHECK-NEXT:    vmov r2, s6
; CHECK-NEXT:    ldrh r3, [r3]
; CHECK-NEXT:    ldrh r5, [r5]
; CHECK-NEXT:    vmov.16 q0[0], r3
; CHECK-NEXT:    ldrh r0, [r0]
; CHECK-NEXT:    vmov.16 q0[1], r5
; CHECK-NEXT:    ldrh r4, [r4]
; CHECK-NEXT:    vmov.16 q0[2], r12
; CHECK-NEXT:    vmov.16 q0[3], lr
; CHECK-NEXT:    vmov.16 q0[4], r0
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    ldrh r2, [r2]
; CHECK-NEXT:    vmov.16 q0[5], r1
; CHECK-NEXT:    vmov.16 q0[6], r2
; CHECK-NEXT:    vmov.16 q0[7], r4
; CHECK-NEXT:    pop {r4, r5, r7, pc}
  %1 = add <8 x i32> %offs, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
  %2 = getelementptr inbounds i16, i16* %data, <8 x i32> %1
  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  ret <8 x i16> %wide.masked.gather
}

define arm_aapcs_vfpcc <8 x i16> @gather_inc_minipred_8i16(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, <8 x i32> %offs) {
; CHECK-LABEL: gather_inc_minipred_8i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vmov.i32 q2, #0x10
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vshl.i32 q1, q1, #1
; CHECK-NEXT:    vadd.i32 q0, q0, q2
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vadd.i32 q1, q1, q2
; CHECK-NEXT:    vmov r3, s2
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r2, s6
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    ldrh r3, [r3]
; CHECK-NEXT:    vmov.16 q0[0], r1
; CHECK-NEXT:    ldrh r0, [r0]
; CHECK-NEXT:    vmov.16 q0[2], r3
; CHECK-NEXT:    ldrh r2, [r2]
; CHECK-NEXT:    vmov.16 q0[4], r0
; CHECK-NEXT:    vmov.16 q0[6], r2
; CHECK-NEXT:    bx lr
  %1 = add <8 x i32> %offs, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
  %2 = getelementptr inbounds i16, i16* %data, <8 x i32> %1
  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %2, i32 4, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> undef)
  ret <8 x i16> %wide.masked.gather
}

define arm_aapcs_vfpcc <16 x i8> @gather_inc_mini_16i8(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, <16 x i32> %offs) {
; CHECK-LABEL: gather_inc_mini_16i8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r6, lr}
; CHECK-NEXT:    push {r4, r5, r6, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov.i32 q4, #0x10
; CHECK-NEXT:    vadd.i32 q3, q3, r0
; CHECK-NEXT:    vadd.i32 q3, q3, q4
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vmov r1, s14
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vadd.i32 q1, q1, q4
; CHECK-NEXT:    vadd.i32 q2, q2, r0
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vadd.i32 q2, q2, q4
; CHECK-NEXT:    vmov r2, s10
; CHECK-NEXT:    vmov r4, s11
; CHECK-NEXT:    ldrb.w r12, [r1]
; CHECK-NEXT:    vmov r1, s15
; CHECK-NEXT:    ldrb r0, [r0]
; CHECK-NEXT:    ldrb r2, [r2]
; CHECK-NEXT:    ldrb r4, [r4]
; CHECK-NEXT:    ldrb.w lr, [r1]
; CHECK-NEXT:    vmov r1, s12
; CHECK-NEXT:    ldrb r3, [r1]
; CHECK-NEXT:    vmov r1, s13
; CHECK-NEXT:    vadd.i32 q3, q0, q4
; CHECK-NEXT:    vmov r5, s12
; CHECK-NEXT:    vmov r6, s15
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    ldrb r6, [r6]
; CHECK-NEXT:    vmov.8 q0[0], r5
; CHECK-NEXT:    vmov r5, s13
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    vmov.8 q0[1], r5
; CHECK-NEXT:    vmov r5, s14
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    vmov.8 q0[2], r5
; CHECK-NEXT:    vmov r5, s8
; CHECK-NEXT:    vmov.8 q0[3], r6
; CHECK-NEXT:    vmov.8 q0[4], r0
; CHECK-NEXT:    vmov r0, s5
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    ldrb r0, [r0]
; CHECK-NEXT:    vmov.8 q0[5], r0
; CHECK-NEXT:    vmov r0, s6
; CHECK-NEXT:    ldrb r0, [r0]
; CHECK-NEXT:    vmov.8 q0[6], r0
; CHECK-NEXT:    vmov r0, s7
; CHECK-NEXT:    ldrb r0, [r0]
; CHECK-NEXT:    vmov.8 q0[7], r0
; CHECK-NEXT:    vmov r0, s9
; CHECK-NEXT:    vmov.8 q0[8], r5
; CHECK-NEXT:    ldrb r0, [r0]
; CHECK-NEXT:    vmov.8 q0[9], r0
; CHECK-NEXT:    vmov.8 q0[10], r2
; CHECK-NEXT:    vmov.8 q0[11], r4
; CHECK-NEXT:    vmov.8 q0[12], r3
; CHECK-NEXT:    vmov.8 q0[13], r1
; CHECK-NEXT:    vmov.8 q0[14], r12
; CHECK-NEXT:    vmov.8 q0[15], lr
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r6, pc}
  %1 = add <16 x i32> %offs, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %2 = getelementptr inbounds i8, i8* %data, <16 x i32> %1
  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %2, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
  ret <16 x i8> %wide.masked.gather
}

define arm_aapcs_vfpcc <16 x i8> @gather_inc_minipred_16i8(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, <16 x i32> %offs) {
; CHECK-LABEL: gather_inc_minipred_16i8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vmov.i32 q4, #0x10
; CHECK-NEXT:    vadd.i32 q2, q2, r0
; CHECK-NEXT:    vadd.i32 q2, q2, q4
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vmov r2, s8
; CHECK-NEXT:    vadd.i32 q1, q1, q4
; CHECK-NEXT:    vmov r1, s4
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vadd.i32 q0, q0, q4
; CHECK-NEXT:    vmov r3, s10
; CHECK-NEXT:    vmov r5, s2
; CHECK-NEXT:    ldrb.w lr, [r2]
; CHECK-NEXT:    vmov r2, s0
; CHECK-NEXT:    ldrb.w r12, [r1]
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    vadd.i32 q1, q3, r0
; CHECK-NEXT:    ldrb r3, [r3]
; CHECK-NEXT:    vadd.i32 q1, q1, q4
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    vmov r0, s4
; CHECK-NEXT:    vmov r4, s6
; CHECK-NEXT:    ldrb r2, [r2]
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q0[0], r2
; CHECK-NEXT:    vmov.8 q0[2], r5
; CHECK-NEXT:    vmov.8 q0[4], r12
; CHECK-NEXT:    ldrb r0, [r0]
; CHECK-NEXT:    vmov.8 q0[6], r1
; CHECK-NEXT:    ldrb r4, [r4]
; CHECK-NEXT:    vmov.8 q0[8], lr
; CHECK-NEXT:    vmov.8 q0[10], r3
; CHECK-NEXT:    vmov.8 q0[12], r0
; CHECK-NEXT:    vmov.8 q0[14], r4
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
  %1 = add <16 x i32> %offs, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %2 = getelementptr inbounds i8, i8* %data, <16 x i32> %1
  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %2, i32 2, <16 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <16 x i8> undef)
  ret <16 x i8> %wide.masked.gather
}

define arm_aapcs_vfpcc void @gather_pre_inc(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
; CHECK-LABEL: gather_pre_inc:
; CHECK:       @ %bb.0: @ %vector.ph
; CHECK-NEXT:    adr r3, .LCPI6_0
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:  .LBB6_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q1, [q0, #96]!
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vstrb.8 q1, [r1], #16
; CHECK-NEXT:    bne .LBB6_1
; CHECK-NEXT:  @ %bb.2: @ %end
; CHECK-NEXT:    bx lr
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI6_0:
; CHECK-NEXT:    .long 4294967224 @ 0xffffffb8
; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
; CHECK-NEXT:    .long 4294967272 @ 0xffffffe8
; CHECK-NEXT:    .long 0 @ 0x0
vector.ph:                                        ; preds = %for.body.preheader
  %ind.end = shl i32 %n.vec, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = mul <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
  %1 = add <4 x i32> %0, <i32 6, i32 6, i32 6, i32 6>
  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %3 = getelementptr inbounds i32, i32* %dst, i32 %index
  %4 = bitcast i32* %3 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %4, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
  %5 = icmp eq i32 %index.next, %n.vec
  br i1 %5, label %end, label %vector.body

end:
  ret void;
}

define arm_aapcs_vfpcc void @gather_post_inc(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec43) {
; CHECK-LABEL: gather_post_inc:
; CHECK:       @ %bb.0: @ %vector.ph41
; CHECK-NEXT:    adr r3, .LCPI7_0
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:  .LBB7_1: @ %vector.body39
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q1, [q0, #96]!
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vstrb.8 q1, [r1], #16
; CHECK-NEXT:    bne .LBB7_1
; CHECK-NEXT:  @ %bb.2: @ %end
; CHECK-NEXT:    bx lr
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:  .LCPI7_0:
; CHECK-NEXT:    .long 4294967200 @ 0xffffffa0
; CHECK-NEXT:    .long 4294967224 @ 0xffffffb8
; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
; CHECK-NEXT:    .long 4294967272 @ 0xffffffe8
vector.ph41:                                      ; preds = %for.body6.preheader
  %ind.end47 = shl i32 %n.vec43, 1
  br label %vector.body39

vector.body39:                                    ; preds = %vector.body39, %vector.ph41
  %index44 = phi i32 [ 0, %vector.ph41 ], [ %index.next45, %vector.body39 ]
  %vec.ind50 = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph41 ], [ %vec.ind.next51, %vector.body39 ]
  %0 = mul nuw nsw <4 x i32> %vec.ind50, <i32 3, i32 3, i32 3, i32 3>
  %1 = getelementptr inbounds i32, i32* %data, <4 x i32> %0
  %wide.masked.gather55 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %2 = getelementptr inbounds i32, i32* %dst, i32 %index44
  %3 = bitcast i32* %2 to <4 x i32>*
  store <4 x i32> %wide.masked.gather55, <4 x i32>* %3, align 4
  %index.next45 = add i32 %index44, 4
  %vec.ind.next51 = add <4 x i32> %vec.ind50, <i32 8, i32 8, i32 8, i32 8>
  %4 = icmp eq i32 %index.next45, %n.vec43
  br i1 %4, label %end, label %vector.body39

end:
  ret void;
}

define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v4i32_simple:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    it lt
; CHECK-NEXT:    poplt {r4, pc}
; CHECK-NEXT:  .LBB8_1: @ %vector.ph.preheader
; CHECK-NEXT:    bic r12, r2, #3
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    sub.w lr, r12, #4
; CHECK-NEXT:    add.w r4, r3, lr, lsr #2
; CHECK-NEXT:    adr r3, .LCPI8_0
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:  .LBB8_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB8_3 Depth 2
; CHECK-NEXT:    dls lr, r4
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:  .LBB8_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB8_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vldrw.u32 q2, [q1, #16]!
; CHECK-NEXT:    vstrb.8 q2, [r0], #16
; CHECK-NEXT:    le lr, .LBB8_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB8_2 Depth=1
; CHECK-NEXT:    cmp r12, r2
; CHECK-NEXT:    bne .LBB8_2
; CHECK-NEXT:  @ %bb.5: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI8_0:
; CHECK-NEXT:    .long 4294967280 @ 0xfffffff0
; CHECK-NEXT:    .long 4294967284 @ 0xfffffff4
; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
; CHECK-NEXT:    .long 4294967292 @ 0xfffffffc
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %1 = getelementptr inbounds i32, i32* %dst, i32 %index
  %2 = bitcast i32* %1 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %3 = icmp eq i32 %index.next, %n.vec
  br i1 %3, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

define arm_aapcs_vfpcc void @gather_inc_v4i32_complex(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v4i32_complex:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r7, lr}
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    blt .LBB9_5
; CHECK-NEXT:  @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT:    bic r12, r2, #3
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    sub.w lr, r12, #4
; CHECK-NEXT:    adr r4, .LCPI9_1
; CHECK-NEXT:    adr r5, .LCPI9_2
; CHECK-NEXT:    vldrw.u32 q1, [r4]
; CHECK-NEXT:    add.w r3, r3, lr, lsr #2
; CHECK-NEXT:    adr.w lr, .LCPI9_0
; CHECK-NEXT:    vldrw.u32 q0, [r5]
; CHECK-NEXT:    vldrw.u32 q2, [lr]
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vadd.i32 q2, q2, r0
; CHECK-NEXT:  .LBB9_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB9_3 Depth 2
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    vmov q3, q1
; CHECK-NEXT:    vmov q4, q0
; CHECK-NEXT:    vmov q5, q2
; CHECK-NEXT:  .LBB9_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB9_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vldrw.u32 q6, [q5, #48]!
; CHECK-NEXT:    vldrw.u32 q7, [q3, #48]!
; CHECK-NEXT:    vadd.i32 q6, q7, q6
; CHECK-NEXT:    vldrw.u32 q7, [q4, #48]!
; CHECK-NEXT:    vadd.i32 q6, q6, q7
; CHECK-NEXT:    vstrb.8 q6, [r0], #16
; CHECK-NEXT:    le lr, .LBB9_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB9_2 Depth=1
; CHECK-NEXT:    cmp r12, r2
; CHECK-NEXT:    bne .LBB9_2
; CHECK-NEXT:  .LBB9_5: @ %for.cond.cleanup
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    pop {r4, r5, r7, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI9_0:
; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
; CHECK-NEXT:    .long 4294967260 @ 0xffffffdc
; CHECK-NEXT:    .long 4294967272 @ 0xffffffe8
; CHECK-NEXT:    .long 4294967284 @ 0xfffffff4
; CHECK-NEXT:  .LCPI9_1:
; CHECK-NEXT:    .long 4294967252 @ 0xffffffd4
; CHECK-NEXT:    .long 4294967264 @ 0xffffffe0
; CHECK-NEXT:    .long 4294967276 @ 0xffffffec
; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
; CHECK-NEXT:  .LCPI9_2:
; CHECK-NEXT:    .long 4294967256 @ 0xffffffd8
; CHECK-NEXT:    .long 4294967268 @ 0xffffffe4
; CHECK-NEXT:    .long 4294967280 @ 0xfffffff0
; CHECK-NEXT:    .long 4294967292 @ 0xfffffffc
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = mul nuw nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
  %1 = getelementptr inbounds i32, i32* %data, <4 x i32> %0
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %2 = add nuw nsw <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
  %3 = getelementptr inbounds i32, i32* %data, <4 x i32> %2
  %wide.masked.gather24 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %4 = add nuw nsw <4 x i32> %0, <i32 2, i32 2, i32 2, i32 2>
  %5 = getelementptr inbounds i32, i32* %data, <4 x i32> %4
  %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %5, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %6 = add nsw <4 x i32> %wide.masked.gather24, %wide.masked.gather
  %7 = add nsw <4 x i32> %6, %wide.masked.gather25
  %8 = getelementptr inbounds i32, i32* %dst, i32 %index
  %9 = bitcast i32* %8 to <4 x i32>*
  store <4 x i32> %7, <4 x i32>* %9, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %10 = icmp eq i32 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

define arm_aapcs_vfpcc void @gather_inc_v4i32_large(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v4i32_large:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    it lt
; CHECK-NEXT:    poplt {r4, pc}
; CHECK-NEXT:  .LBB10_1: @ %vector.ph.preheader
; CHECK-NEXT:    bic r12, r2, #3
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    sub.w lr, r12, #4
; CHECK-NEXT:    add.w r4, r3, lr, lsr #2
; CHECK-NEXT:    adr r3, .LCPI10_0
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:  .LBB10_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB10_3 Depth 2
; CHECK-NEXT:    dls lr, r4
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:  .LBB10_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB10_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vldrw.u32 q2, [q1, #508]!
; CHECK-NEXT:    vstrb.8 q2, [r0], #16
; CHECK-NEXT:    le lr, .LBB10_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB10_2 Depth=1
; CHECK-NEXT:    cmp r12, r2
; CHECK-NEXT:    bne .LBB10_2
; CHECK-NEXT:  @ %bb.5: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI10_0:
; CHECK-NEXT:    .long 4294966788 @ 0xfffffe04
; CHECK-NEXT:    .long 4294966792 @ 0xfffffe08
; CHECK-NEXT:    .long 4294966796 @ 0xfffffe0c
; CHECK-NEXT:    .long 4294966800 @ 0xfffffe10
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %1 = getelementptr inbounds i32, i32* %dst, i32 %index
  %2 = bitcast i32* %1 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 127, i32 127, i32 127, i32 127>
  %3 = icmp eq i32 %index.next, %n.vec
  br i1 %3, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

; TODO: uneven - I think it's not possible to create such an example, because
; vec.ind will always be increased by a vector with 4 elements (=> x*4 = even)

; TODO: What is sxth?
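; (A likely answer to the TODO above: sxth is the Arm scalar
; sign-extend-halfword instruction; its vector counterpart in the checked
; output is vmovlb.s16, which sign-extends the bottom halfword lanes of the
; <8 x i16> offset vectors to 32 bits before address generation.)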
define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v8i16_simple:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    .pad #8
; CHECK-NEXT:    sub sp, #8
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    str r1, [sp, #4] @ 4-byte Spill
; CHECK-NEXT:    blt .LBB11_5
; CHECK-NEXT:  @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT:    bic r8, r2, #7
; CHECK-NEXT:    movs r5, #1
; CHECK-NEXT:    sub.w r6, r8, #8
; CHECK-NEXT:    vmov.i16 q1, #0x8
; CHECK-NEXT:    add.w r1, r5, r6, lsr #3
; CHECK-NEXT:    adr r6, .LCPI11_0
; CHECK-NEXT:    vldrw.u32 q0, [r6]
; CHECK-NEXT:    str r1, [sp] @ 4-byte Spill
; CHECK-NEXT:  .LBB11_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB11_3 Depth 2
; CHECK-NEXT:    ldr r1, [sp] @ 4-byte Reload
; CHECK-NEXT:    vmov q2, q0
; CHECK-NEXT:    dls lr, r1
; CHECK-NEXT:    ldr r4, [sp, #4] @ 4-byte Reload
; CHECK-NEXT:  .LBB11_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB11_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vmov.u16 r7, q2[6]
; CHECK-NEXT:    vmov.u16 r3, q2[4]
; CHECK-NEXT:    vmov q4[2], q4[0], r3, r7
; CHECK-NEXT:    vmov.u16 r3, q2[7]
; CHECK-NEXT:    vmov.u16 r7, q2[5]
; CHECK-NEXT:    vmov.u16 r5, q2[2]
; CHECK-NEXT:    vmov q4[3], q4[1], r7, r3
; CHECK-NEXT:    vmov.u16 r6, q2[0]
; CHECK-NEXT:    vmovlb.s16 q4, q4
; CHECK-NEXT:    vmov q3[2], q3[0], r6, r5
; CHECK-NEXT:    vshl.i32 q4, q4, #1
; CHECK-NEXT:    vmov.u16 r5, q2[3]
; CHECK-NEXT:    vadd.i32 q4, q4, r0
; CHECK-NEXT:    vmov.u16 r6, q2[1]
; CHECK-NEXT:    vmov r3, s16
; CHECK-NEXT:    vmov q3[3], q3[1], r6, r5
; CHECK-NEXT:    vmovlb.s16 q3, q3
; CHECK-NEXT:    vmov r7, s17
; CHECK-NEXT:    vshl.i32 q3, q3, #1
; CHECK-NEXT:    vadd.i16 q2, q2, q1
; CHECK-NEXT:    vadd.i32 q3, q3, r0
; CHECK-NEXT:    vmov r5, s15
; CHECK-NEXT:    vmov r6, s14
; CHECK-NEXT:    vmov r12, s13
; CHECK-NEXT:    ldrh.w r11, [r3]
; CHECK-NEXT:    vmov r3, s12
; CHECK-NEXT:    ldrh r7, [r7]
; CHECK-NEXT:    ldrh.w r9, [r5]
; CHECK-NEXT:    vmov r5, s18
; CHECK-NEXT:    ldrh.w r10, [r6]
; CHECK-NEXT:    vmov r6, s19
; CHECK-NEXT:    ldrh.w r1, [r12]
; CHECK-NEXT:    ldrh r3, [r3]
; CHECK-NEXT:    vmov.16 q3[0], r3
; CHECK-NEXT:    vmov.16 q3[1], r1
; CHECK-NEXT:    vmov.16 q3[2], r10
; CHECK-NEXT:    ldrh r5, [r5]
; CHECK-NEXT:    vmov.16 q3[3], r9
; CHECK-NEXT:    ldrh r6, [r6]
; CHECK-NEXT:    vmov.16 q3[4], r11
; CHECK-NEXT:    vmov.16 q3[5], r7
; CHECK-NEXT:    vmov.16 q3[6], r5
; CHECK-NEXT:    vmov.16 q3[7], r6
; CHECK-NEXT:    vstrb.8 q3, [r4], #16
; CHECK-NEXT:    le lr, .LBB11_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB11_2 Depth=1
; CHECK-NEXT:    cmp r8, r2
; CHECK-NEXT:    bne .LBB11_2
; CHECK-NEXT:  .LBB11_5: @ %for.cond.cleanup
; CHECK-NEXT:    add sp, #8
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI11_0:
; CHECK-NEXT:    .short 0 @ 0x0
; CHECK-NEXT:    .short 1 @ 0x1
; CHECK-NEXT:    .short 2 @ 0x2
; CHECK-NEXT:    .short 3 @ 0x3
; CHECK-NEXT:    .short 4 @ 0x4
; CHECK-NEXT:    .short 5 @ 0x5
; CHECK-NEXT:    .short 6 @ 0x6
; CHECK-NEXT:    .short 7 @ 0x7
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <8 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = getelementptr inbounds i16, i16* %data, <8 x i16> %vec.ind
  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %0, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  %1 = getelementptr inbounds i16, i16* %dst, i32 %index
  %2 = bitcast i16* %1 to <8 x i16>*
  store <8 x i16> %wide.masked.gather, <8 x i16>* %2, align 2
  %index.next = add i32 %index, 8
  %vec.ind.next = add <8 x i16> %vec.ind, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %3 = icmp eq i32 %index.next, %n.vec
  br i1 %3, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

; TODO: This looks absolutely terrifying :(
define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v8i16_complex:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    .pad #104
; CHECK-NEXT:    sub sp, #104
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    str r1, [sp, #60] @ 4-byte Spill
; CHECK-NEXT:    blt.w .LBB12_5
; CHECK-NEXT:  @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT:    bic r1, r2, #7
; CHECK-NEXT:    adr r6, .LCPI12_2
; CHECK-NEXT:    sub.w r3, r1, #8
; CHECK-NEXT:    vldrw.u32 q0, [r6]
; CHECK-NEXT:    str r1, [sp, #56] @ 4-byte Spill
; CHECK-NEXT:    movs r7, #1
; CHECK-NEXT:    add.w r1, r7, r3, lsr #3
; CHECK-NEXT:    adr r3, .LCPI12_0
; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    adr r7, .LCPI12_1
; CHECK-NEXT:    vmov.i16 q3, #0x18
; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r7]
; CHECK-NEXT:    str r1, [sp, #52] @ 4-byte Spill
; CHECK-NEXT:    vstrw.32 q3, [sp, #64] @ 16-byte Spill
; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT:  .LBB12_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB12_3 Depth 2
; CHECK-NEXT:    ldr r1, [sp, #52] @ 4-byte Reload
; CHECK-NEXT:    dls lr, r1
; CHECK-NEXT:    ldr r4, [sp, #60] @ 4-byte Reload
; CHECK-NEXT:    vldrw.u32 q7, [sp, #16] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q5, [sp, #32] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
; CHECK-NEXT:  .LBB12_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB12_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vmov.u16 r3, q5[2]
; CHECK-NEXT:    vmov.u16 r5, q5[0]
; CHECK-NEXT:    vmov q0[2], q0[0], r5, r3
; CHECK-NEXT:    vmov.u16 r3, q5[3]
; CHECK-NEXT:    vmov.u16 r5, q5[1]
; CHECK-NEXT:    vmov.u16 r7, q7[6]
; CHECK-NEXT:    vmov q0[3], q0[1], r5, r3
; CHECK-NEXT:    vmov.u16 r5, q5[4]
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vmov.u16 r12, q7[4]
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vmov.u16 r1, q7[5]
; CHECK-NEXT:    vadd.i32 q2, q0, r0
; CHECK-NEXT:    vmov r3, s10
; CHECK-NEXT:    vmov r6, s11
; CHECK-NEXT:    ldrh.w r9, [r3]
; CHECK-NEXT:    vmov.u16 r3, q5[6]
; CHECK-NEXT:    vmov q0[2], q0[0], r5, r3
; CHECK-NEXT:    vmov.u16 r3, q5[7]
; CHECK-NEXT:    vmov.u16 r5, q5[5]
; CHECK-NEXT:    ldrh r6, [r6]
; CHECK-NEXT:    vmov q0[3], q0[1], r5, r3
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vmov r3, s0
; CHECK-NEXT:    vmov r5, s3
; CHECK-NEXT:    ldrh.w r10, [r3]
; CHECK-NEXT:    vmov r3, s1
; CHECK-NEXT:    ldrh r5, [r5]
; CHECK-NEXT:    ldrh.w r11, [r3]
; CHECK-NEXT:    vmov r3, s2
; CHECK-NEXT:    vmov q0[2], q0[0], r12, r7
; CHECK-NEXT:    vmov.u16 r7, q7[7]
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r7
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    vstrw.32 q0, [sp, #80] @ 16-byte Spill
; CHECK-NEXT:    ldrh.w r8, [r3]
; CHECK-NEXT:    vmov.u16 r3, q6[0]
; CHECK-NEXT:    ldrh r7, [r1]
; CHECK-NEXT:    vmov.u16 r1, q6[2]
; CHECK-NEXT:    vmov q1[2], q1[0], r3, r1
; CHECK-NEXT:    vmov.u16 r1, q6[3]
; CHECK-NEXT:    vmov.u16 r3, q6[1]
; CHECK-NEXT:    vmov q1[3], q1[1], r3, r1
; CHECK-NEXT:    vmov.u16 r1, q6[6]
; CHECK-NEXT:    vmov.u16 r3, q6[4]
; CHECK-NEXT:    vmovlb.s16 q1, q1
; CHECK-NEXT:    vmov q0[2], q0[0], r3, r1
; CHECK-NEXT:    vmov.u16 r1, q6[7]
; CHECK-NEXT:    vmov.u16 r3, q6[5]
; CHECK-NEXT:    vshl.i32 q1, q1, #1
; CHECK-NEXT:    vmov q0[3], q0[1], r3, r1
; CHECK-NEXT:    vmov.u16 r1, q7[2]
; CHECK-NEXT:    vmov.u16 r3, q7[0]
; CHECK-NEXT:    vadd.i32 q4, q1, r0
; CHECK-NEXT:    vmov q3[2], q3[0], r3, r1
; CHECK-NEXT:    vmov.u16 r1, q7[3]
; CHECK-NEXT:    vmov.u16 r3, q7[1]
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vmov q3[3], q3[1], r3, r1
; CHECK-NEXT:    vmov r1, s8
; CHECK-NEXT:    vshl.i32 q0, q0, #1
; CHECK-NEXT:    vmovlb.s16 q3, q3
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vshl.i32 q3, q3, #1
; CHECK-NEXT:    vadd.i32 q3, q3, r0
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q1[0], r1
; CHECK-NEXT:    vmov r1, s9
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q1[1], r1
; CHECK-NEXT:    vmov r1, s16
; CHECK-NEXT:    vmov.16 q1[2], r9
; CHECK-NEXT:    vmov.16 q1[3], r6
; CHECK-NEXT:    vmov.16 q1[4], r10
; CHECK-NEXT:    vmov.16 q1[5], r11
; CHECK-NEXT:    vmov.16 q1[6], r8
; CHECK-NEXT:    vmov.16 q1[7], r5
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[0], r1
; CHECK-NEXT:    vmov r1, s17
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[1], r1
; CHECK-NEXT:    vmov r1, s18
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[2], r1
; CHECK-NEXT:    vmov r1, s19
; CHECK-NEXT:    vldrw.u32 q4, [sp, #80] @ 16-byte Reload
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[3], r1
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[4], r1
; CHECK-NEXT:    vmov r1, s1
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[5], r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[6], r1
; CHECK-NEXT:    vmov r1, s3
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q2[7], r1
; CHECK-NEXT:    vmov r1, s12
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q0[0], r1
; CHECK-NEXT:    vmov r1, s13
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q0[1], r1
; CHECK-NEXT:    vmov r1, s14
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q0[2], r1
; CHECK-NEXT:    vmov r1, s15
; CHECK-NEXT:    vldrw.u32 q3, [sp, #64] @ 16-byte Reload
; CHECK-NEXT:    vadd.i16 q6, q6, q3
; CHECK-NEXT:    vadd.i16 q5, q5, q3
; CHECK-NEXT:    vadd.i16 q7, q7, q3
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q0[3], r1
; CHECK-NEXT:    vmov r1, s16
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q0[4], r1
; CHECK-NEXT:    vmov r1, s17
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q0[5], r1
; CHECK-NEXT:    vmov r1, s19
; CHECK-NEXT:    vmov.16 q0[6], r7
; CHECK-NEXT:    ldrh r1, [r1]
; CHECK-NEXT:    vmov.16 q0[7], r1
; CHECK-NEXT:    vadd.i16 q0, q0, q2
; CHECK-NEXT:    vadd.i16 q0, q0, q1
; CHECK-NEXT:    vstrb.8 q0, [r4], #16
; CHECK-NEXT:    le lr, .LBB12_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB12_2 Depth=1
; CHECK-NEXT:    ldr r1, [sp, #56] @ 4-byte Reload
; CHECK-NEXT:    cmp r1, r2
; CHECK-NEXT:    bne.w .LBB12_2
; CHECK-NEXT:  .LBB12_5: @ %for.cond.cleanup
; CHECK-NEXT:    add sp, #104
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI12_0:
; CHECK-NEXT:    .short 1 @ 0x1
; CHECK-NEXT:    .short 4 @ 0x4
; CHECK-NEXT:    .short 7 @ 0x7
; CHECK-NEXT:    .short 10 @ 0xa
; CHECK-NEXT:    .short 13 @ 0xd
; CHECK-NEXT:    .short 16 @ 0x10
; CHECK-NEXT:    .short 19 @ 0x13
; CHECK-NEXT:    .short 22 @ 0x16
; CHECK-NEXT:  .LCPI12_1:
; CHECK-NEXT:    .short 0 @ 0x0
; CHECK-NEXT:    .short 3 @ 0x3
; CHECK-NEXT:    .short 6 @ 0x6
; CHECK-NEXT:    .short 9 @ 0x9
; CHECK-NEXT:    .short 12 @ 0xc
; CHECK-NEXT:    .short 15 @ 0xf
; CHECK-NEXT:    .short 18 @ 0x12
; CHECK-NEXT:    .short 21 @ 0x15
; CHECK-NEXT:  .LCPI12_2:
; CHECK-NEXT:    .short 2 @ 0x2
; CHECK-NEXT:    .short 5 @ 0x5
; CHECK-NEXT:    .short 8 @ 0x8
; CHECK-NEXT:    .short 11 @ 0xb
; CHECK-NEXT:    .short 14 @ 0xe
; CHECK-NEXT:    .short 17 @ 0x11
; CHECK-NEXT:    .short 20 @ 0x14
; CHECK-NEXT:    .short 23 @ 0x17
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <8 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = mul nuw nsw <8 x i16> %vec.ind, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  %1 = getelementptr inbounds i16, i16* %data, <8 x i16> %0
  %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %1, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  %2 = add nuw nsw <8 x i16> %0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %3 = getelementptr inbounds i16, i16* %data, <8 x i16> %2
  %wide.masked.gather24 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %3, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  %4 = add nuw nsw <8 x i16> %0, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  %5 = getelementptr inbounds i16, i16* %data, <8 x i16> %4
  %wide.masked.gather25 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %5, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
  %6 = add nsw <8 x i16> %wide.masked.gather24, %wide.masked.gather
  %7 = add nsw <8 x i16> %6, %wide.masked.gather25
  %8 = getelementptr inbounds i16, i16* %dst, i32 %index
  %9 = bitcast i16* %8 to <8 x i16>*
  store <8 x i16> %7, <8 x i16>* %9, align 2
  %index.next = add i32 %index, 8
  %vec.ind.next = add <8 x i16> %vec.ind, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %10 = icmp eq i32 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v16i8_complex:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    .pad #328
; CHECK-NEXT:    sub sp, #328
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    str r1, [sp, #120] @ 4-byte Spill
; CHECK-NEXT:    mov r1, r2
; CHECK-NEXT:    str r2, [sp, #124] @ 4-byte Spill
; CHECK-NEXT:    blt.w .LBB13_5
; CHECK-NEXT:  @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT:    ldr r1, [sp, #124] @ 4-byte Reload
; CHECK-NEXT:    adr.w r6, .LCPI13_8
; CHECK-NEXT:    adr.w r7, .LCPI13_7
; CHECK-NEXT:    adr.w r3, .LCPI13_6
; CHECK-NEXT:    bic r11, r1, #7
; CHECK-NEXT:    adr r1, .LCPI13_0
; CHECK-NEXT:    vldrw.u32 q0, [r1]
; CHECK-NEXT:    adr r1, .LCPI13_1
; CHECK-NEXT:    vmov.i32 q5, #0x30
; CHECK-NEXT:    str.w r11, [sp, #116] @ 4-byte Spill
; CHECK-NEXT:    vstrw.32 q0, [sp, #96] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r1]
; CHECK-NEXT:    adr r1, .LCPI13_5
; CHECK-NEXT:    vstrw.32 q0, [sp, #80] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r6]
; CHECK-NEXT:    adr.w r6, .LCPI13_9
; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r7]
; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r6]
; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r1]
; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT:  .LBB13_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB13_3 Depth 2
; CHECK-NEXT:    vldrw.u32 q2, [sp, #32] @ 16-byte Reload
; CHECK-NEXT:    adr r1, .LCPI13_3
; CHECK-NEXT:    vldrw.u32 q1, [r1]
; CHECK-NEXT:    adr r1, .LCPI13_4
; CHECK-NEXT:    vstrw.32 q2, [sp, #288] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q2, [sp, #48] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q3, [r1]
; CHECK-NEXT:    adr r1, .LCPI13_2
; CHECK-NEXT:    vstrw.32 q2, [sp, #224] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q0, [r1]
; CHECK-NEXT:    adr r1, .LCPI13_10
; CHECK-NEXT:    vstrw.32 q2, [sp, #272] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q2, [sp, #64] @ 16-byte Reload
; CHECK-NEXT:    vstrw.32 q0, [sp, #304] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r1]
; CHECK-NEXT:    adr r1, .LCPI13_11
; CHECK-NEXT:    ldr.w r9, [sp, #120] @ 4-byte Reload
; CHECK-NEXT:    vstrw.32 q2, [sp, #208] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q2, [sp, #96] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q6, [r1]
; CHECK-NEXT:    vldrw.u32 q7, [sp] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q4, [sp, #16] @ 16-byte Reload
; CHECK-NEXT:    vstrw.32 q2, [sp, #192] @ 16-byte Spill
; CHECK-NEXT:  .LBB13_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB13_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vstrw.32 q3, [sp, #240] @ 16-byte Spill
; CHECK-NEXT:    vadd.i32 q3, q6, r0
; CHECK-NEXT:    vmov r1, s15
; CHECK-NEXT:    vstrw.32 q1, [sp, #256] @ 16-byte Spill
; CHECK-NEXT:    vadd.i32 q1, q0, r0
; CHECK-NEXT:    vstrw.32 q0, [sp, #176] @ 16-byte Spill
; CHECK-NEXT:    vadd.i32 q0, q7, r0
; CHECK-NEXT:    vstrw.32 q6, [sp, #160] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q6, [sp, #256] @ 16-byte Reload
; CHECK-NEXT:    vstrw.32 q7, [sp, #144] @ 16-byte Spill
; CHECK-NEXT:    vmov r5, s7
; CHECK-NEXT:    vldrw.u32 q2, [sp, #240] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q6, q6, r0
; CHECK-NEXT:    vstrw.32 q4, [sp, #128] @ 16-byte Spill
; CHECK-NEXT:    subs.w r11, r11, #16
; CHECK-NEXT:    ldrb.w r12, [r1]
; CHECK-NEXT:    vmov r1, s5
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    ldrb.w lr, [r1]
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    ldrb r6, [r1]
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    ldrb.w r10, [r1]
; CHECK-NEXT:    vmov r1, s3
; CHECK-NEXT:    ldrb r4, [r1]
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    ldrb.w r8, [r1]
; CHECK-NEXT:    vmov r1, s24
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[0], r1
; CHECK-NEXT:    vmov r1, s25
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[1], r1
; CHECK-NEXT:    vmov r1, s26
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[2], r1
; CHECK-NEXT:    vmov r1, s27
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[3], r1
; CHECK-NEXT:    vmov r1, s12
; CHECK-NEXT:    vmov.8 q7[4], r6
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[0], r1
; CHECK-NEXT:    vmov r1, s13
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[1], r1
; CHECK-NEXT:    vmov r1, s14
; CHECK-NEXT:    vadd.i32 q3, q4, r0
; CHECK-NEXT:    vldrw.u32 q4, [sp, #224] @ 16-byte Reload
; CHECK-NEXT:    vmov r2, s12
; CHECK-NEXT:    vmov r3, s15
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[2], r1
; CHECK-NEXT:    vmov r1, s4
; CHECK-NEXT:    vmov.8 q6[3], r12
; CHECK-NEXT:    ldrb r2, [r2]
; CHECK-NEXT:    vldrw.u32 q1, [sp, #304] @ 16-byte Reload
; CHECK-NEXT:    ldrb r3, [r3]
; CHECK-NEXT:    vstrw.32 q1, [sp, #304] @ 16-byte Spill
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[4], r1
; CHECK-NEXT:    vmov r1, s13
; CHECK-NEXT:    vmov.8 q6[5], lr
; CHECK-NEXT:    vmov.8 q6[6], r8
; CHECK-NEXT:    vmov.8 q6[7], r5
; CHECK-NEXT:    vmov r5, s1
; CHECK-NEXT:    vldrw.u32 q0, [sp, #288] @ 16-byte Reload
; CHECK-NEXT:    vstrw.32 q0, [sp, #288] @ 16-byte Spill
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vmov r6, s0
; CHECK-NEXT:    ldrb r7, [r1]
; CHECK-NEXT:    vmov r1, s14
; CHECK-NEXT:    vldrw.u32 q3, [sp, #208] @ 16-byte Reload
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    vmov.8 q7[5], r5
; CHECK-NEXT:    vmov r5, s1
; CHECK-NEXT:    vmov.8 q7[6], r10
; CHECK-NEXT:    vmov.8 q7[7], r4
; CHECK-NEXT:    vmov r4, s2
; CHECK-NEXT:    vmov.8 q7[8], r2
; CHECK-NEXT:    ldrb r6, [r6]
; CHECK-NEXT:    vmov.8 q7[9], r7
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[10], r1
; CHECK-NEXT:    vmov r1, s4
; CHECK-NEXT:    vmov.8 q7[11], r3
; CHECK-NEXT:    vmov.8 q7[12], r6
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    ldrb r4, [r4]
; CHECK-NEXT:    vmov.8 q7[13], r5
; CHECK-NEXT:    vmov.8 q7[14], r4
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[8], r1
; CHECK-NEXT:    vmov r1, s5
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[9], r1
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[10], r1
; CHECK-NEXT:    vmov r1, s7
; CHECK-NEXT:    vadd.i32 q1, q2, r0
; CHECK-NEXT:    vldrw.u32 q2, [sp, #192] @ 16-byte Reload
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[11], r1
; CHECK-NEXT:    vmov r1, s4
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[12], r1
; CHECK-NEXT:    vmov r1, s5
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[13], r1
; CHECK-NEXT:    vmov r1, s6
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[14], r1
; CHECK-NEXT:    vmov r1, s7
; CHECK-NEXT:    vldrw.u32 q1, [sp, #256] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q1, q1, q5
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q6[15], r1
; CHECK-NEXT:    vmov r1, s3
; CHECK-NEXT:    vadd.i32 q0, q4, r0
; CHECK-NEXT:    vadd.i32 q4, q4, q5
; CHECK-NEXT:    vmov r2, s1
; CHECK-NEXT:    vstrw.32 q4, [sp, #224] @ 16-byte Spill
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[15], r1
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vadd.i8 q6, q7, q6
; CHECK-NEXT:    ldrb r2, [r2]
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[0], r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    vmov.8 q7[1], r2
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[2], r1
; CHECK-NEXT:    vmov r1, s3
; CHECK-NEXT:    vldrw.u32 q0, [sp, #272] @ 16-byte Reload
; CHECK-NEXT:    vstrw.32 q0, [sp, #272] @ 16-byte Spill
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    vldrw.u32 q4, [sp, #272] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q4, q4, q5
; CHECK-NEXT:    vstrw.32 q4, [sp, #272] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q4, [sp, #304] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q4, q4, q5
; CHECK-NEXT:    vstrw.32 q4, [sp, #304] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q4, [sp, #128] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q4, q4, q5
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[3], r1
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[4], r1
; CHECK-NEXT:    vmov r1, s1
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[5], r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[6], r1
; CHECK-NEXT:    vmov r1, s3
; CHECK-NEXT:    vadd.i32 q0, q3, r0
; CHECK-NEXT:    vadd.i32 q3, q3, q5
; CHECK-NEXT:    vstrw.32 q3, [sp, #208] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q3, [sp, #240] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q3, q3, q5
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[7], r1
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[8], r1
; CHECK-NEXT:    vmov r1, s1
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[9], r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[10], r1
; CHECK-NEXT:    vmov r1, s3
; CHECK-NEXT:    vadd.i32 q0, q2, r0
; CHECK-NEXT:    vadd.i32 q2, q2, q5
; CHECK-NEXT:    vstrw.32 q2, [sp, #192] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q2, [sp, #288] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q2, q2, q5
; CHECK-NEXT:    vstrw.32 q2, [sp, #288] @ 16-byte Spill
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[11], r1
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[12], r1
; CHECK-NEXT:    vmov r1, s1
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[13], r1
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[14], r1
; CHECK-NEXT:    vmov r1, s3
; CHECK-NEXT:    ldrb r1, [r1]
; CHECK-NEXT:    vmov.8 q7[15], r1
; CHECK-NEXT:    vadd.i8 q0, q6, q7
; CHECK-NEXT:    vldrw.u32 q7, [sp, #144] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q6, [sp, #160] @ 16-byte Reload
; CHECK-NEXT:    vstrb.8 q0, [r9], #16
; CHECK-NEXT:    vldrw.u32 q0, [sp, #176] @ 16-byte Reload
; CHECK-NEXT:    vadd.i32 q7, q7, q5
; CHECK-NEXT:    vadd.i32 q6, q6, q5
; CHECK-NEXT:    vadd.i32 q0, q0, q5
; CHECK-NEXT:    bne.w .LBB13_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB13_2 Depth=1
; CHECK-NEXT:    ldr r1, [sp, #124] @ 4-byte Reload
; CHECK-NEXT:    ldr.w r11, [sp, #116] @ 4-byte Reload
; CHECK-NEXT:    cmp r11, r1
; CHECK-NEXT:    bne.w .LBB13_2
; CHECK-NEXT:  .LBB13_5: @ %for.cond.cleanup
; CHECK-NEXT:    add sp, #328
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI13_0:
; CHECK-NEXT:    .long 38 @ 0x26
; CHECK-NEXT:    .long 41 @ 0x29
; CHECK-NEXT:    .long 44 @ 0x2c
; CHECK-NEXT:    .long 47 @ 0x2f
; CHECK-NEXT:  .LCPI13_1:
; CHECK-NEXT:    .long 14 @ 0xe
; CHECK-NEXT:    .long 17 @ 0x11
; CHECK-NEXT:    .long 20 @ 0x14
; CHECK-NEXT:    .long 23 @ 0x17
; CHECK-NEXT:  .LCPI13_2:
; CHECK-NEXT:    .long 24 @ 0x18
; CHECK-NEXT:    .long 27 @ 0x1b
; CHECK-NEXT:    .long 30 @ 0x1e
; CHECK-NEXT:    .long 33 @ 0x21
; CHECK-NEXT:  .LCPI13_3:
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 4 @ 0x4
; CHECK-NEXT:    .long 7 @ 0x7
; CHECK-NEXT:    .long 10 @ 0xa
; CHECK-NEXT:  .LCPI13_4:
; CHECK-NEXT:    .long 36 @ 0x24
; CHECK-NEXT:    .long 39 @ 0x27
; CHECK-NEXT:    .long 42 @ 0x2a
; CHECK-NEXT:    .long 45 @ 0x2d
; CHECK-NEXT:  .LCPI13_5:
; CHECK-NEXT:    .long 25 @ 0x19
; CHECK-NEXT:    .long 28 @ 0x1c
; CHECK-NEXT:    .long 31 @ 0x1f
; CHECK-NEXT:    .long 34 @ 0x22
; CHECK-NEXT:  .LCPI13_6:
; CHECK-NEXT:    .long 13 @ 0xd
; CHECK-NEXT:    .long 16 @ 0x10
; CHECK-NEXT:    .long 19 @ 0x13
; CHECK-NEXT:    .long 22 @ 0x16
; CHECK-NEXT:  .LCPI13_7:
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 5 @ 0x5
; CHECK-NEXT:    .long 8 @ 0x8
; CHECK-NEXT:    .long 11 @ 0xb
; CHECK-NEXT:  .LCPI13_8:
; CHECK-NEXT:    .long 26 @ 0x1a
; CHECK-NEXT:    .long 29 @ 0x1d
; CHECK-NEXT:    .long 32 @ 0x20
; CHECK-NEXT:    .long 35 @ 0x23
; CHECK-NEXT:  .LCPI13_9:
; CHECK-NEXT:    .long 37 @ 0x25
; CHECK-NEXT:    .long 40 @ 0x28
; CHECK-NEXT:    .long 43 @ 0x2b
; CHECK-NEXT:    .long 46 @ 0x2e
; CHECK-NEXT:  .LCPI13_10:
; CHECK-NEXT:    .long 12 @ 0xc
; CHECK-NEXT:    .long 15 @ 0xf
; CHECK-NEXT:    .long 18 @ 0x12
; CHECK-NEXT:    .long 21 @ 0x15
; CHECK-NEXT:  .LCPI13_11:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 3 @ 0x3
; CHECK-NEXT:    .long 6 @ 0x6
; CHECK-NEXT:    .long 9 @ 0x9
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = mul nuw nsw <16 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
  %1 = getelementptr inbounds i8, i8* %data, <16 x i32> %0
  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %1, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
  %2 = add nuw nsw <16 x i32> %0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %3 = getelementptr inbounds i8, i8* %data, <16 x i32> %2
  %wide.masked.gather24 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %3, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
  %4 = add nuw nsw <16 x i32> %0, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  %5 = getelementptr inbounds i8, i8* %data, <16 x i32> %4
  %wide.masked.gather25 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %5, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
  %6 = add nsw <16 x i8> %wide.masked.gather24, %wide.masked.gather
  %7 = add nsw <16 x i8> %6, %wide.masked.gather25
  %8 = getelementptr inbounds i8, i8* %dst, i32 %index
  %9 = bitcast i8* %8 to <16 x i8>*
  store <16 x i8> %7, <16 x i8>* %9, align 2
  %index.next = add i32 %index, 16
  %vec.ind.next = add <16 x i32> %vec.ind, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %10 = icmp eq i32 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v16i8_simple:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    .pad #72
; CHECK-NEXT:    sub sp, #72
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    blt.w .LBB14_5
; CHECK-NEXT:  @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT:    adr r5, .LCPI14_3
; CHECK-NEXT:    adr r7, .LCPI14_1
; CHECK-NEXT:    vldrw.u32 q0, [r5]
; CHECK-NEXT:    adr r6, .LCPI14_2
; CHECK-NEXT:    adr r3, .LCPI14_0
; CHECK-NEXT:    bic r12, r2, #7
; CHECK-NEXT:    vstrw.32 q0, [sp, #48] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r7]
; CHECK-NEXT:    vmov.i32 q4, #0x10
; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r6]
; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT:  .LBB14_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB14_3 Depth 2
; CHECK-NEXT:    vldrw.u32 q5, [sp] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q6, [sp, #16] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q7, [sp, #32] @ 16-byte Reload
; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
; CHECK-NEXT:    mov lr, r1
; CHECK-NEXT:    mov r3, r12
; CHECK-NEXT:  .LBB14_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB14_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vadd.i32 q1, q7, r0
; CHECK-NEXT:    vadd.i32 q2, q0, r0
; CHECK-NEXT:    vmov r4, s6
; CHECK-NEXT:    vadd.i32 q3, q5, r0
; CHECK-NEXT:    vmov r6, s12
; CHECK-NEXT:    subs r3, #16
; CHECK-NEXT:    vmov r5, s8
; CHECK-NEXT:    vadd.i32 q5, q5, q4
; CHECK-NEXT:    vadd.i32 q7, q7, q4
; CHECK-NEXT:    vadd.i32 q0, q0, q4
; CHECK-NEXT:    ldrb.w r8, [r4]
; CHECK-NEXT:    vmov r4, s7
; CHECK-NEXT:    ldrb r6, [r6]
; CHECK-NEXT:    ldrb r5, [r5]
; CHECK-NEXT:    ldrb.w r10, [r4]
; CHECK-NEXT:    vmov r4, s9
; CHECK-NEXT:    ldrb.w r9, [r4]
; CHECK-NEXT:    vmov r4, s10
; CHECK-NEXT:    ldrb.w r11, [r4]
; CHECK-NEXT:    vmov r4, s11
; CHECK-NEXT:    vmov.8 q2[0], r6
; CHECK-NEXT:    vmov r6, s13
; CHECK-NEXT:    ldrb r4, [r4]
; CHECK-NEXT:    ldrb r6, [r6]
; CHECK-NEXT:    vmov.8 q2[1], r6
; CHECK-NEXT:    vmov r6, s14
; CHECK-NEXT:    ldrb r6, [r6]
; CHECK-NEXT:    vmov.8 q2[2], r6
; CHECK-NEXT:    vmov r6, s15
; CHECK-NEXT:    vadd.i32 q3, q6, r0
; CHECK-NEXT:    vadd.i32 q6, q6, q4
; CHECK-NEXT:    vmov r7, s12
; CHECK-NEXT:    ldrb r6, [r6]
; CHECK-NEXT:    ldrb r7, [r7]
; CHECK-NEXT:    vmov.8 q2[3], r6
; CHECK-NEXT:    vmov r6, s5
; CHECK-NEXT:    vmov.8 q2[4], r7
; CHECK-NEXT:    vmov r7, s13
; CHECK-NEXT:    ldrb r6, [r6]
; CHECK-NEXT:    ldrb r7, [r7]
; CHECK-NEXT:    vmov.8 q2[5], r7
; CHECK-NEXT:    vmov r7, s14
; CHECK-NEXT:    ldrb r7, [r7]
; CHECK-NEXT:    vmov.8 q2[6], r7
; CHECK-NEXT:    vmov r7, s15
; CHECK-NEXT:    ldrb r7, [r7]
; CHECK-NEXT:    vmov.8 q2[7], r7
; CHECK-NEXT:    vmov r7, s4
; CHECK-NEXT:    ldrb r7, [r7]
; CHECK-NEXT:    vmov.8 q2[8], r7
; CHECK-NEXT:    vmov.8 q2[9], r6
; CHECK-NEXT:    vmov.8 q2[10], r8
; CHECK-NEXT:    vmov.8 q2[11], r10
; CHECK-NEXT:    vmov.8 q2[12], r5
; CHECK-NEXT:    vmov.8 q2[13], r9
; CHECK-NEXT:    vmov.8 q2[14], r11
; CHECK-NEXT:    vmov.8 q2[15], r4
; CHECK-NEXT:    vstrb.8 q2, [lr], #16
; CHECK-NEXT:    bne .LBB14_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB14_2 Depth=1
; CHECK-NEXT:    cmp r12, r2
; CHECK-NEXT:    bne .LBB14_2
; CHECK-NEXT:  .LBB14_5: @ %for.cond.cleanup
; CHECK-NEXT:    add sp, #72
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI14_0:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 3 @ 0x3
; CHECK-NEXT:  .LCPI14_1:
; CHECK-NEXT:    .long 8 @ 0x8
; CHECK-NEXT:    .long 9 @ 0x9
; CHECK-NEXT:    .long 10 @ 0xa
; CHECK-NEXT:    .long 11 @ 0xb
; CHECK-NEXT:  .LCPI14_2:
; CHECK-NEXT:    .long 4 @ 0x4
; CHECK-NEXT:    .long 5 @ 0x5
; CHECK-NEXT:    .long 6 @ 0x6
; CHECK-NEXT:    .long 7 @ 0x7
; CHECK-NEXT:  .LCPI14_3:
; CHECK-NEXT:    .long 12 @ 0xc
; CHECK-NEXT:    .long 13 @ 0xd
; CHECK-NEXT:    .long 14 @ 0xe
; CHECK-NEXT:    .long 15 @ 0xf
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = getelementptr inbounds i8, i8* %data, <16 x i32> %vec.ind
  %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %0, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
  %1 = getelementptr inbounds i8, i8* %dst, i32 %index
  %2 = bitcast i8* %1 to <16 x i8>*
  store <16 x i8> %wide.masked.gather, <16 x i8>* %2, align 2
  %index.next = add i32 %index, 16
  %vec.ind.next = add <16 x i32> %vec.ind, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %3 = icmp eq i32 %index.next, %n.vec
  br i1 %3, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

declare <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>)
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>)
declare <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
declare <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*>, i32, <2 x i1>, <2 x float>)
declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)
declare <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>)
declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>)
declare <16 x i16> @llvm.masked.gather.v16i16.v16p0i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>)
declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>)
declare <16 x half> @llvm.masked.gather.v16f16.v16p0f16(<16 x half*>, i32, <16 x i1>, <16 x half>)
declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>)
declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>)

declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)