; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i64);

define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i64);

define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i64);

define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i64);

define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i64);

define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i64);

define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i64);

define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i64);

define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i64);

define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i64);

define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i64);

define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  i64);

define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  i64);

define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  i64);

define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
  <vscale x 8 x double>,
  double,
  i64);

define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
    <vscale x 8 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}