; RUN: llc -mtriple=aarch64-linux-gnu -mattr=sve -mattr=+use-experimental-zeroing-pseudos < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

; Each test selects the first operand against zeroinitializer before the
; intrinsic call, which should lower to a zeroing movprfx + merging FP op.

;
; FADD
;

define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fadd_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fadd_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAX
;

define <vscale x 8 x half> @fmax_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmax_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmax_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmax_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmax_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmax_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXNM
;

define <vscale x 8 x half> @fmaxnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnm_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnm_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnm_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMIN
;

define <vscale x 8 x half> @fmin_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmin_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmin_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmin_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmin_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmin_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNM
;

define <vscale x 8 x half> @fminnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnm_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnm_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnm_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMUL
;

define <vscale x 8 x half> @fmul_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmul_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmul_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUB
;

define <vscale x 8 x half> @fsub_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsub_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsub_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUBR
;

define <vscale x 8 x half> @fsubr_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsubr_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a_z,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsubr_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsubr_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a_z,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsubr_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsubr_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a_z,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)