//===-- VOP3Instructions.td - Vector Instruction Definitions --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP3 Classes
//===----------------------------------------------------------------------===//

class getVOP3ModPat<VOPProfile P, SDPatternOperator node> {
  dag src0 = !if(P.HasOMod,
    (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
    (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp));

  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
          (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
          (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT src0)))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3PModPat<VOPProfile P, SDPatternOperator node, bit HasExplicitClamp> {
  dag src0_dag = (P.Src0VT (VOP3PMods P.Src0VT:$src0, i32:$src0_modifiers));
  dag src1_dag = (P.Src1VT (VOP3PMods P.Src1VT:$src1, i32:$src1_modifiers));
  dag src2_dag = (P.Src2VT (VOP3PMods P.Src2VT:$src2, i32:$src2_modifiers));
  dag clamp_dag = (i1 timm:$clamp);

  list<dag> ret3 = [(set P.DstVT:$vdst,
    !if(HasExplicitClamp,
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, src2_dag, clamp_dag),
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, src2_dag)))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    !if(HasExplicitClamp,
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag, clamp_dag),
        (DivergentFragOrOp<node, P>.ret src0_dag, src1_dag)))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    !if(HasExplicitClamp,
        (DivergentFragOrOp<node, P>.ret src0_dag, clamp_dag),
        (DivergentFragOrOp<node, P>.ret src0_dag)))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3OpSelPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers)),
          (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3OpSel P.Src2VT:$src2, i32:$src2_modifiers))))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers)),
          (P.Src1VT (VOP3OpSel P.Src1VT:$src1, i32:$src1_modifiers))))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSel P.Src0VT:$src0, i32:$src0_modifiers))))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3OpSelModPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT !if(P.HasClamp, (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers),
                                    (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers)),
          (P.Src2VT (VOP3OpSelMods P.Src2VT:$src2, i32:$src2_modifiers))))];

  list<dag> ret2 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret !if(P.HasClamp, (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers)),
                                    (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))),
          (P.Src1VT (VOP3OpSelMods P.Src1VT:$src1, i32:$src1_modifiers))))];

  list<dag> ret1 = [(set P.DstVT:$vdst,
    (DivergentFragOrOp<node, P>.ret (P.Src0VT (VOP3OpSelMods P.Src0VT:$src0, i32:$src0_modifiers))))];

  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3Pat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 =
    [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2))];
  list<dag> ret2 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0, P.Src1VT:$src1))];
  list<dag> ret1 = [(set P.DstVT:$vdst, (DivergentFragOrOp<node, P>.ret P.Src0VT:$src0))];
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3ClampPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret3 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, i1:$clamp))];
  list<dag> ret2 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, i1:$clamp))];
  list<dag> ret1 = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, i1:$clamp))];
  list<dag> ret = !if(!eq(P.NumSrcArgs, 3), ret3,
                  !if(!eq(P.NumSrcArgs, 2), ret2,
                  ret1));
}

class getVOP3MAIPat<VOPProfile P, SDPatternOperator node> {
  list<dag> ret = [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2,
                                        timm:$cbsz, timm:$abid, timm:$blgp))];
}

// Consistently gives instructions a _e64 suffix.
multiclass VOP3Inst_Pseudo_Wrapper<string opName, VOPProfile P, list<dag> pattern = [], bit VOP3Only = 0> {
  def _e64 : VOP3_Pseudo<opName, P, pattern, VOP3Only>;
}

class VOP3InstBase<string OpName, VOPProfile P, SDPatternOperator node = null_frag, bit VOP3Only = 0> :
  VOP3_Pseudo<OpName, P,
    !if(P.HasOpSel,
        !if(P.HasModifiers,
            getVOP3OpSelModPat<P, node>.ret,
            getVOP3OpSelPat<P, node>.ret),
        !if(P.HasModifiers,
            getVOP3ModPat<P, node>.ret,
            !if(P.HasIntClamp,
                getVOP3ClampPat<P, node>.ret,
                !if (P.IsMAI,
                    getVOP3MAIPat<P, node>.ret,
                    getVOP3Pat<P, node>.ret)))),
    VOP3Only, 0, P.HasOpSel> {

  let IntClamp = P.HasIntClamp;
  let AsmMatchConverter =
    !if(P.HasOpSel,
        "cvtVOP3OpSel",
        !if(!or(P.HasModifiers, P.HasOMod, P.HasIntClamp),
            "cvtVOP3",
            ""));
}

multiclass VOP3Inst<string OpName, VOPProfile P, SDPatternOperator node = null_frag, bit VOP3Only = 0> {
  def _e64 : VOP3InstBase<OpName, P, node, VOP3Only>;
}

// Special case for v_div_fmas_{f32|f64}, since it seems to be the
// only VOP instruction that implicitly reads VCC.
let Asm64 = " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod" in {
def VOP_F32_F32_F32_F32_VCC : VOPProfile<[f32, f32, f32, f32]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
}
def VOP_F64_F64_F64_F64_VCC : VOPProfile<[f64, f64, f64, f64]> {
  let Outs64 = (outs DstRC.RegClass:$vdst);
}
}

class VOP3Features<bit Clamp, bit OpSel, bit Packed, bit MAI> {
  bit HasClamp = Clamp;
  bit HasOpSel = OpSel;
  bit IsPacked = Packed;
  bit IsMAI = MAI;
}

def VOP3_REGULAR : VOP3Features<0, 0, 0, 0>;
def VOP3_CLAMP   : VOP3Features<1, 0, 0, 0>;
def VOP3_OPSEL   : VOP3Features<1, 1, 0, 0>;
def VOP3_PACKED  : VOP3Features<1, 1, 1, 0>;
def VOP3_MAI     : VOP3Features<0, 0, 0, 1>;

class VOP3_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR> : VOPProfile<P.ArgVT> {

  let HasClamp = !if(Features.HasClamp, 1, P.HasClamp);
  let HasOpSel = !if(Features.HasOpSel, 1, P.HasOpSel);
  let IsMAI = !if(Features.IsMAI, 1, P.IsMAI);
  let IsPacked = !if(Features.IsPacked, 1, P.IsPacked);

  let HasModifiers = !if(Features.IsMAI, 0, !or(Features.IsPacked, P.HasModifiers));

  // FIXME: Hack to stop printing _e64
  let Outs64 = (outs DstRC.RegClass:$vdst);

  let Asm64 =
    " " # !if(Features.HasOpSel,
              getAsmVOP3OpSel<NumSrcArgs, HasIntClamp, HasOMod,
                              HasSrc0FloatMods, HasSrc1FloatMods, HasSrc2FloatMods>.ret,
              !if(Features.HasClamp,
                  getAsm64<HasDst, NumSrcArgs, HasIntClamp,
                           HasModifiers, HasOMod, DstVT>.ret,
                  P.Asm64));
  let NeedPatGen = P.NeedPatGen;
}

class VOP3b_Profile<ValueType vt> : VOPProfile<[vt, vt, vt, vt]> {
  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = " $vdst, $sdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod";
}

def VOP3b_F32_I1_F32_F32_F32 : VOP3b_Profile<f32> {
  // FIXME: Hack to stop printing _e64
  let DstRC = RegisterOperand<VGPR_32>;
}

def VOP3b_F64_I1_F64_F64_F64 : VOP3b_Profile<f64> {
  // FIXME: Hack to stop printing _e64
  let DstRC = RegisterOperand<VReg_64>;
}

def VOP3b_I64_I1_I32_I32_I64 : VOPProfile<[i64, i32, i32, i64]> {
  let HasClamp = 1;

  // FIXME: Hack to stop printing _e64
  let DstRC = RegisterOperand<VReg_64>;

  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
  let Asm64 = " $vdst, $sdst, $src0, $src1, $src2$clamp";
}

//===----------------------------------------------------------------------===//
// VOP3 INTERP
//===----------------------------------------------------------------------===//

class VOP3Interp<string OpName, VOPProfile P, list<dag> pattern = []> :
  VOP3_Pseudo<OpName, P, pattern> {
  let AsmMatchConverter = "cvtVOP3Interp";
  let mayRaiseFPException = 0;
}

def VOP3_INTERP : VOPProfile<[f32, f32, i32, untyped]> {
  let Ins64 = (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                   Attr:$attr, AttrChan:$attrchan,
                   clampmod0:$clamp, omod0:$omod);

  let Asm64 = "$vdst, $src0_modifiers, $attr$attrchan$clamp$omod";
}

def VOP3_INTERP_MOV : VOPProfile<[f32, i32, i32, untyped]> {
  let Ins64 = (ins InterpSlot:$src0,
                   Attr:$attr, AttrChan:$attrchan,
                   clampmod0:$clamp, omod0:$omod);

  let Asm64 = "$vdst, $src0, $attr$attrchan$clamp$omod";

  let HasClamp = 1;
  let HasSrc0Mods = 0;
}

class getInterp16Asm<bit HasSrc2, bit HasOMod> {
  string src2 = !if(HasSrc2, ", $src2_modifiers", "");
  string omod = !if(HasOMod, "$omod", "");
  string ret =
    " $vdst, $src0_modifiers, $attr$attrchan"#src2#"$high$clamp"#omod;
}

class getInterp16Ins<bit HasSrc2, bit HasOMod,
                     Operand Src0Mod, Operand Src2Mod> {
  dag ret = !if(HasSrc2,
                !if(HasOMod,
                    (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                         Attr:$attr, AttrChan:$attrchan,
                         Src2Mod:$src2_modifiers, VRegSrc_32:$src2,
                         highmod:$high, clampmod0:$clamp, omod0:$omod),
                    (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                         Attr:$attr, AttrChan:$attrchan,
                         Src2Mod:$src2_modifiers, VRegSrc_32:$src2,
                         highmod:$high, clampmod0:$clamp)
                ),
                (ins Src0Mod:$src0_modifiers, VRegSrc_32:$src0,
                     Attr:$attr, AttrChan:$attrchan,
                     highmod:$high, clampmod0:$clamp, omod0:$omod)
            );
}

class VOP3_INTERP16<list<ValueType> ArgVT> : VOPProfile<ArgVT> {
  let HasOMod = !ne(DstVT.Value, f16.Value);
  let HasHigh = 1;

  let Outs64 = (outs VGPR_32:$vdst);
  let Ins64 = getInterp16Ins<HasSrc2, HasOMod, Src0Mod, Src2Mod>.ret;
  let Asm64 = getInterp16Asm<HasSrc2, HasOMod>.ret;
}

//===----------------------------------------------------------------------===//
// VOP3 Instructions
//===----------------------------------------------------------------------===//

let isCommutable = 1 in {

let mayRaiseFPException = 0 in {
let SubtargetPredicate = HasMadMacF32Insts in {
defm V_MAD_LEGACY_F32 : VOP3Inst <"v_mad_legacy_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
defm V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>;
} // End SubtargetPredicate = HasMadMacInsts

let SubtargetPredicate = HasFmaLegacy32 in
defm V_FMA_LEGACY_F32 : VOP3Inst <"v_fma_legacy_f32",
                                  VOP3_Profile<VOP_F32_F32_F32_F32>,
                                  int_amdgcn_fma_legacy>;
}

defm V_MAD_I32_I24 : VOP3Inst <"v_mad_i32_i24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, any_fma>;
defm V_LERP_U8 : VOP3Inst <"v_lerp_u8", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_lerp>;

let SchedRW = [WriteDoubleAdd] in {
let FPDPRounding = 1 in {
defm V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, any_fma>;
defm V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile<VOP_F64_F64_F64>, any_fadd, 1>;
defm V_MUL_F64 : VOP3Inst <"v_mul_f64", VOP3_Profile<VOP_F64_F64_F64>, fmul, 1>;
} // End FPDPRounding = 1
defm V_MIN_F64 : VOP3Inst <"v_min_f64", VOP3_Profile<VOP_F64_F64_F64>, fminnum_like, 1>;
defm V_MAX_F64 : VOP3Inst <"v_max_f64", VOP3_Profile<VOP_F64_F64_F64>, fmaxnum_like, 1>;
} // End SchedRW = [WriteDoubleAdd]

let SchedRW = [WriteQuarterRate32] in {
defm V_MUL_LO_U32 : VOP3Inst <"v_mul_lo_u32", VOP3_Profile<VOP_I32_I32_I32>, mul>;
defm V_MUL_HI_U32 : VOP3Inst <"v_mul_hi_u32", VOP3_Profile<VOP_I32_I32_I32>, mulhu>;
defm V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", VOP3_Profile<VOP_I32_I32_I32>>;
defm V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", VOP3_Profile<VOP_I32_I32_I32>, mulhs>;
} // End SchedRW = [WriteQuarterRate32]

let Uses = [MODE, VCC, EXEC] in {
// v_div_fmas_f32:
//   result = src0 * src1 + src2
//   if (vcc)
//     result *= 2^32
//
let SchedRW = [WriteFloatFMA] in
defm V_DIV_FMAS_F32 : VOP3Inst_Pseudo_Wrapper <"v_div_fmas_f32", VOP_F32_F32_F32_F32_VCC, []>;
// v_div_fmas_f64:
//   result = src0 * src1 + src2
//   if (vcc)
//     result *= 2^64
//
let SchedRW = [WriteDouble], FPDPRounding = 1 in
defm V_DIV_FMAS_F64 : VOP3Inst_Pseudo_Wrapper <"v_div_fmas_f64", VOP_F64_F64_F64_F64_VCC, []>;
} // End Uses = [MODE, VCC, EXEC]
} // End isCommutable = 1

let mayRaiseFPException = 0 in {
defm V_CUBEID_F32 : VOP3Inst <"v_cubeid_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubeid>;
defm V_CUBESC_F32 : VOP3Inst <"v_cubesc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubesc>;
defm V_CUBETC_F32 : VOP3Inst <"v_cubetc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubetc>;
defm V_CUBEMA_F32 : VOP3Inst <"v_cubema_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubema>;
} // End mayRaiseFPException

defm V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_u32>;
defm V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_i32>;
defm V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfi>;
defm V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, fshr>;
defm V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_alignbyte>;

let mayRaiseFPException = 0 in { // XXX - Seems suspect but manual doesn't say it does
  defm V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmin3>;
  defm V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmin3>;
  defm V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumin3>;
  defm V_MAX3_F32 : VOP3Inst <"v_max3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmax3>;
  defm V_MAX3_I32 : VOP3Inst <"v_max3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmax3>;
  defm V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumax3>;
  defm V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmed3>;
  defm V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmed3>;
  defm V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumed3>;
} // End mayRaiseFPException = 0

defm V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_SAD_U32 : VOP3Inst <"v_sad_u32", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
defm V_CVT_PK_U8_F32 : VOP3Inst<"v_cvt_pk_u8_f32", VOP3_Profile<VOP_I32_F32_I32_I32>, int_amdgcn_cvt_pk_u8_f32>;
defm V_DIV_FIXUP_F32 : VOP3Inst <"v_div_fixup_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUdiv_fixup>;

let SchedRW = [WriteDoubleAdd], FPDPRounding = 1 in {
  defm V_DIV_FIXUP_F64 : VOP3Inst <"v_div_fixup_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, AMDGPUdiv_fixup>;
  defm V_LDEXP_F64 : VOP3Inst <"v_ldexp_f64", VOP3_Profile<VOP_F64_F64_I32>, AMDGPUldexp, 1>;
} // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1

let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it does.
  let SchedRW = [WriteFloatFMA, WriteSALU] in
  defm V_DIV_SCALE_F32 : VOP3Inst_Pseudo_Wrapper <"v_div_scale_f32", VOP3b_F32_I1_F32_F32_F32, [], 1>;

  // Double precision division pre-scale.
  let SchedRW = [WriteDouble, WriteSALU], FPDPRounding = 1 in
  defm V_DIV_SCALE_F64 : VOP3Inst_Pseudo_Wrapper <"v_div_scale_f64", VOP3b_F64_I1_F64_F64_F64, [], 1>;
} // End mayRaiseFPException = 0

defm V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;

let Constraints = "@earlyclobber $vdst" in {
defm V_MQSAD_PK_U16_U8 : VOP3Inst <"v_mqsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
} // End Constraints = "@earlyclobber $vdst"

let SchedRW = [WriteDouble] in {
defm V_TRIG_PREOP_F64 : VOP3Inst <"v_trig_preop_f64", VOP3_Profile<VOP_F64_F64_I32>, int_amdgcn_trig_preop>;
} // End SchedRW = [WriteDouble]

let SchedRW = [Write64Bit] in {
  let SubtargetPredicate = isGFX6GFX7 in {
  defm V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_I64_I64_I32>, shl>;
  defm V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_I64_I64_I32>, srl>;
  defm V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>, sra>;
  } // End SubtargetPredicate = isGFX6GFX7

  let SubtargetPredicate = isGFX8Plus in {
  defm V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshl_rev>;
  defm V_LSHRREV_B64 : VOP3Inst <"v_lshrrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshr_rev>;
  defm V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile<VOP_I64_I32_I64>, ashr_rev>;
  } // End SubtargetPredicate = isGFX8Plus
} // End SchedRW = [Write64Bit]

def : GCNPat<
  (i32 (getDivergentFrag<sext>.ret i16:$src)),
  (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10))))
>;

let SubtargetPredicate = isGFX6GFX7GFX10 in {
defm V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
} // End SubtargetPredicate = isGFX6GFX7GFX10

let SchedRW = [Write32Bit] in {
let SubtargetPredicate = isGFX8Plus in {
defm V_PERM_B32 : VOP3Inst <"v_perm_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUperm>;
} // End SubtargetPredicate = isGFX8Plus
} // End SchedRW = [Write32Bit]

let SubtargetPredicate = isGFX7Plus in {

let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in {
defm V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
defm V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOP3_Profile<VOP_V4I32_I64_I32_V4I32, VOP3_CLAMP>>;
} // End Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32]

let isCommutable = 1 in {
let SchedRW = [WriteQuarterRate32, WriteSALU] in {
defm V_MAD_U64_U32 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>;
defm V_MAD_I64_I32 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>;
} // End SchedRW = [WriteQuarterRate32, WriteSALU]
} // End isCommutable = 1

} // End SubtargetPredicate = isGFX7Plus

let FPDPRounding = 1 in {
  let Predicates = [Has16BitInsts, isGFX8Only] in {
    defm V_DIV_FIXUP_F16 : VOP3Inst <"v_div_fixup_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, AMDGPUdiv_fixup>;
    defm V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, any_fma>;
  } // End Predicates = [Has16BitInsts, isGFX8Only]

  let renamedInGFX9 = 1, Predicates = [Has16BitInsts, isGFX9Plus] in {
    defm V_DIV_FIXUP_F16_gfx9 : VOP3Inst <"v_div_fixup_f16_gfx9",
                                          VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUdiv_fixup>;
    defm V_FMA_F16_gfx9 : VOP3Inst <"v_fma_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, any_fma>;
  } // End renamedInGFX9 = 1, Predicates = [Has16BitInsts, isGFX9Plus]
} // End FPDPRounding = 1

let SubtargetPredicate = Has16BitInsts, isCommutable = 1 in {

let renamedInGFX9 = 1 in {
  defm V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
  defm V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
  let FPDPRounding = 1 in {
    defm V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fmad>;
    let Uses = [MODE, M0, EXEC] in {
    // For some reason the intrinsic operands are in a different order
    // from the instruction operands.
    def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>,
           [(set f16:$vdst,
             (int_amdgcn_interp_p2_f16 (VOP3Mods f32:$src2, i32:$src2_modifiers),
                                       (VOP3Mods f32:$src0, i32:$src0_modifiers),
                                       (i32 timm:$attrchan),
                                       (i32 timm:$attr),
                                       (i1 timm:$high),
                                       M0))]>;
    } // End Uses = [M0, MODE, EXEC]
  } // End FPDPRounding = 1
} // End renamedInGFX9 = 1

let SubtargetPredicate = isGFX9Only, FPDPRounding = 1 in {
  defm V_MAD_F16_gfx9 : VOP3Inst <"v_mad_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>>;
} // End SubtargetPredicate = isGFX9Only, FPDPRounding = 1

let SubtargetPredicate = isGFX9Plus in {
defm V_MAD_U16_gfx9 : VOP3Inst <"v_mad_u16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>;
defm V_MAD_I16_gfx9 : VOP3Inst <"v_mad_i16_gfx9", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>>;
def V_INTERP_P2_F16_gfx9 : VOP3Interp <"v_interp_p2_f16_gfx9", VOP3_INTERP16<[f16, f32, i32, f32]>>;
} // End SubtargetPredicate = isGFX9Plus

let Uses = [MODE, M0, EXEC], FPDPRounding = 1 in {
def V_INTERP_P1LL_F16 : VOP3Interp <"v_interp_p1ll_f16", VOP3_INTERP16<[f32, f32, i32, untyped]>,
       [(set f32:$vdst, (int_amdgcn_interp_p1_f16 (VOP3Mods f32:$src0, i32:$src0_modifiers),
                                                  (i32 timm:$attrchan),
                                                  (i32 timm:$attr),
                                                  (i1 timm:$high), M0))]> {
  // This predicate should only apply to the selection pattern. The
  // instruction still exists and should decode on subtargets with
  // other bank counts.
  let OtherPredicates = [has32BankLDS];
}

def V_INTERP_P1LV_F16 : VOP3Interp <"v_interp_p1lv_f16", VOP3_INTERP16<[f32, f32, i32, f16]>>;

} // End Uses = [MODE, M0, EXEC], FPDPRounding = 1

} // End SubtargetPredicate = Has16BitInsts, isCommutable = 1

def : GCNPat<
  (i64 (getDivergentFrag<sext>.ret i16:$src)),
    (REG_SEQUENCE VReg_64,
      (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10)))), sub0,
      (i32 (COPY_TO_REGCLASS
         (V_ASHRREV_I32_e32 (S_MOV_B32 (i32 0x1f)),
                            (i32 (V_BFE_I32_e64 $src, (S_MOV_B32 (i32 0)), (S_MOV_B32 (i32 0x10))))),
         VGPR_32)), sub1)
>;

let SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC] in {
def V_INTERP_P1_F32_e64  : VOP3Interp <"v_interp_p1_f32", VOP3_INTERP>;
def V_INTERP_P2_F32_e64  : VOP3Interp <"v_interp_p2_f32", VOP3_INTERP>;
def V_INTERP_MOV_F32_e64 : VOP3Interp <"v_interp_mov_f32", VOP3_INTERP_MOV>;
} // End SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC]

let Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] in {

multiclass Ternary_i16_Pats <SDPatternOperator op1, SDPatternOperator op2,
                             Instruction inst> {
def : GCNPat <
  (op2 (op1 i16:$src0, i16:$src1), i16:$src2),
  (inst i16:$src0, i16:$src1, i16:$src2, (i1 0))
>;

}

defm: Ternary_i16_Pats<mul, add, V_MAD_U16_e64>;
defm: Ternary_i16_Pats<mul, add, V_MAD_I16_e64>;

} // End Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9]

let Predicates = [Has16BitInsts, isGFX10Plus] in {

multiclass Ternary_i16_Pats_gfx9 <SDPatternOperator op1, SDPatternOperator op2,
                                  Instruction inst> {
def : GCNPat <
  (op2 (op1 i16:$src0, i16:$src1), i16:$src2),
  (inst SRCMODS.NONE, $src0, SRCMODS.NONE, $src1, SRCMODS.NONE, $src2, DSTCLAMP.NONE)
>;

}

defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_U16_gfx9_e64>;
defm: Ternary_i16_Pats_gfx9<mul, add, V_MAD_I16_gfx9_e64>;

} // End Predicates = [Has16BitInsts, isGFX10Plus]

class ThreeOpFrag<SDPatternOperator op1, SDPatternOperator op2> : PatFrag<
  (ops node:$x, node:$y, node:$z),
  // When the inner operation is used multiple times, selecting 3-op
  // instructions may still be beneficial -- if the other users can be
  // combined similarly. Let's be conservative for now.
  (op2 (HasOneUseBinOp<op1> node:$x, node:$y), node:$z),
  [{
    // Only use VALU ops when the result is divergent.
    if (!N->isDivergent())
      return false;

    // Check constant bus limitations.
    //
    // Note: Use !isDivergent as a conservative proxy for whether the value
    //       is in an SGPR (uniform values can end up in VGPRs as well).
    unsigned ConstantBusUses = 0;
    for (unsigned i = 0; i < 3; ++i) {
      if (!Operands[i]->isDivergent() &&
          !isInlineImmediate(Operands[i].getNode())) {
        ConstantBusUses++;
        // This uses AMDGPU::V_ADD3_U32_e64, but all three operand instructions
        // have the same constant bus limit.
        if (ConstantBusUses > Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32_e64))
          return false;
      }
    }

    return true;
  }]> {
  let PredicateCodeUsesOperands = 1;

  // The divergence predicate is irrelevant in GlobalISel, as we have
  // proper register bank checks. We just need to verify the constant
  // bus restriction when all the sources are considered.
  //
  // FIXME: With unlucky SGPR operands, we could penalize code by
  // blocking folding SGPR->VGPR copies later.
  // FIXME: There's no register bank verifier
  let GISelPredicateCode = [{
    const int ConstantBusLimit = Subtarget->getConstantBusLimit(AMDGPU::V_ADD3_U32_e64);
    int ConstantBusUses = 0;
    for (unsigned i = 0; i < 3; ++i) {
      const RegisterBank *RegBank = RBI.getRegBank(Operands[i]->getReg(), MRI, TRI);
      if (RegBank->getID() == AMDGPU::SGPRRegBankID) {
        if (++ConstantBusUses > ConstantBusLimit)
          return false;
      }
    }

    return true;
  }];
}

let SubtargetPredicate = isGFX9Plus in {
defm V_PACK_B32_F16 : VOP3Inst <"v_pack_b32_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
defm V_LSHL_ADD_U32 : VOP3Inst <"v_lshl_add_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
defm V_ADD_LSHL_U32 : VOP3Inst <"v_add_lshl_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
defm V_ADD3_U32 : VOP3Inst <"v_add3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
defm V_LSHL_OR_B32 : VOP3Inst <"v_lshl_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
defm V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
defm V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
defm V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;

defm V_MED3_F16 : VOP3Inst <"v_med3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmed3>;
defm V_MED3_I16 : VOP3Inst <"v_med3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmed3>;
defm V_MED3_U16 : VOP3Inst <"v_med3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumed3>;

defm V_MIN3_F16 : VOP3Inst <"v_min3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmin3>;
defm V_MIN3_I16 : VOP3Inst <"v_min3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmin3>;
defm V_MIN3_U16 : VOP3Inst <"v_min3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumin3>;

defm V_MAX3_F16 : VOP3Inst <"v_max3_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUfmax3>;
defm V_MAX3_I16 : VOP3Inst <"v_max3_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUsmax3>;
defm V_MAX3_U16 : VOP3Inst <"v_max3_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_OPSEL>, AMDGPUumax3>;

defm V_ADD_I16 : VOP3Inst <"v_add_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;
defm V_SUB_I16 : VOP3Inst <"v_sub_i16", VOP3_Profile<VOP_I16_I16_I16, VOP3_OPSEL>>;

defm V_MAD_U32_U16 : VOP3Inst <"v_mad_u32_u16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;
defm V_MAD_I32_I16 : VOP3Inst <"v_mad_i32_i16", VOP3_Profile<VOP_I32_I16_I16_I32, VOP3_OPSEL>>;

defm V_CVT_PKNORM_I16_F16 : VOP3Inst <"v_cvt_pknorm_i16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;
defm V_CVT_PKNORM_U16_F16 : VOP3Inst <"v_cvt_pknorm_u16_f16", VOP3_Profile<VOP_B32_F16_F16, VOP3_OPSEL>>;

defm V_ADD_I32 : VOP3Inst <"v_add_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;
defm V_SUB_I32 : VOP3Inst <"v_sub_i32", VOP3_Profile<VOP_I32_I32_I32_ARITH>>;

class ThreeOp_i32_Pats <SDPatternOperator op1, SDPatternOperator op2, Instruction inst> : GCNPat <
  // This matches (op2 (op1 i32:$src0, i32:$src1), i32:$src2) with conditions.
  (ThreeOpFrag<op1, op2> i32:$src0, i32:$src1, i32:$src2),
  (inst VSrc_b32:$src0, VSrc_b32:$src1, VSrc_b32:$src2)
>;

def : ThreeOp_i32_Pats<shl, add, V_LSHL_ADD_U32_e64>;
def : ThreeOp_i32_Pats<add, shl, V_ADD_LSHL_U32_e64>;
def : ThreeOp_i32_Pats<add, add, V_ADD3_U32_e64>;
def : ThreeOp_i32_Pats<shl, or, V_LSHL_OR_B32_e64>;
def : ThreeOp_i32_Pats<and, or, V_AND_OR_B32_e64>;
def : ThreeOp_i32_Pats<or, or, V_OR3_B32_e64>;
def : ThreeOp_i32_Pats<xor, add, V_XAD_U32_e64>;

def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64, i32>;
def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64, i32>;

// FIXME: Probably should hardcode clamp bit in pseudo and avoid this.
class OpSelBinOpClampPat<SDPatternOperator node,
                         Instruction inst> : GCNPat<
 (node (i16 (VOP3OpSel i16:$src0, i32:$src0_modifiers)),
       (i16 (VOP3OpSel i16:$src1, i32:$src1_modifiers))),
 (inst $src0_modifiers, $src0, $src1_modifiers, $src1, DSTCLAMP.ENABLE, 0)
>;

def : OpSelBinOpClampPat<saddsat, V_ADD_I16_e64>;
def : OpSelBinOpClampPat<ssubsat, V_SUB_I16_e64>;
} // End SubtargetPredicate = isGFX9Plus

def VOP3_PERMLANE_Profile : VOP3_Profile<VOPProfile <[i32, i32, i32, i32]>, VOP3_OPSEL> {
  let Src0RC64 = VRegSrc_32;
  let Src1RC64 = SCSrc_b32;
  let Src2RC64 = SCSrc_b32;
  let InsVOP3OpSel = (ins IntOpSelMods:$src0_modifiers, VRegSrc_32:$src0,
                          IntOpSelMods:$src1_modifiers, SCSrc_b32:$src1,
                          IntOpSelMods:$src2_modifiers, SCSrc_b32:$src2,
                          VGPR_32:$vdst_in, op_sel0:$op_sel);
  let HasClamp = 0;
}

class PermlanePat<SDPatternOperator permlane,
                  Instruction inst> : GCNPat<
  (permlane i32:$vdst_in, i32:$src0, i32:$src1, i32:$src2,
            timm:$fi, timm:$bc),
  (inst (as_i1timm $fi), VGPR_32:$src0, (as_i1timm $bc),
        SCSrc_b32:$src1, 0, SCSrc_b32:$src2, VGPR_32:$vdst_in)
>;

// Permlane intrinsic that has either fetch invalid or bound control
// fields enabled.
class BoundControlOrFetchInvalidPermlane<SDPatternOperator permlane> :
  PatFrag<(ops node:$vdst_in, node:$src0, node:$src1,
               node:$src2, node:$fi, node:$bc),
          (permlane node:$vdst_in, node:$src0, node:
                    $src1, node:$src2, node:$fi, node:$bc)> {
  let PredicateCode = [{ return N->getConstantOperandVal(5) != 0 ||
                                N->getConstantOperandVal(6) != 0; }];
  let GISelPredicateCode = [{ return MI.getOperand(6).getImm() != 0 ||
                                     MI.getOperand(7).getImm() != 0; }];
}

// Drop the input value if it won't be read.
class PermlaneDiscardVDstIn<SDPatternOperator permlane,
                            Instruction inst> : GCNPat<
  (permlane srcvalue, i32:$src0, i32:$src1, i32:$src2,
            timm:$fi, timm:$bc),
  (inst (as_i1timm $fi), VGPR_32:$src0, (as_i1timm $bc),
        SCSrc_b32:$src1, 0, SCSrc_b32:$src2, (IMPLICIT_DEF))
>;

let SubtargetPredicate = isGFX10Plus in {
  defm V_XOR3_B32 : VOP3Inst <"v_xor3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
  def : ThreeOp_i32_Pats<xor, xor, V_XOR3_B32_e64>;

  let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
    defm V_PERMLANE16_B32 : VOP3Inst<"v_permlane16_b32", VOP3_PERMLANE_Profile>;
    defm V_PERMLANEX16_B32 : VOP3Inst<"v_permlanex16_b32", VOP3_PERMLANE_Profile>;
  } // End $vdst = $vdst_in, DisableEncoding $vdst_in

  def : PermlanePat<int_amdgcn_permlane16, V_PERMLANE16_B32_e64>;
  def : PermlanePat<int_amdgcn_permlanex16, V_PERMLANEX16_B32_e64>;

  def : PermlaneDiscardVDstIn<
    BoundControlOrFetchInvalidPermlane<int_amdgcn_permlane16>,
    V_PERMLANE16_B32_e64>;
  def : PermlaneDiscardVDstIn<
    BoundControlOrFetchInvalidPermlane<int_amdgcn_permlanex16>,
    V_PERMLANEX16_B32_e64>;
} // End SubtargetPredicate = isGFX10Plus

class DivFmasPat<ValueType vt, Instruction inst, Register CondReg> : GCNPat<
  (AMDGPUdiv_fmas (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)),
                  (vt (VOP3Mods vt:$src1, i32:$src1_modifiers)),
                  (vt (VOP3Mods vt:$src2, i32:$src2_modifiers)),
                  (i1 CondReg)),
  (inst $src0_modifiers, $src0, $src1_modifiers, $src1, $src2_modifiers, $src2)
>;

let WaveSizePredicate = isWave64 in {
def : DivFmasPat<f32, V_DIV_FMAS_F32_e64, VCC>;
def : DivFmasPat<f64, V_DIV_FMAS_F64_e64, VCC>;
}

let WaveSizePredicate = isWave32 in {
def : DivFmasPat<f32, V_DIV_FMAS_F32_e64, VCC_LO>;
def : DivFmasPat<f64, V_DIV_FMAS_F64_e64, VCC_LO>;
}

//===----------------------------------------------------------------------===//
// Integer Clamp Patterns
//===----------------------------------------------------------------------===//

class getClampPat<VOPProfile P, SDPatternOperator node> {
  dag ret3 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2));
  dag ret2 = (P.DstVT (node P.Src0VT:$src0, P.Src1VT:$src1));
  dag ret1 = (P.DstVT (node P.Src0VT:$src0));
  dag ret = !if(!eq(P.NumSrcArgs, 3), ret3,
            !if(!eq(P.NumSrcArgs, 2), ret2,
            ret1));
}

class getClampRes<VOPProfile P, Instruction inst> {
  dag ret3 = (inst P.Src0VT:$src0, P.Src1VT:$src1, P.Src2VT:$src2, (i1 0));
  dag ret2 = (inst P.Src0VT:$src0, P.Src1VT:$src1, (i1 0));
  dag ret1 = (inst P.Src0VT:$src0, (i1 0));
  dag ret = !if(!eq(P.NumSrcArgs, 3), ret3,
            !if(!eq(P.NumSrcArgs, 2), ret2,
            ret1));
}

class IntClampPat<VOP3InstBase inst, SDPatternOperator node> : GCNPat<
  getClampPat<inst.Pfl, node>.ret,
  getClampRes<inst.Pfl, inst>.ret
>;

def : IntClampPat<V_MAD_I32_I24_e64, AMDGPUmad_i24>;
def : IntClampPat<V_MAD_U32_U24_e64, AMDGPUmad_u24>;

def : IntClampPat<V_SAD_U8_e64, int_amdgcn_sad_u8>;
def : IntClampPat<V_SAD_HI_U8_e64, int_amdgcn_sad_hi_u8>;
def : IntClampPat<V_SAD_U16_e64, int_amdgcn_sad_u16>;

def : IntClampPat<V_MSAD_U8_e64, int_amdgcn_msad_u8>;
def : IntClampPat<V_MQSAD_PK_U16_U8_e64, int_amdgcn_mqsad_pk_u16_u8>;

def : IntClampPat<V_QSAD_PK_U16_U8_e64, int_amdgcn_qsad_pk_u16_u8>;
def : IntClampPat<V_MQSAD_U32_U8_e64, int_amdgcn_mqsad_u32_u8>;

//===----------------------------------------------------------------------===//
// Target-specific instruction encodings.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
  multiclass VOP3_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  multiclass VOP3_Real_No_Suffix_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3_Real_gfx10_with_name<bits<10> op, string opName,
                                       string asmName> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(opName#"_e64"), SIEncodingFamily.GFX10>,
      VOP3e_gfx10<op, !cast<VOP3_Pseudo>(opName#"_e64").Pfl> {
        VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName#"_e64");
        let AsmString = asmName # ps.AsmOperands;
      }
  }
  multiclass VOP3be_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
      VOP3be_gfx10<op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  multiclass VOP3Interp_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>,
      VOP3Interp_gfx10<op, !cast<VOP3_Pseudo>(NAME).Pfl>;
  }
  multiclass VOP3OpSel_Real_gfx10<bits<10> op> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
      VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  multiclass VOP3OpSel_Real_gfx10_with_name<bits<10> op, string opName,
                                            string asmName> {
    def _gfx10 :
      VOP3_Real<!cast<VOP3_Pseudo>(opName#"_e64"), SIEncodingFamily.GFX10>,
      VOP3OpSel_gfx10<op, !cast<VOP3_Pseudo>(opName#"_e64").Pfl> {
        VOP3_Pseudo ps = !cast<VOP3_Pseudo>(opName#"_e64");
        let AsmString = asmName # ps.AsmOperands;
      }
  }
} // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"

defm V_READLANE_B32 : VOP3_Real_No_Suffix_gfx10<0x360>;

let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) in {
  defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx10<0x361>;
} // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in)

let SubtargetPredicate = isGFX10Before1030 in {
  defm V_MUL_LO_I32       : VOP3_Real_gfx10<0x16b>;
}

defm V_XOR3_B32           : VOP3_Real_gfx10<0x178>;
defm V_LSHLREV_B64        : VOP3_Real_gfx10<0x2ff>;
defm V_LSHRREV_B64        : VOP3_Real_gfx10<0x300>;
defm V_ASHRREV_I64        : VOP3_Real_gfx10<0x301>;
defm V_PERM_B32           : VOP3_Real_gfx10<0x344>;
defm V_XAD_U32            : VOP3_Real_gfx10<0x345>;
defm V_LSHL_ADD_U32       : VOP3_Real_gfx10<0x346>;
defm V_ADD_LSHL_U32       : VOP3_Real_gfx10<0x347>;
defm V_ADD3_U32           : VOP3_Real_gfx10<0x36d>;
defm V_LSHL_OR_B32        : VOP3_Real_gfx10<0x36f>;
defm V_AND_OR_B32         : VOP3_Real_gfx10<0x371>;
defm V_OR3_B32            : VOP3_Real_gfx10<0x372>;

// TODO-GFX10: add MC tests for v_add/sub_nc_i16
defm V_ADD_NC_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x30d, "V_ADD_I16", "v_add_nc_i16">;
defm V_SUB_NC_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x30e, "V_SUB_I16", "v_sub_nc_i16">;
defm V_SUB_NC_I32 :
  VOP3_Real_gfx10_with_name<0x376, "V_SUB_I32", "v_sub_nc_i32">;
defm V_ADD_NC_I32 :
  VOP3_Real_gfx10_with_name<0x37f, "V_ADD_I32", "v_add_nc_i32">;

defm V_INTERP_P1_F32_e64  : VOP3Interp_Real_gfx10<0x200>;
defm V_INTERP_P2_F32_e64  : VOP3Interp_Real_gfx10<0x201>;
defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_gfx10<0x202>;

defm V_INTERP_P1LL_F16    : VOP3Interp_Real_gfx10<0x342>;
defm V_INTERP_P1LV_F16    : VOP3Interp_Real_gfx10<0x343>;
defm V_INTERP_P2_F16      : VOP3Interp_Real_gfx10<0x35a>;

defm V_PACK_B32_F16       : VOP3OpSel_Real_gfx10<0x311>;
defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx10<0x312>;
defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx10<0x313>;

defm V_MIN3_F16           : VOP3OpSel_Real_gfx10<0x351>;
defm V_MIN3_I16           : VOP3OpSel_Real_gfx10<0x352>;
defm V_MIN3_U16           : VOP3OpSel_Real_gfx10<0x353>;
defm V_MAX3_F16           : VOP3OpSel_Real_gfx10<0x354>;
defm V_MAX3_I16           : VOP3OpSel_Real_gfx10<0x355>;
defm V_MAX3_U16           : VOP3OpSel_Real_gfx10<0x356>;
defm V_MED3_F16           : VOP3OpSel_Real_gfx10<0x357>;
defm V_MED3_I16           : VOP3OpSel_Real_gfx10<0x358>;
defm V_MED3_U16           : VOP3OpSel_Real_gfx10<0x359>;

defm V_MAD_U32_U16        : VOP3OpSel_Real_gfx10<0x373>;
defm V_MAD_I32_I16        : VOP3OpSel_Real_gfx10<0x375>;

defm V_MAD_U16 :
  VOP3OpSel_Real_gfx10_with_name<0x340, "V_MAD_U16_gfx9", "v_mad_u16">;
defm V_FMA_F16 :
  VOP3OpSel_Real_gfx10_with_name<0x34b, "V_FMA_F16_gfx9", "v_fma_f16">;
defm V_MAD_I16 :
  VOP3OpSel_Real_gfx10_with_name<0x35e, "V_MAD_I16_gfx9", "v_mad_i16">;
defm V_DIV_FIXUP_F16 :
  VOP3OpSel_Real_gfx10_with_name<0x35f, "V_DIV_FIXUP_F16_gfx9", "v_div_fixup_f16">;

// FIXME-GFX10-OPSEL: Need to add "selective" opsel support to some of these
// (they do not support SDWA or DPP).
defm V_ADD_NC_U16      : VOP3_Real_gfx10_with_name<0x303, "V_ADD_U16", "v_add_nc_u16">;
defm V_SUB_NC_U16      : VOP3_Real_gfx10_with_name<0x304, "V_SUB_U16", "v_sub_nc_u16">;
defm V_MUL_LO_U16      : VOP3_Real_gfx10_with_name<0x305, "V_MUL_LO_U16", "v_mul_lo_u16">;
defm V_LSHRREV_B16     : VOP3_Real_gfx10_with_name<0x307, "V_LSHRREV_B16", "v_lshrrev_b16">;
defm V_ASHRREV_I16     : VOP3_Real_gfx10_with_name<0x308, "V_ASHRREV_I16", "v_ashrrev_i16">;
defm V_MAX_U16         : VOP3_Real_gfx10_with_name<0x309, "V_MAX_U16", "v_max_u16">;
defm V_MAX_I16         : VOP3_Real_gfx10_with_name<0x30a, "V_MAX_I16", "v_max_i16">;
defm V_MIN_U16         : VOP3_Real_gfx10_with_name<0x30b, "V_MIN_U16", "v_min_u16">;
defm V_MIN_I16         : VOP3_Real_gfx10_with_name<0x30c, "V_MIN_I16", "v_min_i16">;
defm V_LSHLREV_B16     : VOP3_Real_gfx10_with_name<0x314, "V_LSHLREV_B16", "v_lshlrev_b16">;
defm V_PERMLANE16_B32  : VOP3OpSel_Real_gfx10<0x377>;
defm V_PERMLANEX16_B32 : VOP3OpSel_Real_gfx10<0x378>;

//===----------------------------------------------------------------------===//
// GFX7, GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7" in {
  multiclass VOP3_Real_gfx7<bits<10> op> {
    def _gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  multiclass VOP3be_Real_gfx7<bits<10> op> {
    def _gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
} // End AssemblerPredicate = isGFX7Only, DecoderNamespace = "GFX7"

multiclass VOP3_Real_gfx7_gfx10<bits<10> op> :
  VOP3_Real_gfx7<op>, VOP3_Real_gfx10<op>;

multiclass VOP3be_Real_gfx7_gfx10<bits<10> op> :
  VOP3be_Real_gfx7<op>, VOP3be_Real_gfx10<op>;

defm V_QSAD_PK_U16_U8 : VOP3_Real_gfx7_gfx10<0x172>;
defm V_MQSAD_U32_U8   : VOP3_Real_gfx7_gfx10<0x175>;
defm V_MAD_U64_U32    : VOP3be_Real_gfx7_gfx10<0x176>;
defm V_MAD_I64_I32    : VOP3be_Real_gfx7_gfx10<0x177>;

//===----------------------------------------------------------------------===//
// GFX6, GFX7, GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in {
  multiclass VOP3_Real_gfx6_gfx7<bits<10> op> {
    def _gfx6_gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
  multiclass VOP3be_Real_gfx6_gfx7<bits<10> op> {
    def _gfx6_gfx7 :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3be_gfx6_gfx7<op{8-0}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
} // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7"

multiclass VOP3_Real_gfx6_gfx7_gfx10<bits<10> op> :
  VOP3_Real_gfx6_gfx7<op>, VOP3_Real_gfx10<op>;

multiclass VOP3be_Real_gfx6_gfx7_gfx10<bits<10> op> :
  VOP3be_Real_gfx6_gfx7<op>, VOP3be_Real_gfx10<op>;

defm V_LSHL_B64        : VOP3_Real_gfx6_gfx7<0x161>;
defm V_LSHR_B64        : VOP3_Real_gfx6_gfx7<0x162>;
defm V_ASHR_I64        : VOP3_Real_gfx6_gfx7<0x163>;
defm V_MUL_LO_I32      : VOP3_Real_gfx6_gfx7<0x16b>;

defm V_MAD_LEGACY_F32  : VOP3_Real_gfx6_gfx7_gfx10<0x140>;
defm V_MAD_F32         : VOP3_Real_gfx6_gfx7_gfx10<0x141>;
defm V_MAD_I32_I24     : VOP3_Real_gfx6_gfx7_gfx10<0x142>;
defm V_MAD_U32_U24     : VOP3_Real_gfx6_gfx7_gfx10<0x143>;
defm V_CUBEID_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x144>;
defm V_CUBESC_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x145>;
defm V_CUBETC_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x146>;
defm V_CUBEMA_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x147>;
defm V_BFE_U32         : VOP3_Real_gfx6_gfx7_gfx10<0x148>;
defm V_BFE_I32         : VOP3_Real_gfx6_gfx7_gfx10<0x149>;
defm V_BFI_B32         : VOP3_Real_gfx6_gfx7_gfx10<0x14a>;
defm V_FMA_F32         : VOP3_Real_gfx6_gfx7_gfx10<0x14b>;
defm V_FMA_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x14c>;
defm V_LERP_U8         : VOP3_Real_gfx6_gfx7_gfx10<0x14d>;
defm V_ALIGNBIT_B32    : VOP3_Real_gfx6_gfx7_gfx10<0x14e>;
defm V_ALIGNBYTE_B32   : VOP3_Real_gfx6_gfx7_gfx10<0x14f>;
defm V_MULLIT_F32      : VOP3_Real_gfx6_gfx7_gfx10<0x150>;
defm V_MIN3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x151>;
defm V_MIN3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x152>;
defm V_MIN3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x153>;
defm V_MAX3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x154>;
defm V_MAX3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x155>;
defm V_MAX3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x156>;
defm V_MED3_F32        : VOP3_Real_gfx6_gfx7_gfx10<0x157>;
defm V_MED3_I32        : VOP3_Real_gfx6_gfx7_gfx10<0x158>;
defm V_MED3_U32        : VOP3_Real_gfx6_gfx7_gfx10<0x159>;
defm V_SAD_U8          : VOP3_Real_gfx6_gfx7_gfx10<0x15a>;
defm V_SAD_HI_U8       : VOP3_Real_gfx6_gfx7_gfx10<0x15b>;
defm V_SAD_U16         : VOP3_Real_gfx6_gfx7_gfx10<0x15c>;
defm V_SAD_U32         : VOP3_Real_gfx6_gfx7_gfx10<0x15d>;
defm V_CVT_PK_U8_F32   : VOP3_Real_gfx6_gfx7_gfx10<0x15e>;
defm V_DIV_FIXUP_F32   : VOP3_Real_gfx6_gfx7_gfx10<0x15f>;
defm V_DIV_FIXUP_F64   : VOP3_Real_gfx6_gfx7_gfx10<0x160>;
defm V_ADD_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x164>;
defm V_MUL_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x165>;
defm V_MIN_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x166>;
defm V_MAX_F64         : VOP3_Real_gfx6_gfx7_gfx10<0x167>;
defm V_LDEXP_F64       : VOP3_Real_gfx6_gfx7_gfx10<0x168>;
defm V_MUL_LO_U32      : VOP3_Real_gfx6_gfx7_gfx10<0x169>;
defm V_MUL_HI_U32      : VOP3_Real_gfx6_gfx7_gfx10<0x16a>;
defm V_MUL_HI_I32      : VOP3_Real_gfx6_gfx7_gfx10<0x16c>;
defm V_DIV_FMAS_F32    : VOP3_Real_gfx6_gfx7_gfx10<0x16f>;
defm V_DIV_FMAS_F64    : VOP3_Real_gfx6_gfx7_gfx10<0x170>;
defm V_MSAD_U8         : VOP3_Real_gfx6_gfx7_gfx10<0x171>;
defm V_MQSAD_PK_U16_U8 : VOP3_Real_gfx6_gfx7_gfx10<0x173>;
defm V_TRIG_PREOP_F64  : VOP3_Real_gfx6_gfx7_gfx10<0x174>;
defm V_DIV_SCALE_F32   : VOP3be_Real_gfx6_gfx7_gfx10<0x16d>;
defm V_DIV_SCALE_F64   : VOP3be_Real_gfx6_gfx7_gfx10<0x16e>;

// NB: Same opcode as v_mad_legacy_f32
let DecoderNamespace = "GFX10_B" in
defm V_FMA_LEGACY_F32  : VOP3_Real_gfx10<0x140>;
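
// Illustrative sketch of the pseudo/real split used by the encoding sections in
// this file: an opcode is declared once as a subtarget-independent "_e64" pseudo
// via VOP3Inst, and each subtarget section then instantiates a VOP3_Real_*
// multiclass that looks the pseudo up by NAME#"_e64" and attaches the hardware
// opcode. V_FOO_U32 and the 0x3ab opcode below are hypothetical and exist only
// to show the shape of the pairing:
//
//   defm V_FOO_U32 : VOP3Inst <"v_foo_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>;
//   ...
//   defm V_FOO_U32 : VOP3_Real_gfx10<0x3ab>; // resolves V_FOO_U32_e64 via NAME#"_e64"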
//===----------------------------------------------------------------------===//
// GFX8, GFX9 (VI).
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in {

multiclass VOP3_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

multiclass VOP3_Real_No_Suffix_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP_Pseudo>(NAME).Pfl>;
}

multiclass VOP3be_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3be_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

multiclass VOP3OpSel_Real_gfx9<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3OpSel_gfx9 <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

multiclass VOP3Interp_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Interp_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl>;
}

} // End AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8"

let AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" in {

multiclass VOP3_F16_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
            VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

multiclass VOP3Interp_F16_Real_vi<bits<10> op> {
  def _vi : VOP3_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Interp_vi <op, !cast<VOP3_Pseudo>(NAME).Pfl>;
}

} // End AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8"

let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in {

multiclass VOP3_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName#"_e64"), SIEncodingFamily.GFX9>,
              VOP3e_vi <op, !cast<VOP3_Pseudo>(OpName#"_e64").Pfl> {
    VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName#"_e64");
    let AsmString = AsmName # ps.AsmOperands;
  }
}

multiclass VOP3OpSel_F16_Real_gfx9<bits<10> op, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX9>,
              VOP3OpSel_gfx9 <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
    VOP3_Pseudo ps = !cast<VOP3_Pseudo>(NAME#"_e64");
    let AsmString = AsmName # ps.AsmOperands;
  }
}

multiclass VOP3Interp_F16_Real_gfx9<bits<10> op, string OpName, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP3_Pseudo>(OpName), SIEncodingFamily.GFX9>,
              VOP3Interp_vi <op, !cast<VOP3_Pseudo>(OpName).Pfl> {
    VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName);
    let AsmString = AsmName # ps.AsmOperands;
  }
}

multiclass VOP3_Real_gfx9<bits<10> op, string AsmName> {
  def _gfx9 : VOP3_Real<!cast<VOP_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX9>,
              VOP3e_vi <op, !cast<VOP_Pseudo>(NAME#"_e64").Pfl> {
    VOP_Pseudo ps = !cast<VOP_Pseudo>(NAME#"_e64");
    let AsmString = AsmName # ps.AsmOperands;
  }
}

} // End AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9"

defm V_MAD_U64_U32 : VOP3be_Real_vi <0x1E8>;
defm V_MAD_I64_I32 : VOP3be_Real_vi <0x1E9>;

defm V_MAD_LEGACY_F32 : VOP3_Real_vi <0x1c0>;
defm V_MAD_F32 : VOP3_Real_vi <0x1c1>;
defm V_MAD_I32_I24 : VOP3_Real_vi <0x1c2>;
defm V_MAD_U32_U24 : VOP3_Real_vi <0x1c3>;
defm V_CUBEID_F32 : VOP3_Real_vi <0x1c4>;
defm V_CUBESC_F32 : VOP3_Real_vi <0x1c5>;
defm V_CUBETC_F32 : VOP3_Real_vi <0x1c6>;
defm V_CUBEMA_F32 : VOP3_Real_vi <0x1c7>;
defm V_BFE_U32 : VOP3_Real_vi <0x1c8>;
defm V_BFE_I32 : VOP3_Real_vi <0x1c9>;
defm V_BFI_B32 : VOP3_Real_vi <0x1ca>;
defm V_FMA_F32 : VOP3_Real_vi <0x1cb>;
defm V_FMA_F64 : VOP3_Real_vi <0x1cc>;
defm V_LERP_U8 : VOP3_Real_vi <0x1cd>;
defm V_ALIGNBIT_B32 : VOP3_Real_vi <0x1ce>;
defm V_ALIGNBYTE_B32 : VOP3_Real_vi <0x1cf>;
defm V_MIN3_F32 : VOP3_Real_vi <0x1d0>;
defm V_MIN3_I32 : VOP3_Real_vi <0x1d1>;
defm V_MIN3_U32 : VOP3_Real_vi <0x1d2>;
defm V_MAX3_F32 : VOP3_Real_vi <0x1d3>;
defm V_MAX3_I32 : VOP3_Real_vi <0x1d4>;
defm V_MAX3_U32 : VOP3_Real_vi <0x1d5>;
defm V_MED3_F32 : VOP3_Real_vi <0x1d6>;
defm V_MED3_I32 : VOP3_Real_vi <0x1d7>;
defm V_MED3_U32 : VOP3_Real_vi <0x1d8>;
defm V_SAD_U8 : VOP3_Real_vi <0x1d9>;
defm V_SAD_HI_U8 : VOP3_Real_vi <0x1da>;
defm V_SAD_U16 : VOP3_Real_vi <0x1db>;
defm V_SAD_U32 : VOP3_Real_vi <0x1dc>;
defm V_CVT_PK_U8_F32 : VOP3_Real_vi <0x1dd>;
defm V_DIV_FIXUP_F32 : VOP3_Real_vi <0x1de>;
defm V_DIV_FIXUP_F64 : VOP3_Real_vi <0x1df>;
defm V_DIV_SCALE_F32 : VOP3be_Real_vi <0x1e0>;
defm V_DIV_SCALE_F64 : VOP3be_Real_vi <0x1e1>;
defm V_DIV_FMAS_F32 : VOP3_Real_vi <0x1e2>;
defm V_DIV_FMAS_F64 : VOP3_Real_vi <0x1e3>;
defm V_MSAD_U8 : VOP3_Real_vi <0x1e4>;
defm V_QSAD_PK_U16_U8 : VOP3_Real_vi <0x1e5>;
defm V_MQSAD_PK_U16_U8 : VOP3_Real_vi <0x1e6>;
defm V_MQSAD_U32_U8 : VOP3_Real_vi <0x1e7>;

defm V_PERM_B32 : VOP3_Real_vi <0x1ed>;

defm V_MAD_F16 : VOP3_F16_Real_vi <0x1ea>;
defm V_MAD_U16 : VOP3_F16_Real_vi <0x1eb>;
defm V_MAD_I16 : VOP3_F16_Real_vi <0x1ec>;
defm V_FMA_F16 : VOP3_F16_Real_vi <0x1ee>;
defm V_DIV_FIXUP_F16 : VOP3_F16_Real_vi <0x1ef>;
defm V_INTERP_P2_F16 : VOP3Interp_F16_Real_vi <0x276>;

let FPDPRounding = 1 in {
defm V_MAD_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ea, "V_MAD_F16", "v_mad_legacy_f16">;
defm V_FMA_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ee, "V_FMA_F16", "v_fma_legacy_f16">;
defm V_DIV_FIXUP_LEGACY_F16 : VOP3_F16_Real_gfx9 <0x1ef, "V_DIV_FIXUP_F16", "v_div_fixup_legacy_f16">;
defm V_INTERP_P2_LEGACY_F16 : VOP3Interp_F16_Real_gfx9 <0x276, "V_INTERP_P2_F16", "v_interp_p2_legacy_f16">;
} // End FPDPRounding = 1

defm V_MAD_LEGACY_U16 : VOP3_F16_Real_gfx9 <0x1eb, "V_MAD_U16", "v_mad_legacy_u16">;
defm V_MAD_LEGACY_I16 : VOP3_F16_Real_gfx9 <0x1ec, "V_MAD_I16", "v_mad_legacy_i16">;

defm V_MAD_F16_gfx9   : VOP3OpSel_F16_Real_gfx9 <0x203, "v_mad_f16">;
defm V_MAD_U16_gfx9   : VOP3OpSel_F16_Real_gfx9 <0x204, "v_mad_u16">;
defm V_MAD_I16_gfx9   : VOP3OpSel_F16_Real_gfx9 <0x205, "v_mad_i16">;
defm V_FMA_F16_gfx9   : VOP3OpSel_F16_Real_gfx9 <0x206, "v_fma_f16">;
defm V_DIV_FIXUP_F16_gfx9 : VOP3OpSel_F16_Real_gfx9 <0x207, "v_div_fixup_f16">;
defm V_INTERP_P2_F16_gfx9 : VOP3Interp_F16_Real_gfx9 <0x277, "V_INTERP_P2_F16_gfx9", "v_interp_p2_f16">;

defm V_ADD_I32 : VOP3_Real_vi <0x29c>;
defm V_SUB_I32 : VOP3_Real_vi <0x29d>;

defm V_INTERP_P1_F32_e64  : VOP3Interp_Real_vi <0x270>;
defm V_INTERP_P2_F32_e64  : VOP3Interp_Real_vi <0x271>;
defm V_INTERP_MOV_F32_e64 : VOP3Interp_Real_vi <0x272>;

defm V_INTERP_P1LL_F16 : VOP3Interp_Real_vi <0x274>;
defm V_INTERP_P1LV_F16 : VOP3Interp_Real_vi <0x275>;
defm V_ADD_F64 : VOP3_Real_vi <0x280>;
defm V_MUL_F64 : VOP3_Real_vi <0x281>;
defm V_MIN_F64 : VOP3_Real_vi <0x282>;
defm V_MAX_F64 : VOP3_Real_vi <0x283>;
defm V_LDEXP_F64 : VOP3_Real_vi <0x284>;
defm V_MUL_LO_U32 : VOP3_Real_vi <0x285>;

// removed from VI as identical to V_MUL_LO_U32
let isAsmParserOnly = 1 in {
defm V_MUL_LO_I32 : VOP3_Real_vi <0x285>;
}

defm V_MUL_HI_U32 : VOP3_Real_vi <0x286>;
defm V_MUL_HI_I32 : VOP3_Real_vi <0x287>;

defm V_READLANE_B32 : VOP3_Real_No_Suffix_vi <0x289>;
defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_vi <0x28a>;

defm V_LSHLREV_B64 : VOP3_Real_vi <0x28f>;
defm V_LSHRREV_B64 : VOP3_Real_vi <0x290>;
defm V_ASHRREV_I64 : VOP3_Real_vi <0x291>;
defm V_TRIG_PREOP_F64 : VOP3_Real_vi <0x292>;

defm V_LSHL_ADD_U32 : VOP3_Real_vi <0x1fd>;
defm V_ADD_LSHL_U32 : VOP3_Real_vi <0x1fe>;
defm V_ADD3_U32 : VOP3_Real_vi <0x1ff>;
defm V_LSHL_OR_B32 : VOP3_Real_vi <0x200>;
defm V_AND_OR_B32 : VOP3_Real_vi <0x201>;
defm V_OR3_B32 : VOP3_Real_vi <0x202>;
defm V_PACK_B32_F16 : VOP3OpSel_Real_gfx9 <0x2a0>;

defm V_XAD_U32 : VOP3_Real_vi <0x1f3>;

defm V_MIN3_F16 : VOP3OpSel_Real_gfx9 <0x1f4>;
defm V_MIN3_I16 : VOP3OpSel_Real_gfx9 <0x1f5>;
defm V_MIN3_U16 : VOP3OpSel_Real_gfx9 <0x1f6>;
defm V_MAX3_F16 : VOP3OpSel_Real_gfx9 <0x1f7>;
defm V_MAX3_I16 : VOP3OpSel_Real_gfx9 <0x1f8>;
defm V_MAX3_U16 : VOP3OpSel_Real_gfx9 <0x1f9>;

defm V_MED3_F16 : VOP3OpSel_Real_gfx9 <0x1fa>;
defm V_MED3_I16 : VOP3OpSel_Real_gfx9 <0x1fb>;
defm V_MED3_U16 : VOP3OpSel_Real_gfx9 <0x1fc>;

defm V_ADD_I16 : VOP3OpSel_Real_gfx9 <0x29e>;
defm V_SUB_I16 : VOP3OpSel_Real_gfx9 <0x29f>;

defm V_MAD_U32_U16 : VOP3OpSel_Real_gfx9 <0x1f1>;
defm V_MAD_I32_I16 : VOP3OpSel_Real_gfx9 <0x1f2>;

defm V_CVT_PKNORM_I16_F16 : VOP3OpSel_Real_gfx9 <0x299>;
defm V_CVT_PKNORM_U16_F16 : VOP3OpSel_Real_gfx9 <0x29a>;
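
// Illustrative assembly sketch (hypothetical register choices): the
// VOP3OpSel_Real_gfx9 encodings above accept an op_sel modifier that selects
// the low or high 16-bit half of each source operand and of the destination,
// e.g.:
//
//   v_med3_f16 v0, v1, v2, v3 op_sel:[1,0,0,0]  // read src0 from the high half of v1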