//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"

namespace llvm {

namespace RISCVMatInt {
void generateInstSeq(int64_t Val, bool IsRV64, InstSeq &Res) {
  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
    //
    // Adding 0x800 rounds Hi20 up by one whenever Lo12 is negative, so that
    // LUI Hi20 followed by the sign-extended ADDI Lo12 reproduces Val exactly.
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.push_back(Inst(RISCV::LUI, Hi20));

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.push_back(Inst(AddiOpc, Lo12));
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems possible by simply emitting the
  // most significant 32 bits (LUI+ADDIW) followed by as many left shifts
  // (SLLI) and immediate additions (ADDI) as needed. However, because ADDI
  // performs a sign-extended addition, that approach only works when at most
  // 11 bits of each ADDI immediate are used. Using all 12 bits of the ADDI
  // immediates, as GAS does, requires that the constant is processed starting
  // with the least significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is
  // subsequently performed when the recursion returns.

  int64_t Lo12 = SignExtend64<12>(Val);
  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
  int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
  Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

  generateInstSeq(Hi52, IsRV64, Res);

  Res.push_back(Inst(RISCV::SLLI, ShiftAmount));
  if (Lo12)
    Res.push_back(Inst(RISCV::ADDI, Lo12));
}

int getIntMatCost(const APInt &Val, unsigned Size, bool IsRV64) {
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform-register-sized chunks, and calculate the
  // cost of each chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    InstSeq MatSeq;
    generateInstSeq(Chunk.getSExtValue(), IsRV64, MatSeq);
    Cost += MatSeq.size();
  }
  return std::max(1, Cost);
}
} // namespace RISCVMatInt
} // namespace llvm
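
// Worked examples (illustrative only; the traces below are not part of the
// upstream source, they simply follow the arithmetic of generateInstSeq
// above).
//
// 1) Val = 0xFFF on RV64 (32-bit path):
//      Hi20 = ((0xFFF + 0x800) >> 12) & 0xFFFFF = 1
//      Lo12 = SignExtend64<12>(0xFFF)           = -1
//    This emits LUI 1 (loading 0x1000) followed by ADDIW -1, giving 0xFFF.
//    The +0x800 rounding is what bumps Hi20 to 1 so the negative Lo12 lands
//    on the intended value.
//
// 2) Val = 0x100000000 on RV64 (recursive path):
//      Lo12        = SignExtend64<12>(0x100000000)      = 0
//      Hi52        = (0x100000000 + 0x800) >> 12        = 0x100000
//      ShiftAmount = 12 + findFirstSet(0x100000)        = 12 + 20 = 32
//      Hi52        = SignExtend64(0x100000 >> 20, 32)   = 1
//    The recursion on 1 fits in 32 bits and emits ADDI 1; on return SLLI 32
//    is appended, and the zero Lo12 suppresses a trailing ADDI, so the
//    constant is built as (1 << 32) in two instructions.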