| //===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| /// \file |
| /// This file implements the targeting of the InstructionSelector class for |
| /// AArch64. |
| /// \todo This should be generated by TableGen. |
| //===----------------------------------------------------------------------===// |
| |
| #include "AArch64InstrInfo.h" |
| #include "AArch64MachineFunctionInfo.h" |
| #include "AArch64RegisterBankInfo.h" |
| #include "AArch64RegisterInfo.h" |
| #include "AArch64Subtarget.h" |
| #include "AArch64TargetMachine.h" |
| #include "MCTargetDesc/AArch64AddressingModes.h" |
| #include "llvm/CodeGen/GlobalISel/InstructionSelector.h" |
| #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" |
| #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" |
| #include "llvm/CodeGen/GlobalISel/Utils.h" |
| #include "llvm/CodeGen/MachineBasicBlock.h" |
| #include "llvm/CodeGen/MachineFunction.h" |
| #include "llvm/CodeGen/MachineInstr.h" |
| #include "llvm/CodeGen/MachineInstrBuilder.h" |
| #include "llvm/CodeGen/MachineOperand.h" |
| #include "llvm/CodeGen/MachineRegisterInfo.h" |
| #include "llvm/IR/Type.h" |
| #include "llvm/Support/Debug.h" |
| #include "llvm/Support/raw_ostream.h" |
| |
| #define DEBUG_TYPE "aarch64-isel" |
| |
| using namespace llvm; |
| |
| namespace { |
| |
| #define GET_GLOBALISEL_PREDICATE_BITSET |
| #include "AArch64GenGlobalISel.inc" |
| #undef GET_GLOBALISEL_PREDICATE_BITSET |
| |
| class AArch64InstructionSelector : public InstructionSelector { |
| public: |
| AArch64InstructionSelector(const AArch64TargetMachine &TM, |
| const AArch64Subtarget &STI, |
| const AArch64RegisterBankInfo &RBI); |
| |
| bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override; |
| static const char *getName() { return DEBUG_TYPE; } |
| |
| private: |
| /// tblgen-erated 'select' implementation, used as the initial selector for |
| /// the patterns that don't require complex C++. |
| bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const; |
| |
| bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF, |
| MachineRegisterInfo &MRI) const; |
| bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF, |
| MachineRegisterInfo &MRI) const; |
| |
| bool selectCompareBranch(MachineInstr &I, MachineFunction &MF, |
| MachineRegisterInfo &MRI) const; |
| |
| ComplexRendererFns selectArithImmed(MachineOperand &Root) const; |
| |
| ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root, |
| unsigned Size) const; |
| |
| ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const { |
| return selectAddrModeUnscaled(Root, 1); |
| } |
| ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const { |
| return selectAddrModeUnscaled(Root, 2); |
| } |
| ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const { |
| return selectAddrModeUnscaled(Root, 4); |
| } |
| ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const { |
| return selectAddrModeUnscaled(Root, 8); |
| } |
| ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const { |
| return selectAddrModeUnscaled(Root, 16); |
| } |
| |
| ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root, |
| unsigned Size) const; |
| template <int Width> |
| ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const { |
| return selectAddrModeIndexed(Root, Width / 8); |
| } |
| |
| void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI) const; |
| |
| // Materialize a GlobalValue or BlockAddress using a movz+movk sequence. |
| void materializeLargeCMVal(MachineInstr &I, const Value *V, |
| unsigned char OpFlags) const; |
| |
| const AArch64TargetMachine &TM; |
| const AArch64Subtarget &STI; |
| const AArch64InstrInfo &TII; |
| const AArch64RegisterInfo &TRI; |
| const AArch64RegisterBankInfo &RBI; |
| |
| #define GET_GLOBALISEL_PREDICATES_DECL |
| #include "AArch64GenGlobalISel.inc" |
| #undef GET_GLOBALISEL_PREDICATES_DECL |
| |
| // We declare the temporaries used by selectImpl() in the class to minimize the |
| // cost of constructing placeholder values. |
| #define GET_GLOBALISEL_TEMPORARIES_DECL |
| #include "AArch64GenGlobalISel.inc" |
| #undef GET_GLOBALISEL_TEMPORARIES_DECL |
| }; |
| |
| } // end anonymous namespace |
| |
| #define GET_GLOBALISEL_IMPL |
| #include "AArch64GenGlobalISel.inc" |
| #undef GET_GLOBALISEL_IMPL |
| |
| AArch64InstructionSelector::AArch64InstructionSelector( |
| const AArch64TargetMachine &TM, const AArch64Subtarget &STI, |
| const AArch64RegisterBankInfo &RBI) |
| : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()), |
| TRI(*STI.getRegisterInfo()), RBI(RBI), |
| #define GET_GLOBALISEL_PREDICATES_INIT |
| #include "AArch64GenGlobalISel.inc" |
| #undef GET_GLOBALISEL_PREDICATES_INIT |
| #define GET_GLOBALISEL_TEMPORARIES_INIT |
| #include "AArch64GenGlobalISel.inc" |
| #undef GET_GLOBALISEL_TEMPORARIES_INIT |
| { |
| } |
| |
| // FIXME: This should be target-independent, inferred from the types declared |
| // for each class in the bank. |
| static const TargetRegisterClass * |
| getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB, |
| const RegisterBankInfo &RBI, |
| bool GetAllRegSet = false) { |
| if (RB.getID() == AArch64::GPRRegBankID) { |
| if (Ty.getSizeInBits() <= 32) |
| return GetAllRegSet ? &AArch64::GPR32allRegClass |
| : &AArch64::GPR32RegClass; |
| if (Ty.getSizeInBits() == 64) |
| return GetAllRegSet ? &AArch64::GPR64allRegClass |
| : &AArch64::GPR64RegClass; |
| return nullptr; |
| } |
| |
| if (RB.getID() == AArch64::FPRRegBankID) { |
| if (Ty.getSizeInBits() <= 16) |
| return &AArch64::FPR16RegClass; |
| if (Ty.getSizeInBits() == 32) |
| return &AArch64::FPR32RegClass; |
| if (Ty.getSizeInBits() == 64) |
| return &AArch64::FPR64RegClass; |
| if (Ty.getSizeInBits() == 128) |
| return &AArch64::FPR128RegClass; |
| return nullptr; |
| } |
| |
| return nullptr; |
| } |
| |
| /// Check whether \p I is a currently unsupported binary operation: |
| /// - it has an unsized type |
| /// - an operand is not a vreg |
/// - not all operands are on the same register bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the AArch64 selector.
| static bool unsupportedBinOp(const MachineInstr &I, |
| const AArch64RegisterBankInfo &RBI, |
| const MachineRegisterInfo &MRI, |
| const AArch64RegisterInfo &TRI) { |
| LLT Ty = MRI.getType(I.getOperand(0).getReg()); |
| if (!Ty.isValid()) { |
| LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n"); |
| return true; |
| } |
| |
| const RegisterBank *PrevOpBank = nullptr; |
| for (auto &MO : I.operands()) { |
| // FIXME: Support non-register operands. |
| if (!MO.isReg()) { |
| LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n"); |
| return true; |
| } |
| |
    // FIXME: Can generic operations have physical register operands? If
| // so, this will need to be taught about that, and we'll need to get the |
| // bank out of the minimal class for the register. |
| // Either way, this needs to be documented (and possibly verified). |
| if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) { |
| LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n"); |
| return true; |
| } |
| |
| const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI); |
| if (!OpBank) { |
| LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n"); |
| return true; |
| } |
| |
| if (PrevOpBank && OpBank != PrevOpBank) { |
| LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n"); |
| return true; |
| } |
| PrevOpBank = OpBank; |
| } |
| return false; |
| } |
| |
| /// Select the AArch64 opcode for the basic binary operation \p GenericOpc |
| /// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID |
| /// and of size \p OpSize. |
| /// \returns \p GenericOpc if the combination is unsupported. |
| static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID, |
| unsigned OpSize) { |
| switch (RegBankID) { |
| case AArch64::GPRRegBankID: |
| if (OpSize == 32) { |
| switch (GenericOpc) { |
| case TargetOpcode::G_SHL: |
| return AArch64::LSLVWr; |
| case TargetOpcode::G_LSHR: |
| return AArch64::LSRVWr; |
| case TargetOpcode::G_ASHR: |
| return AArch64::ASRVWr; |
| default: |
| return GenericOpc; |
| } |
| } else if (OpSize == 64) { |
| switch (GenericOpc) { |
| case TargetOpcode::G_GEP: |
| return AArch64::ADDXrr; |
| case TargetOpcode::G_SHL: |
| return AArch64::LSLVXr; |
| case TargetOpcode::G_LSHR: |
| return AArch64::LSRVXr; |
| case TargetOpcode::G_ASHR: |
| return AArch64::ASRVXr; |
| default: |
| return GenericOpc; |
| } |
| } |
| break; |
| case AArch64::FPRRegBankID: |
| switch (OpSize) { |
| case 32: |
| switch (GenericOpc) { |
| case TargetOpcode::G_FADD: |
| return AArch64::FADDSrr; |
| case TargetOpcode::G_FSUB: |
| return AArch64::FSUBSrr; |
| case TargetOpcode::G_FMUL: |
| return AArch64::FMULSrr; |
| case TargetOpcode::G_FDIV: |
| return AArch64::FDIVSrr; |
| default: |
| return GenericOpc; |
| } |
| case 64: |
| switch (GenericOpc) { |
| case TargetOpcode::G_FADD: |
| return AArch64::FADDDrr; |
| case TargetOpcode::G_FSUB: |
| return AArch64::FSUBDrr; |
| case TargetOpcode::G_FMUL: |
| return AArch64::FMULDrr; |
| case TargetOpcode::G_FDIV: |
| return AArch64::FDIVDrr; |
| case TargetOpcode::G_OR: |
| return AArch64::ORRv8i8; |
| default: |
| return GenericOpc; |
| } |
| } |
| break; |
| } |
| return GenericOpc; |
| } |
| |
| /// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc, |
| /// appropriate for the (value) register bank \p RegBankID and of memory access |
| /// size \p OpSize. This returns the variant with the base+unsigned-immediate |
| /// addressing mode (e.g., LDRXui). |
| /// \returns \p GenericOpc if the combination is unsupported. |
| static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID, |
| unsigned OpSize) { |
| const bool isStore = GenericOpc == TargetOpcode::G_STORE; |
| switch (RegBankID) { |
| case AArch64::GPRRegBankID: |
| switch (OpSize) { |
| case 8: |
| return isStore ? AArch64::STRBBui : AArch64::LDRBBui; |
| case 16: |
| return isStore ? AArch64::STRHHui : AArch64::LDRHHui; |
| case 32: |
| return isStore ? AArch64::STRWui : AArch64::LDRWui; |
| case 64: |
| return isStore ? AArch64::STRXui : AArch64::LDRXui; |
| } |
| break; |
| case AArch64::FPRRegBankID: |
| switch (OpSize) { |
| case 8: |
| return isStore ? AArch64::STRBui : AArch64::LDRBui; |
| case 16: |
| return isStore ? AArch64::STRHui : AArch64::LDRHui; |
| case 32: |
| return isStore ? AArch64::STRSui : AArch64::LDRSui; |
| case 64: |
| return isStore ? AArch64::STRDui : AArch64::LDRDui; |
| } |
| break; |
| } |
| return GenericOpc; |
| } |
| |
| static bool selectFP16CopyFromGPR32(MachineInstr &I, const TargetInstrInfo &TII, |
| MachineRegisterInfo &MRI, unsigned SrcReg) { |
| // Copies from gpr32 to fpr16 need to use a sub-register copy. |
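  // There is no single copy between the GPR32 and FPR16 register classes, so
  // first copy into an FPR32, then extract the 16-bit hsub sub-register.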
| unsigned CopyReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass); |
| BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::COPY)) |
| .addDef(CopyReg) |
| .addUse(SrcReg); |
| unsigned SubRegCopy = MRI.createVirtualRegister(&AArch64::FPR16RegClass); |
| BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY)) |
| .addDef(SubRegCopy) |
| .addUse(CopyReg, 0, AArch64::hsub); |
| |
| MachineOperand &RegOp = I.getOperand(1); |
| RegOp.setReg(SubRegCopy); |
| return true; |
| } |
| |
| static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, |
| MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, |
| const RegisterBankInfo &RBI) { |
| |
| unsigned DstReg = I.getOperand(0).getReg(); |
| unsigned SrcReg = I.getOperand(1).getReg(); |
| |
| if (TargetRegisterInfo::isPhysicalRegister(DstReg)) { |
| if (TRI.getRegClass(AArch64::FPR16RegClassID)->contains(DstReg) && |
| !TargetRegisterInfo::isPhysicalRegister(SrcReg)) { |
| const RegisterBank &RegBank = *RBI.getRegBank(SrcReg, MRI, TRI); |
| const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank( |
| MRI.getType(SrcReg), RegBank, RBI, /* GetAllRegSet */ true); |
| if (SrcRC == &AArch64::GPR32allRegClass) |
| return selectFP16CopyFromGPR32(I, TII, MRI, SrcReg); |
| } |
| assert(I.isCopy() && "Generic operators do not allow physical registers"); |
| return true; |
| } |
| |
| const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI); |
| const unsigned DstSize = MRI.getType(DstReg).getSizeInBits(); |
| (void)DstSize; |
| const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI); |
| (void)SrcSize; |
| assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) && |
| "No phys reg on generic operators"); |
| assert( |
| (DstSize == SrcSize || |
       // Copies are a means to set up initial types; the number of
       // bits may not exactly match.
| (TargetRegisterInfo::isPhysicalRegister(SrcReg) && |
| DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) || |
       // Copies are a means to copy bits around, and as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
| (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) && |
| "Copy with different width?!"); |
| assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) && |
| "GPRs cannot get more than 64-bit width values"); |
| |
| const TargetRegisterClass *RC = getRegClassForTypeOnBank( |
| MRI.getType(DstReg), RegBank, RBI, /* GetAllRegSet */ true); |
| if (!RC) { |
| LLVM_DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n'); |
| return false; |
| } |
| |
| if (!TargetRegisterInfo::isPhysicalRegister(SrcReg)) { |
| const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(SrcReg); |
| const TargetRegisterClass *SrcRC = |
| RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); |
| const RegisterBank *RB = nullptr; |
| if (!SrcRC) { |
| RB = RegClassOrBank.get<const RegisterBank *>(); |
| SrcRC = getRegClassForTypeOnBank(MRI.getType(SrcReg), *RB, RBI, true); |
| } |
| // Copies from fpr16 to gpr32 need to use SUBREG_TO_REG. |
| if (RC == &AArch64::GPR32allRegClass && SrcRC == &AArch64::FPR16RegClass) { |
| unsigned PromoteReg = MRI.createVirtualRegister(&AArch64::FPR32RegClass); |
| BuildMI(*I.getParent(), I, I.getDebugLoc(), |
| TII.get(AArch64::SUBREG_TO_REG)) |
| .addDef(PromoteReg) |
| .addImm(0) |
| .addUse(SrcReg) |
| .addImm(AArch64::hsub); |
| MachineOperand &RegOp = I.getOperand(1); |
| RegOp.setReg(PromoteReg); |
| } else if (RC == &AArch64::FPR16RegClass && |
| SrcRC == &AArch64::GPR32allRegClass) { |
| selectFP16CopyFromGPR32(I, TII, MRI, SrcReg); |
| } |
| } |
| |
| // No need to constrain SrcReg. It will get constrained when |
  // we hit another of its uses or defs.
| // Copies do not have constraints. |
| if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) { |
| LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode()) |
| << " operand\n"); |
| return false; |
| } |
| I.setDesc(TII.get(AArch64::COPY)); |
| return true; |
| } |
| |
| static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) { |
| if (!DstTy.isScalar() || !SrcTy.isScalar()) |
| return GenericOpc; |
| |
| const unsigned DstSize = DstTy.getSizeInBits(); |
| const unsigned SrcSize = SrcTy.getSizeInBits(); |
| |
| switch (DstSize) { |
| case 32: |
| switch (SrcSize) { |
| case 32: |
| switch (GenericOpc) { |
| case TargetOpcode::G_SITOFP: |
| return AArch64::SCVTFUWSri; |
| case TargetOpcode::G_UITOFP: |
| return AArch64::UCVTFUWSri; |
| case TargetOpcode::G_FPTOSI: |
| return AArch64::FCVTZSUWSr; |
| case TargetOpcode::G_FPTOUI: |
| return AArch64::FCVTZUUWSr; |
| default: |
| return GenericOpc; |
| } |
| case 64: |
| switch (GenericOpc) { |
| case TargetOpcode::G_SITOFP: |
| return AArch64::SCVTFUXSri; |
| case TargetOpcode::G_UITOFP: |
| return AArch64::UCVTFUXSri; |
| case TargetOpcode::G_FPTOSI: |
| return AArch64::FCVTZSUWDr; |
| case TargetOpcode::G_FPTOUI: |
| return AArch64::FCVTZUUWDr; |
| default: |
| return GenericOpc; |
| } |
| default: |
| return GenericOpc; |
| } |
| case 64: |
| switch (SrcSize) { |
| case 32: |
| switch (GenericOpc) { |
| case TargetOpcode::G_SITOFP: |
| return AArch64::SCVTFUWDri; |
| case TargetOpcode::G_UITOFP: |
| return AArch64::UCVTFUWDri; |
| case TargetOpcode::G_FPTOSI: |
| return AArch64::FCVTZSUXSr; |
| case TargetOpcode::G_FPTOUI: |
| return AArch64::FCVTZUUXSr; |
| default: |
| return GenericOpc; |
| } |
| case 64: |
| switch (GenericOpc) { |
| case TargetOpcode::G_SITOFP: |
| return AArch64::SCVTFUXDri; |
| case TargetOpcode::G_UITOFP: |
| return AArch64::UCVTFUXDri; |
| case TargetOpcode::G_FPTOSI: |
| return AArch64::FCVTZSUXDr; |
| case TargetOpcode::G_FPTOUI: |
| return AArch64::FCVTZUUXDr; |
| default: |
| return GenericOpc; |
| } |
| default: |
| return GenericOpc; |
| } |
| default: |
| return GenericOpc; |
| }; |
| return GenericOpc; |
| } |
| |
| static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) { |
| switch (P) { |
| default: |
| llvm_unreachable("Unknown condition code!"); |
| case CmpInst::ICMP_NE: |
| return AArch64CC::NE; |
| case CmpInst::ICMP_EQ: |
| return AArch64CC::EQ; |
| case CmpInst::ICMP_SGT: |
| return AArch64CC::GT; |
| case CmpInst::ICMP_SGE: |
| return AArch64CC::GE; |
| case CmpInst::ICMP_SLT: |
| return AArch64CC::LT; |
| case CmpInst::ICMP_SLE: |
| return AArch64CC::LE; |
| case CmpInst::ICMP_UGT: |
| return AArch64CC::HI; |
| case CmpInst::ICMP_UGE: |
| return AArch64CC::HS; |
| case CmpInst::ICMP_ULT: |
| return AArch64CC::LO; |
| case CmpInst::ICMP_ULE: |
| return AArch64CC::LS; |
| } |
| } |
| |
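/// Convert an IR FP comparison predicate to the AArch64 condition code(s)
/// needed to test it. Some predicates require two checks (the result is true
/// if either condition holds); CondCode2 is AL when one condition suffices.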
| static void changeFCMPPredToAArch64CC(CmpInst::Predicate P, |
| AArch64CC::CondCode &CondCode, |
| AArch64CC::CondCode &CondCode2) { |
| CondCode2 = AArch64CC::AL; |
| switch (P) { |
| default: |
| llvm_unreachable("Unknown FP condition!"); |
| case CmpInst::FCMP_OEQ: |
| CondCode = AArch64CC::EQ; |
| break; |
| case CmpInst::FCMP_OGT: |
| CondCode = AArch64CC::GT; |
| break; |
| case CmpInst::FCMP_OGE: |
| CondCode = AArch64CC::GE; |
| break; |
| case CmpInst::FCMP_OLT: |
| CondCode = AArch64CC::MI; |
| break; |
| case CmpInst::FCMP_OLE: |
| CondCode = AArch64CC::LS; |
| break; |
| case CmpInst::FCMP_ONE: |
| CondCode = AArch64CC::MI; |
| CondCode2 = AArch64CC::GT; |
| break; |
| case CmpInst::FCMP_ORD: |
| CondCode = AArch64CC::VC; |
| break; |
| case CmpInst::FCMP_UNO: |
| CondCode = AArch64CC::VS; |
| break; |
| case CmpInst::FCMP_UEQ: |
| CondCode = AArch64CC::EQ; |
| CondCode2 = AArch64CC::VS; |
| break; |
| case CmpInst::FCMP_UGT: |
| CondCode = AArch64CC::HI; |
| break; |
| case CmpInst::FCMP_UGE: |
| CondCode = AArch64CC::PL; |
| break; |
| case CmpInst::FCMP_ULT: |
| CondCode = AArch64CC::LT; |
| break; |
| case CmpInst::FCMP_ULE: |
| CondCode = AArch64CC::LE; |
| break; |
| case CmpInst::FCMP_UNE: |
| CondCode = AArch64CC::NE; |
| break; |
| } |
| } |
| |
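/// Try to fold a G_ICMP against zero feeding a G_BRCOND into a single
/// CBZ/CBNZ. This only applies to EQ/NE compares of GPR values.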
| bool AArch64InstructionSelector::selectCompareBranch( |
| MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { |
| |
| const unsigned CondReg = I.getOperand(0).getReg(); |
| MachineBasicBlock *DestMBB = I.getOperand(1).getMBB(); |
| MachineInstr *CCMI = MRI.getVRegDef(CondReg); |
| if (CCMI->getOpcode() == TargetOpcode::G_TRUNC) |
| CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg()); |
| if (CCMI->getOpcode() != TargetOpcode::G_ICMP) |
| return false; |
| |
| unsigned LHS = CCMI->getOperand(2).getReg(); |
| unsigned RHS = CCMI->getOperand(3).getReg(); |
| if (!getConstantVRegVal(RHS, MRI)) |
| std::swap(RHS, LHS); |
| |
| const auto RHSImm = getConstantVRegVal(RHS, MRI); |
| if (!RHSImm || *RHSImm != 0) |
| return false; |
| |
| const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI); |
| if (RB.getID() != AArch64::GPRRegBankID) |
| return false; |
| |
| const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate(); |
| if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ) |
| return false; |
| |
| const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits(); |
| unsigned CBOpc = 0; |
| if (CmpWidth <= 32) |
| CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW); |
| else if (CmpWidth == 64) |
| CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX); |
| else |
| return false; |
| |
| BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc)) |
| .addUse(LHS) |
| .addMBB(DestMBB) |
| .constrainAllUses(TII, TRI, RBI); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| bool AArch64InstructionSelector::selectVaStartAAPCS( |
| MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { |
| return false; |
| } |
| |
| bool AArch64InstructionSelector::selectVaStartDarwin( |
| MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { |
| AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); |
| unsigned ListReg = I.getOperand(0).getReg(); |
| |
| unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass); |
| |
| auto MIB = |
| BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri)) |
| .addDef(ArgsAddrReg) |
| .addFrameIndex(FuncInfo->getVarArgsStackIndex()) |
| .addImm(0) |
| .addImm(0); |
| |
| constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| |
| MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui)) |
| .addUse(ArgsAddrReg) |
| .addUse(ListReg) |
| .addImm(0) |
| .addMemOperand(*I.memoperands_begin()); |
| |
| constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| void AArch64InstructionSelector::materializeLargeCMVal( |
| MachineInstr &I, const Value *V, unsigned char OpFlags) const { |
| MachineBasicBlock &MBB = *I.getParent(); |
| MachineFunction &MF = *MBB.getParent(); |
| MachineRegisterInfo &MRI = MF.getRegInfo(); |
| MachineIRBuilder MIB(I); |
| |
| auto MovZ = MIB.buildInstr(AArch64::MOVZXi, &AArch64::GPR64RegClass); |
| MovZ->addOperand(MF, I.getOperand(1)); |
| MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 | |
| AArch64II::MO_NC); |
| MovZ->addOperand(MF, MachineOperand::CreateImm(0)); |
| constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI); |
| |
| auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset, |
| unsigned ForceDstReg) { |
| unsigned DstReg = ForceDstReg |
| ? ForceDstReg |
| : MRI.createVirtualRegister(&AArch64::GPR64RegClass); |
| auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg); |
| if (auto *GV = dyn_cast<GlobalValue>(V)) { |
| MovI->addOperand(MF, MachineOperand::CreateGA( |
| GV, MovZ->getOperand(1).getOffset(), Flags)); |
| } else { |
| MovI->addOperand( |
| MF, MachineOperand::CreateBA(cast<BlockAddress>(V), |
| MovZ->getOperand(1).getOffset(), Flags)); |
| } |
| MovI->addOperand(MF, MachineOperand::CreateImm(Offset)); |
| constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI); |
| return DstReg; |
| }; |
| unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(), |
| AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0); |
| DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0); |
| BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg()); |
| return; |
| } |
| |
| bool AArch64InstructionSelector::select(MachineInstr &I, |
| CodeGenCoverage &CoverageInfo) const { |
| assert(I.getParent() && "Instruction should be in a basic block!"); |
| assert(I.getParent()->getParent() && "Instruction should be in a function!"); |
| |
| MachineBasicBlock &MBB = *I.getParent(); |
| MachineFunction &MF = *MBB.getParent(); |
| MachineRegisterInfo &MRI = MF.getRegInfo(); |
| |
| unsigned Opcode = I.getOpcode(); |
  // G_PHI requires the same handling as PHI.
| if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) { |
| // Certain non-generic instructions also need some special handling. |
| |
| if (Opcode == TargetOpcode::LOAD_STACK_GUARD) |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| |
| if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) { |
| const unsigned DefReg = I.getOperand(0).getReg(); |
| const LLT DefTy = MRI.getType(DefReg); |
| |
| const TargetRegisterClass *DefRC = nullptr; |
| if (TargetRegisterInfo::isPhysicalRegister(DefReg)) { |
| DefRC = TRI.getRegClass(DefReg); |
| } else { |
| const RegClassOrRegBank &RegClassOrBank = |
| MRI.getRegClassOrRegBank(DefReg); |
| |
| DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); |
| if (!DefRC) { |
| if (!DefTy.isValid()) { |
| LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n"); |
| return false; |
| } |
| const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>(); |
| DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI); |
| if (!DefRC) { |
| LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n"); |
| return false; |
| } |
| } |
| } |
| I.setDesc(TII.get(TargetOpcode::PHI)); |
| |
| return RBI.constrainGenericRegister(DefReg, *DefRC, MRI); |
| } |
| |
| if (I.isCopy()) |
| return selectCopy(I, TII, MRI, TRI, RBI); |
| |
| return true; |
| } |
| |
| |
| if (I.getNumOperands() != I.getNumExplicitOperands()) { |
| LLVM_DEBUG( |
| dbgs() << "Generic instruction has unexpected implicit operands\n"); |
| return false; |
| } |
| |
| if (selectImpl(I, CoverageInfo)) |
| return true; |
| |
| LLT Ty = |
| I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{}; |
| |
| switch (Opcode) { |
| case TargetOpcode::G_BRCOND: { |
| if (Ty.getSizeInBits() > 32) { |
| // We shouldn't need this on AArch64, but it would be implemented as an |
| // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the |
| // bit being tested is < 32. |
| LLVM_DEBUG(dbgs() << "G_BRCOND has type: " << Ty |
| << ", expected at most 32-bits"); |
| return false; |
| } |
| |
| const unsigned CondReg = I.getOperand(0).getReg(); |
| MachineBasicBlock *DestMBB = I.getOperand(1).getMBB(); |
| |
| if (selectCompareBranch(I, MF, MRI)) |
| return true; |
| |
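    // Couldn't fold into a CBZ/CBNZ: test bit 0 of the condition register
    // and branch if it is set.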
| auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW)) |
| .addUse(CondReg) |
| .addImm(/*bit offset=*/0) |
| .addMBB(DestMBB); |
| |
| I.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_BRINDIRECT: { |
| I.setDesc(TII.get(AArch64::BR)); |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_FCONSTANT: |
| case TargetOpcode::G_CONSTANT: { |
| const bool isFP = Opcode == TargetOpcode::G_FCONSTANT; |
| |
| const LLT s32 = LLT::scalar(32); |
| const LLT s64 = LLT::scalar(64); |
| const LLT p0 = LLT::pointer(0, 64); |
| |
| const unsigned DefReg = I.getOperand(0).getReg(); |
| const LLT DefTy = MRI.getType(DefReg); |
| const unsigned DefSize = DefTy.getSizeInBits(); |
| const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); |
| |
| // FIXME: Redundant check, but even less readable when factored out. |
| if (isFP) { |
| if (Ty != s32 && Ty != s64) { |
| LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty |
| << " constant, expected: " << s32 << " or " << s64 |
| << '\n'); |
| return false; |
| } |
| |
| if (RB.getID() != AArch64::FPRRegBankID) { |
| LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty |
| << " constant on bank: " << RB |
| << ", expected: FPR\n"); |
| return false; |
| } |
| |
| // The case when we have 0.0 is covered by tablegen. Reject it here so we |
| // can be sure tablegen works correctly and isn't rescued by this code. |
| if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0)) |
| return false; |
| } else { |
| // s32 and s64 are covered by tablegen. |
| if (Ty != p0) { |
| LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty |
| << " constant, expected: " << s32 << ", " << s64 |
| << ", or " << p0 << '\n'); |
| return false; |
| } |
| |
| if (RB.getID() != AArch64::GPRRegBankID) { |
| LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty |
| << " constant on bank: " << RB |
| << ", expected: GPR\n"); |
| return false; |
| } |
| } |
| |
| const unsigned MovOpc = |
| DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm; |
| |
| I.setDesc(TII.get(MovOpc)); |
| |
| if (isFP) { |
| const TargetRegisterClass &GPRRC = |
| DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass; |
| const TargetRegisterClass &FPRRC = |
| DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass; |
| |
| const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC); |
| MachineOperand &RegOp = I.getOperand(0); |
| RegOp.setReg(DefGPRReg); |
| |
| BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(), |
| TII.get(AArch64::COPY)) |
| .addDef(DefReg) |
| .addUse(DefGPRReg); |
| |
| if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) { |
| LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n"); |
| return false; |
| } |
| |
| MachineOperand &ImmOp = I.getOperand(1); |
| // FIXME: Is going through int64_t always correct? |
| ImmOp.ChangeToImmediate( |
| ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue()); |
| } else if (I.getOperand(1).isCImm()) { |
| uint64_t Val = I.getOperand(1).getCImm()->getZExtValue(); |
| I.getOperand(1).ChangeToImmediate(Val); |
| } else if (I.getOperand(1).isImm()) { |
| uint64_t Val = I.getOperand(1).getImm(); |
| I.getOperand(1).ChangeToImmediate(Val); |
| } |
| |
| constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| return true; |
| } |
| case TargetOpcode::G_EXTRACT: { |
| LLT SrcTy = MRI.getType(I.getOperand(1).getReg()); |
| LLT DstTy = MRI.getType(I.getOperand(0).getReg()); |
| (void)DstTy; |
| unsigned SrcSize = SrcTy.getSizeInBits(); |
    // Larger extracts are vectors; same-size extracts should be something
    // else by now (either split up or simplified to a COPY).
    if (SrcSize > 64 || Ty.getSizeInBits() > 32)
| return false; |
| |
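    // G_EXTRACT's offset becomes UBFM's immr operand; append
    // imms = offset + width - 1 so the field lands in the low bits.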
| I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri)); |
| MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() + |
| Ty.getSizeInBits() - 1); |
| |
| if (SrcSize < 64) { |
| assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 && |
| "unexpected G_EXTRACT types"); |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); |
| BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(), |
| TII.get(AArch64::COPY)) |
| .addDef(I.getOperand(0).getReg()) |
| .addUse(DstReg, 0, AArch64::sub_32); |
| RBI.constrainGenericRegister(I.getOperand(0).getReg(), |
| AArch64::GPR32RegClass, MRI); |
| I.getOperand(0).setReg(DstReg); |
| |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_INSERT: { |
| LLT SrcTy = MRI.getType(I.getOperand(2).getReg()); |
| LLT DstTy = MRI.getType(I.getOperand(0).getReg()); |
| unsigned DstSize = DstTy.getSizeInBits(); |
    // Larger inserts are vectors; same-size ones should be something else by
    // now (split up or turned into COPYs).
| if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32) |
| return false; |
| |
| I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri)); |
| unsigned LSB = I.getOperand(3).getImm(); |
| unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits(); |
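    // BFM encodes "insert Width bits at LSB" as immr = (RegSize - LSB) mod
    // RegSize (a right-rotate amount) and imms = Width - 1.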
| I.getOperand(3).setImm((DstSize - LSB) % DstSize); |
| MachineInstrBuilder(MF, I).addImm(Width - 1); |
| |
| if (DstSize < 64) { |
| assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 && |
| "unexpected G_INSERT types"); |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); |
| BuildMI(MBB, I.getIterator(), I.getDebugLoc(), |
| TII.get(AArch64::SUBREG_TO_REG)) |
| .addDef(SrcReg) |
| .addImm(0) |
| .addUse(I.getOperand(2).getReg()) |
| .addImm(AArch64::sub_32); |
| RBI.constrainGenericRegister(I.getOperand(2).getReg(), |
| AArch64::GPR32RegClass, MRI); |
| I.getOperand(2).setReg(SrcReg); |
| |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| case TargetOpcode::G_FRAME_INDEX: { |
| // allocas and G_FRAME_INDEX are only supported in addrspace(0). |
| if (Ty != LLT::pointer(0, 64)) { |
| LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty |
| << ", expected: " << LLT::pointer(0, 64) << '\n'); |
| return false; |
| } |
| I.setDesc(TII.get(AArch64::ADDXri)); |
| |
| // MOs for a #0 shifted immediate. |
| I.addOperand(MachineOperand::CreateImm(0)); |
| I.addOperand(MachineOperand::CreateImm(0)); |
| |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_GLOBAL_VALUE: { |
| auto GV = I.getOperand(1).getGlobal(); |
| if (GV->isThreadLocal()) { |
| // FIXME: we don't support TLS yet. |
| return false; |
| } |
| unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM); |
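    // Depending on the classification, either load the address through the
    // GOT, materialize the full 64-bit address with MOVZ/MOVK (large code
    // model), or emit an ADRP + ADD-immediate pair (the MOVaddr pseudo).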
| if (OpFlags & AArch64II::MO_GOT) { |
| I.setDesc(TII.get(AArch64::LOADgot)); |
| I.getOperand(1).setTargetFlags(OpFlags); |
| } else if (TM.getCodeModel() == CodeModel::Large) { |
| // Materialize the global using movz/movk instructions. |
| materializeLargeCMVal(I, GV, OpFlags); |
| I.eraseFromParent(); |
| return true; |
| } else { |
| I.setDesc(TII.get(AArch64::MOVaddr)); |
| I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE); |
| MachineInstrBuilder MIB(MF, I); |
| MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(), |
| OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); |
| } |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_LOAD: |
| case TargetOpcode::G_STORE: { |
| LLT PtrTy = MRI.getType(I.getOperand(1).getReg()); |
| |
| if (PtrTy != LLT::pointer(0, 64)) { |
| LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy |
| << ", expected: " << LLT::pointer(0, 64) << '\n'); |
| return false; |
| } |
| |
| auto &MemOp = **I.memoperands_begin(); |
| if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) { |
| LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n"); |
| return false; |
| } |
| unsigned MemSizeInBits = MemOp.getSize() * 8; |
| |
| // FIXME: PR36018: Volatile loads in some cases are incorrectly selected by |
    // folding with an extend. Until we have a G_SEXTLOAD solution, bail out
    // if we hit one.
| if (Opcode == TargetOpcode::G_LOAD && MemOp.isVolatile()) |
| return false; |
| |
| const unsigned PtrReg = I.getOperand(1).getReg(); |
| #ifndef NDEBUG |
| const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI); |
| // Sanity-check the pointer register. |
| assert(PtrRB.getID() == AArch64::GPRRegBankID && |
| "Load/Store pointer operand isn't a GPR"); |
| assert(MRI.getType(PtrReg).isPointer() && |
| "Load/Store pointer operand isn't a pointer"); |
| #endif |
| |
| const unsigned ValReg = I.getOperand(0).getReg(); |
| const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI); |
| |
| const unsigned NewOpc = |
| selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits); |
| if (NewOpc == I.getOpcode()) |
| return false; |
| |
| I.setDesc(TII.get(NewOpc)); |
| |
| uint64_t Offset = 0; |
| auto *PtrMI = MRI.getVRegDef(PtrReg); |
| |
| // Try to fold a GEP into our unsigned immediate addressing mode. |
| if (PtrMI->getOpcode() == TargetOpcode::G_GEP) { |
| if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) { |
| int64_t Imm = *COff; |
| const unsigned Size = MemSizeInBits / 8; |
| const unsigned Scale = Log2_32(Size); |
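        // The scaled uimm12 addressing mode only covers aligned offsets in
        // [0, 0xfff * Size]; e.g. an 8-byte access can fold offsets that are
        // multiples of 8 up to 32760, stored pre-scaled in the instruction.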
| if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) { |
| unsigned Ptr2Reg = PtrMI->getOperand(1).getReg(); |
| I.getOperand(1).setReg(Ptr2Reg); |
| PtrMI = MRI.getVRegDef(Ptr2Reg); |
| Offset = Imm / Size; |
| } |
| } |
| } |
| |
| // If we haven't folded anything into our addressing mode yet, try to fold |
| // a frame index into the base+offset. |
| if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX) |
| I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex()); |
| |
| I.addOperand(MachineOperand::CreateImm(Offset)); |
| |
| // If we're storing a 0, use WZR/XZR. |
| if (auto CVal = getConstantVRegVal(ValReg, MRI)) { |
| if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) { |
| if (I.getOpcode() == AArch64::STRWui) |
| I.getOperand(0).setReg(AArch64::WZR); |
| else if (I.getOpcode() == AArch64::STRXui) |
| I.getOperand(0).setReg(AArch64::XZR); |
| } |
| } |
| |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_SMULH: |
| case TargetOpcode::G_UMULH: { |
| // Reject the various things we don't support yet. |
| if (unsupportedBinOp(I, RBI, MRI, TRI)) |
| return false; |
| |
| const unsigned DefReg = I.getOperand(0).getReg(); |
| const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); |
| |
| if (RB.getID() != AArch64::GPRRegBankID) { |
| LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n"); |
| return false; |
| } |
| |
| if (Ty != LLT::scalar(64)) { |
| LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty |
| << ", expected: " << LLT::scalar(64) << '\n'); |
| return false; |
| } |
| |
| unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr |
| : AArch64::UMULHrr; |
| I.setDesc(TII.get(NewOpc)); |
| |
| // Now that we selected an opcode, we need to constrain the register |
| // operands to use appropriate classes. |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| case TargetOpcode::G_FADD: |
| case TargetOpcode::G_FSUB: |
| case TargetOpcode::G_FMUL: |
| case TargetOpcode::G_FDIV: |
| |
| case TargetOpcode::G_OR: |
| case TargetOpcode::G_SHL: |
| case TargetOpcode::G_LSHR: |
| case TargetOpcode::G_ASHR: |
| case TargetOpcode::G_GEP: { |
| // Reject the various things we don't support yet. |
| if (unsupportedBinOp(I, RBI, MRI, TRI)) |
| return false; |
| |
| const unsigned OpSize = Ty.getSizeInBits(); |
| |
| const unsigned DefReg = I.getOperand(0).getReg(); |
| const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); |
| |
| const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize); |
| if (NewOpc == I.getOpcode()) |
| return false; |
| |
| I.setDesc(TII.get(NewOpc)); |
| // FIXME: Should the type be always reset in setDesc? |
| |
| // Now that we selected an opcode, we need to constrain the register |
| // operands to use appropriate classes. |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_PTR_MASK: { |
| uint64_t Align = I.getOperand(2).getImm(); |
| if (Align >= 64 || Align == 0) |
| return false; |
| |
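    // Clearing the low Align bits is an AND with a mask that is a contiguous
    // run of ones, which is always encodable as a logical immediate.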
| uint64_t Mask = ~((1ULL << Align) - 1); |
| I.setDesc(TII.get(AArch64::ANDXri)); |
| I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64)); |
| |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| case TargetOpcode::G_PTRTOINT: |
| case TargetOpcode::G_TRUNC: { |
| const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); |
| const LLT SrcTy = MRI.getType(I.getOperand(1).getReg()); |
| |
| const unsigned DstReg = I.getOperand(0).getReg(); |
| const unsigned SrcReg = I.getOperand(1).getReg(); |
| |
| const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); |
| const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI); |
| |
| if (DstRB.getID() != SrcRB.getID()) { |
| LLVM_DEBUG( |
| dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n"); |
| return false; |
| } |
| |
| if (DstRB.getID() == AArch64::GPRRegBankID) { |
| const TargetRegisterClass *DstRC = |
| getRegClassForTypeOnBank(DstTy, DstRB, RBI); |
| if (!DstRC) |
| return false; |
| |
| const TargetRegisterClass *SrcRC = |
| getRegClassForTypeOnBank(SrcTy, SrcRB, RBI); |
| if (!SrcRC) |
| return false; |
| |
| if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) || |
| !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) { |
| LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n"); |
| return false; |
| } |
| |
| if (DstRC == SrcRC) { |
| // Nothing to be done |
| } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) && |
| SrcTy == LLT::scalar(64)) { |
        llvm_unreachable("TableGen can import this case");
| } else if (DstRC == &AArch64::GPR32RegClass && |
| SrcRC == &AArch64::GPR64RegClass) { |
| I.getOperand(1).setSubReg(AArch64::sub_32); |
| } else { |
| LLVM_DEBUG( |
| dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n"); |
| return false; |
| } |
| |
| I.setDesc(TII.get(TargetOpcode::COPY)); |
| return true; |
| } else if (DstRB.getID() == AArch64::FPRRegBankID) { |
| if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) { |
| I.setDesc(TII.get(AArch64::XTNv4i16)); |
| constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| case TargetOpcode::G_ANYEXT: { |
| const unsigned DstReg = I.getOperand(0).getReg(); |
| const unsigned SrcReg = I.getOperand(1).getReg(); |
| |
| const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI); |
| if (RBDst.getID() != AArch64::GPRRegBankID) { |
| LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst |
| << ", expected: GPR\n"); |
| return false; |
| } |
| |
| const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI); |
| if (RBSrc.getID() != AArch64::GPRRegBankID) { |
| LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc |
| << ", expected: GPR\n"); |
| return false; |
| } |
| |
| const unsigned DstSize = MRI.getType(DstReg).getSizeInBits(); |
| |
| if (DstSize == 0) { |
| LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n"); |
| return false; |
| } |
| |
| if (DstSize != 64 && DstSize > 32) { |
| LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize |
| << ", expected: 32 or 64\n"); |
| return false; |
| } |
| // At this point G_ANYEXT is just like a plain COPY, but we need |
| // to explicitly form the 64-bit value if any. |
| if (DstSize > 32) { |
| unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass); |
| BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG)) |
| .addDef(ExtSrc) |
| .addImm(0) |
| .addUse(SrcReg) |
| .addImm(AArch64::sub_32); |
| I.getOperand(1).setReg(ExtSrc); |
| } |
| return selectCopy(I, TII, MRI, TRI, RBI); |
| } |
| |
| case TargetOpcode::G_ZEXT: |
| case TargetOpcode::G_SEXT: { |
| unsigned Opcode = I.getOpcode(); |
| const LLT DstTy = MRI.getType(I.getOperand(0).getReg()), |
| SrcTy = MRI.getType(I.getOperand(1).getReg()); |
| const bool isSigned = Opcode == TargetOpcode::G_SEXT; |
| const unsigned DefReg = I.getOperand(0).getReg(); |
| const unsigned SrcReg = I.getOperand(1).getReg(); |
| const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); |
| |
| if (RB.getID() != AArch64::GPRRegBankID) { |
| LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB |
| << ", expected: GPR\n"); |
| return false; |
| } |
| |
| MachineInstr *ExtI; |
| if (DstTy == LLT::scalar(64)) { |
| // FIXME: Can we avoid manually doing this? |
| if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) { |
| LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode) |
| << " operand\n"); |
| return false; |
| } |
| |
| const unsigned SrcXReg = |
| MRI.createVirtualRegister(&AArch64::GPR64RegClass); |
| BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG)) |
| .addDef(SrcXReg) |
| .addImm(0) |
| .addUse(SrcReg) |
| .addImm(AArch64::sub_32); |
| |
| const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri; |
| ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc)) |
| .addDef(DefReg) |
| .addUse(SrcXReg) |
| .addImm(0) |
| .addImm(SrcTy.getSizeInBits() - 1); |
| } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) { |
| const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri; |
| ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc)) |
| .addDef(DefReg) |
| .addUse(SrcReg) |
| .addImm(0) |
| .addImm(SrcTy.getSizeInBits() - 1); |
| } else { |
| return false; |
| } |
| |
| constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| case TargetOpcode::G_SITOFP: |
| case TargetOpcode::G_UITOFP: |
| case TargetOpcode::G_FPTOSI: |
| case TargetOpcode::G_FPTOUI: { |
| const LLT DstTy = MRI.getType(I.getOperand(0).getReg()), |
| SrcTy = MRI.getType(I.getOperand(1).getReg()); |
| const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy); |
| if (NewOpc == Opcode) |
| return false; |
| |
| I.setDesc(TII.get(NewOpc)); |
| constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| |
| return true; |
| } |
| |
| |
| case TargetOpcode::G_INTTOPTR: |
| // The importer is currently unable to import pointer types since they |
| // didn't exist in SelectionDAG. |
| return selectCopy(I, TII, MRI, TRI, RBI); |
| |
| case TargetOpcode::G_BITCAST: |
| // Imported SelectionDAG rules can handle every bitcast except those that |
| // bitcast from a type to the same type. Ideally, these shouldn't occur |
| // but we might not run an optimizer that deletes them. |
| if (MRI.getType(I.getOperand(0).getReg()) == |
| MRI.getType(I.getOperand(1).getReg())) |
| return selectCopy(I, TII, MRI, TRI, RBI); |
| return false; |
| |
| case TargetOpcode::G_SELECT: { |
    const LLT CondTy = MRI.getType(I.getOperand(1).getReg());
    if (CondTy != LLT::scalar(1)) {
      LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << CondTy
                        << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }
| |
| const unsigned CondReg = I.getOperand(1).getReg(); |
| const unsigned TReg = I.getOperand(2).getReg(); |
| const unsigned FReg = I.getOperand(3).getReg(); |
| |
| unsigned CSelOpc = 0; |
| |
| if (Ty == LLT::scalar(32)) { |
| CSelOpc = AArch64::CSELWr; |
| } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) { |
| CSelOpc = AArch64::CSELXr; |
| } else { |
| return false; |
| } |
| |
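    // Test bit 0 of the condition (ANDS with #1, discarding the result into
    // WZR) and then CSEL on NE: TReg when the bit is set, FReg otherwise.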
| MachineInstr &TstMI = |
| *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri)) |
| .addDef(AArch64::WZR) |
| .addUse(CondReg) |
| .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); |
| |
| MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc)) |
| .addDef(I.getOperand(0).getReg()) |
| .addUse(TReg) |
| .addUse(FReg) |
| .addImm(AArch64CC::NE); |
| |
| constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI); |
| constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| case TargetOpcode::G_ICMP: { |
| if (Ty != LLT::scalar(32)) { |
| LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty |
| << ", expected: " << LLT::scalar(32) << '\n'); |
| return false; |
| } |
| |
| unsigned CmpOpc = 0; |
| unsigned ZReg = 0; |
| |
| LLT CmpTy = MRI.getType(I.getOperand(2).getReg()); |
| if (CmpTy == LLT::scalar(32)) { |
| CmpOpc = AArch64::SUBSWrr; |
| ZReg = AArch64::WZR; |
| } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) { |
| CmpOpc = AArch64::SUBSXrr; |
| ZReg = AArch64::XZR; |
| } else { |
| return false; |
| } |
| |
| // CSINC increments the result by one when the condition code is false. |
| // Therefore, we have to invert the predicate to get an increment by 1 when |
| // the predicate is true. |
| const AArch64CC::CondCode invCC = |
| changeICMPPredToAArch64CC(CmpInst::getInversePredicate( |
| (CmpInst::Predicate)I.getOperand(1).getPredicate())); |
| |
| MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc)) |
| .addDef(ZReg) |
| .addUse(I.getOperand(2).getReg()) |
| .addUse(I.getOperand(3).getReg()); |
| |
| MachineInstr &CSetMI = |
| *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr)) |
| .addDef(I.getOperand(0).getReg()) |
| .addUse(AArch64::WZR) |
| .addUse(AArch64::WZR) |
| .addImm(invCC); |
| |
| constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI); |
| constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| case TargetOpcode::G_FCMP: { |
| if (Ty != LLT::scalar(32)) { |
| LLVM_DEBUG(dbgs() << "G_FCMP result has type: " << Ty |
| << ", expected: " << LLT::scalar(32) << '\n'); |
| return false; |
| } |
| |
| unsigned CmpOpc = 0; |
| LLT CmpTy = MRI.getType(I.getOperand(2).getReg()); |
| if (CmpTy == LLT::scalar(32)) { |
| CmpOpc = AArch64::FCMPSrr; |
| } else if (CmpTy == LLT::scalar(64)) { |
| CmpOpc = AArch64::FCMPDrr; |
| } else { |
| return false; |
| } |
| |
| // FIXME: regbank |
| |
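    // Some FP predicates need two condition codes; materialize each with a
    // CSINC and OR the results together.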
| AArch64CC::CondCode CC1, CC2; |
| changeFCMPPredToAArch64CC( |
| (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2); |
| |
| MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc)) |
| .addUse(I.getOperand(2).getReg()) |
| .addUse(I.getOperand(3).getReg()); |
| |
| const unsigned DefReg = I.getOperand(0).getReg(); |
| unsigned Def1Reg = DefReg; |
| if (CC2 != AArch64CC::AL) |
| Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); |
| |
| MachineInstr &CSetMI = |
| *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr)) |
| .addDef(Def1Reg) |
| .addUse(AArch64::WZR) |
| .addUse(AArch64::WZR) |
| .addImm(getInvertedCondCode(CC1)); |
| |
| if (CC2 != AArch64CC::AL) { |
| unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); |
| MachineInstr &CSet2MI = |
| *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr)) |
| .addDef(Def2Reg) |
| .addUse(AArch64::WZR) |
| .addUse(AArch64::WZR) |
| .addImm(getInvertedCondCode(CC2)); |
| MachineInstr &OrMI = |
| *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr)) |
| .addDef(DefReg) |
| .addUse(Def1Reg) |
| .addUse(Def2Reg); |
| constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI); |
| constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI); |
| } |
| |
| constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI); |
| constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| case TargetOpcode::G_VASTART: |
| return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI) |
| : selectVaStartAAPCS(I, MF, MRI); |
| case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: |
| if (!I.getOperand(0).isIntrinsicID()) |
| return false; |
| if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap) |
| return false; |
| BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::BRK)) |
| .addImm(1); |
| I.eraseFromParent(); |
| return true; |
| case TargetOpcode::G_IMPLICIT_DEF: { |
| I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); |
| const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); |
| const unsigned DstReg = I.getOperand(0).getReg(); |
| const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); |
| const TargetRegisterClass *DstRC = |
| getRegClassForTypeOnBank(DstTy, DstRB, RBI); |
| RBI.constrainGenericRegister(DstReg, *DstRC, MRI); |
| return true; |
| } |
| case TargetOpcode::G_BLOCK_ADDR: { |
| if (TM.getCodeModel() == CodeModel::Large) { |
| materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0); |
| I.eraseFromParent(); |
| return true; |
| } else { |
| auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA), |
| I.getOperand(0).getReg()) |
| .addBlockAddress(I.getOperand(1).getBlockAddress(), |
| /* Offset */ 0, AArch64II::MO_PAGE) |
| .addBlockAddress( |
| I.getOperand(1).getBlockAddress(), /* Offset */ 0, |
| AArch64II::MO_NC | AArch64II::MO_PAGEOFF); |
| I.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI); |
| } |
| } |
| } |
| |
| return false; |
| } |
| |
/// selectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return the renderer
/// functions that add the 12-bit value and the shifter operand; otherwise
/// return None.
| InstructionSelector::ComplexRendererFns |
| AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const { |
| MachineInstr &MI = *Root.getParent(); |
| MachineBasicBlock &MBB = *MI.getParent(); |
| MachineFunction &MF = *MBB.getParent(); |
| MachineRegisterInfo &MRI = MF.getRegInfo(); |
| |
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in. However,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
| uint64_t Immed; |
| if (Root.isImm()) |
| Immed = Root.getImm(); |
| else if (Root.isCImm()) |
| Immed = Root.getCImm()->getZExtValue(); |
| else if (Root.isReg()) { |
| MachineInstr *Def = MRI.getVRegDef(Root.getReg()); |
| if (Def->getOpcode() != TargetOpcode::G_CONSTANT) |
| return None; |
| MachineOperand &Op1 = Def->getOperand(1); |
| if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64) |
| return None; |
| Immed = Op1.getCImm()->getZExtValue(); |
| } else |
| return None; |
| |
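  // An arithmetic immediate is an unsigned 12-bit value, optionally shifted
  // left by 12: accept Immed if it fits in 12 bits directly, or if it is a
  // 12-bit value scaled by 4096.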
| unsigned ShiftAmt; |
| |
| if (Immed >> 12 == 0) { |
| ShiftAmt = 0; |
| } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) { |
| ShiftAmt = 12; |
| Immed = Immed >> 12; |
| } else |
| return None; |
| |
| unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt); |
| return {{ |
| [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); }, |
| [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); }, |
| }}; |
| } |
| |
| /// Select a "register plus unscaled signed 9-bit immediate" address. This |
| /// should only match when there is an offset that is not valid for a scaled |
| /// immediate addressing mode. The "Size" argument is the size in bytes of the |
| /// memory reference, which is needed here to know what is valid for a scaled |
| /// immediate. |
| InstructionSelector::ComplexRendererFns |
| AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root, |
| unsigned Size) const { |
| MachineRegisterInfo &MRI = |
| Root.getParent()->getParent()->getParent()->getRegInfo(); |
| |
| if (!Root.isReg()) |
| return None; |
| |
| if (!isBaseWithConstantOffset(Root, MRI)) |
| return None; |
| |
| MachineInstr *RootDef = MRI.getVRegDef(Root.getReg()); |
| if (!RootDef) |
| return None; |
| |
| MachineOperand &OffImm = RootDef->getOperand(2); |
| if (!OffImm.isReg()) |
| return None; |
| MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg()); |
| if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT) |
| return None; |
| int64_t RHSC; |
| MachineOperand &RHSOp1 = RHS->getOperand(1); |
| if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64) |
| return None; |
| RHSC = RHSOp1.getCImm()->getSExtValue(); |
| |
| // If the offset is valid as a scaled immediate, don't match here. |
| if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size))) |
| return None; |
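  // Unscaled loads/stores (LDUR/STUR) take a signed 9-bit byte offset, i.e.
  // [-256, 255].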
| if (RHSC >= -256 && RHSC < 256) { |
| MachineOperand &Base = RootDef->getOperand(1); |
| return {{ |
| [=](MachineInstrBuilder &MIB) { MIB.add(Base); }, |
| [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }, |
| }}; |
| } |
| return None; |
| } |
| |
| /// Select a "register plus scaled unsigned 12-bit immediate" address. The |
| /// "Size" argument is the size in bytes of the memory reference, which |
| /// determines the scale. |
| InstructionSelector::ComplexRendererFns |
| AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root, |
| unsigned Size) const { |
| MachineRegisterInfo &MRI = |
| Root.getParent()->getParent()->getParent()->getRegInfo(); |
| |
| if (!Root.isReg()) |
| return None; |
| |
| MachineInstr *RootDef = MRI.getVRegDef(Root.getReg()); |
| if (!RootDef) |
| return None; |
| |
| if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) { |
| return {{ |
| [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }, |
| [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, |
| }}; |
| } |
| |
| if (isBaseWithConstantOffset(Root, MRI)) { |
| MachineOperand &LHS = RootDef->getOperand(1); |
| MachineOperand &RHS = RootDef->getOperand(2); |
| MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg()); |
| MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg()); |
| if (LHSDef && RHSDef) { |
| int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue(); |
| unsigned Scale = Log2_32(Size); |
| if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) { |
| if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) |
| return {{ |
| [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); }, |
| [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); }, |
| }}; |
| |
| return {{ |
| [=](MachineInstrBuilder &MIB) { MIB.add(LHS); }, |
| [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); }, |
| }}; |
| } |
| } |
| } |
| |
| // Before falling back to our general case, check if the unscaled |
| // instructions can handle this. If so, that's preferable. |
| if (selectAddrModeUnscaled(Root, Size).hasValue()) |
| return None; |
| |
| return {{ |
| [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, |
| [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, |
| }}; |
| } |
| |
| void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB, |
| const MachineInstr &MI) const { |
| const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
| assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT"); |
| Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI); |
| assert(CstVal && "Expected constant value"); |
| MIB.addImm(CstVal.getValue()); |
| } |
| |
| namespace llvm { |
| InstructionSelector * |
| createAArch64InstructionSelector(const AArch64TargetMachine &TM, |
| AArch64Subtarget &Subtarget, |
| AArch64RegisterBankInfo &RBI) { |
| return new AArch64InstructionSelector(TM, Subtarget, RBI); |
| } |
| } |