| //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// |
| // |
| // The Subzero Code Generator |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| /// |
| /// \file |
| /// \brief Implements the TargetLoweringX8632 class, which consists almost |
| /// entirely of the lowering sequence for each high-level instruction. |
| /// |
| //===----------------------------------------------------------------------===// |
| |
| #include "IceTargetLoweringX8632.h" |
| |
| #include "IceCfg.h" |
| #include "IceCfgNode.h" |
| #include "IceClFlags.h" |
| #include "IceDefs.h" |
| #include "IceELFObjectWriter.h" |
| #include "IceGlobalInits.h" |
| #include "IceInstVarIter.h" |
| #include "IceInstX8632.h" |
| #include "IceLiveness.h" |
| #include "IceOperand.h" |
| #include "IcePhiLoweringImpl.h" |
| #include "IceTargetLoweringX8632.def" |
| #include "IceUtils.h" |
| #include "IceVariableSplitting.h" |
| |
| #include "llvm/Support/MathExtras.h" |
| |
| #include <stack> |
| |
| #if defined(_WIN32) |
| extern "C" void _chkstk(); |
| #endif |
| |
| namespace X8632 { |
| |
| std::unique_ptr<::Ice::TargetLowering> createTargetLowering(::Ice::Cfg *Func) { |
| return ::Ice::X8632::TargetX8632::create(Func); |
| } |
| |
| std::unique_ptr<::Ice::TargetDataLowering> |
| createTargetDataLowering(::Ice::GlobalContext *Ctx) { |
| return ::Ice::X8632::TargetDataX8632::create(Ctx); |
| } |
| |
| std::unique_ptr<::Ice::TargetHeaderLowering> |
| createTargetHeaderLowering(::Ice::GlobalContext *Ctx) { |
| return ::Ice::X8632::TargetHeaderX86::create(Ctx); |
| } |
| |
| void staticInit(::Ice::GlobalContext *Ctx) { |
| ::Ice::X8632::TargetX8632::staticInit(Ctx); |
| } |
| |
| bool shouldBePooled(const class ::Ice::Constant *C) { |
| return ::Ice::X8632::TargetX8632::shouldBePooled(C); |
| } |
| |
| ::Ice::Type getPointerType() { return ::Ice::Type::IceType_i32; } |
| |
| } // end of namespace X8632 |
| |
| namespace Ice { |
| namespace X8632 { |
| |
| /// The number of bits in a byte |
| static constexpr uint32_t X86_CHAR_BIT = 8; |
| /// Size of the return address on the stack |
| static constexpr uint32_t X86_RET_IP_SIZE_BYTES = 4; |
| |
| /// \name Limits for unrolling memory intrinsics. |
| /// @{ |
| static constexpr uint32_t MEMCPY_UNROLL_LIMIT = 8; |
| static constexpr uint32_t MEMMOVE_UNROLL_LIMIT = 8; |
| static constexpr uint32_t MEMSET_UNROLL_LIMIT = 8; |
| /// @} |
| |
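| // The BoolFolding machinery below tracks boolean "producer" instructions |
| // (icmp, fcmp, and certain i32 and/or arithmetic) whose results feed branch |
| // or select "consumers", so that lowering can fuse the pair. As an |
| // illustrative sketch (the exact emitted sequence depends on the consumer's |
| // lowering), a producer/consumer pair such as |
| //   b = icmp slt a, 0 |
| //   br b, label %taken, label %not_taken |
| // can be lowered as a single cmp followed by a conditional branch, without |
| // materializing b in a register. |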
| BoolFoldingEntry::BoolFoldingEntry(Inst *I) |
| : Instr(I), IsComplex(BoolFolding::hasComplexLowering(I)) {} |
| |
| BoolFolding::BoolFoldingProducerKind |
| BoolFolding::getProducerKind(const Inst *Instr) { |
| if (llvm::isa<InstIcmp>(Instr)) { |
| if (Instr->getSrc(0)->getType() != IceType_i64) |
| return PK_Icmp32; |
| return PK_Icmp64; |
| } |
| if (llvm::isa<InstFcmp>(Instr)) |
| return PK_Fcmp; |
| if (auto *Arith = llvm::dyn_cast<InstArithmetic>(Instr)) { |
| if (Arith->getSrc(0)->getType() != IceType_i64) { |
| switch (Arith->getOp()) { |
| default: |
| return PK_None; |
| case InstArithmetic::And: |
| case InstArithmetic::Or: |
| return PK_Arith; |
| } |
| } |
| } |
| return PK_None; // TODO(stichnot): remove this early return, which currently |
| // disables the Trunc producer case below. |
| |
| if (auto *Cast = llvm::dyn_cast<InstCast>(Instr)) { |
| switch (Cast->getCastKind()) { |
| default: |
| return PK_None; |
| case InstCast::Trunc: |
| return PK_Trunc; |
| } |
| } |
| return PK_None; |
| } |
| |
| BoolFolding::BoolFoldingConsumerKind |
| BoolFolding::getConsumerKind(const Inst *Instr) { |
| if (llvm::isa<InstBr>(Instr)) |
| return CK_Br; |
| if (llvm::isa<InstSelect>(Instr)) |
| return CK_Select; |
| return CK_None; // TODO(stichnot): remove this early return, which currently |
| // disables the Sext/Zext consumer cases below. |
| |
| if (auto *Cast = llvm::dyn_cast<InstCast>(Instr)) { |
| switch (Cast->getCastKind()) { |
| default: |
| return CK_None; |
| case InstCast::Sext: |
| return CK_Sext; |
| case InstCast::Zext: |
| return CK_Zext; |
| } |
| } |
| return CK_None; |
| } |
| |
| /// Returns true if the producing instruction has a "complex" lowering sequence. |
| /// This generally means that its lowering sequence requires more than one |
| /// conditional branch, namely 64-bit integer compares and some floating-point |
| /// compares. When this is true and there is more than one consumer, we |
| /// prefer to disable the folding optimization, since folding would duplicate |
| /// the multi-branch sequence at each consumer; keeping a single producer |
| /// minimizes branches. |
| |
| bool BoolFolding::hasComplexLowering(const Inst *Instr) { |
| switch (getProducerKind(Instr)) { |
| default: |
| return false; |
| case PK_Icmp64: |
| return true; |
| case PK_Fcmp: |
| return TargetX8632::TableFcmp[llvm::cast<InstFcmp>(Instr)->getCondition()] |
| .C2 != CondX86::Br_None; |
| } |
| } |
| |
| bool BoolFolding::isValidFolding( |
| BoolFolding::BoolFoldingProducerKind ProducerKind, |
| BoolFolding::BoolFoldingConsumerKind ConsumerKind) { |
| switch (ProducerKind) { |
| default: |
| return false; |
| case PK_Icmp32: |
| case PK_Icmp64: |
| case PK_Fcmp: |
| return (ConsumerKind == CK_Br) || (ConsumerKind == CK_Select); |
| case PK_Arith: |
| return ConsumerKind == CK_Br; |
| } |
| } |
| |
| void BoolFolding::init(CfgNode *Node) { |
| Producers.clear(); |
| for (Inst &Instr : Node->getInsts()) { |
| if (Instr.isDeleted()) |
| continue; |
| invalidateProducersOnStore(&Instr); |
| // Check whether Instr is a valid producer. |
| Variable *Var = Instr.getDest(); |
| if (Var) { // only consider instructions with an actual dest var |
| if (isBooleanType(Var->getType())) { // only bool-type dest vars |
| if (getProducerKind(&Instr) != PK_None) { // white-listed instructions |
| Producers[Var->getIndex()] = BoolFoldingEntry(&Instr); |
| } |
| } |
| } |
| // Check each src variable against the map. |
| FOREACH_VAR_IN_INST(Var, Instr) { |
| SizeT VarNum = Var->getIndex(); |
| if (!containsValid(VarNum)) |
| continue; |
| // All valid consumers use Var as the first source operand |
| if (IndexOfVarOperandInInst(Var) != 0) { |
| setInvalid(VarNum); |
| continue; |
| } |
| // Consumer instructions must be white-listed |
| BoolFolding::BoolFoldingConsumerKind ConsumerKind = |
| getConsumerKind(&Instr); |
| if (ConsumerKind == CK_None) { |
| setInvalid(VarNum); |
| continue; |
| } |
| BoolFolding::BoolFoldingProducerKind ProducerKind = |
| getProducerKind(Producers[VarNum].Instr); |
| if (!isValidFolding(ProducerKind, ConsumerKind)) { |
| setInvalid(VarNum); |
| continue; |
| } |
| // Avoid creating multiple copies of complex producer instructions. |
| if (Producers[VarNum].IsComplex && Producers[VarNum].NumUses > 0) { |
| setInvalid(VarNum); |
| continue; |
| } |
| ++Producers[VarNum].NumUses; |
| if (Instr.isLastUse(Var)) { |
| Producers[VarNum].IsLiveOut = false; |
| } |
| } |
| } |
| for (auto &I : Producers) { |
| // Ignore entries previously marked invalid. |
| if (I.second.Instr == nullptr) |
| continue; |
| // Disable the producer if its dest may be live beyond this block. |
| if (I.second.IsLiveOut) { |
| setInvalid(I.first); |
| continue; |
| } |
| // Mark as "dead" rather than outright deleting. This is so that other |
| // peephole style optimizations during or before lowering have access to |
| // this instruction in undeleted form. See for example |
| // tryOptimizedCmpxchgCmpBr(). |
| I.second.Instr->setDead(); |
| } |
| } |
| |
| const Inst *BoolFolding::getProducerFor(const Operand *Opnd) const { |
| auto *Var = llvm::dyn_cast<const Variable>(Opnd); |
| if (Var == nullptr) |
| return nullptr; |
| SizeT VarNum = Var->getIndex(); |
| auto Element = Producers.find(VarNum); |
| if (Element == Producers.end()) |
| return nullptr; |
| return Element->second.Instr; |
| } |
| |
| void BoolFolding::dump(const Cfg *Func) const { |
| if (!BuildDefs::dump() || !Func->isVerbose(IceV_Folding)) |
| return; |
| OstreamLocker L(Func->getContext()); |
| Ostream &Str = Func->getContext()->getStrDump(); |
| for (auto &I : Producers) { |
| if (I.second.Instr == nullptr) |
| continue; |
| Str << "Found foldable producer:\n "; |
| I.second.Instr->dump(Func); |
| Str << "\n"; |
| } |
| } |
| |
| /// If the given instruction has potential memory side effects (e.g. store, rmw, |
| /// or a call instruction with potential memory side effects), then we must not |
| /// allow a pre-store Producer instruction with memory operands to be folded |
| /// into a post-store Consumer instruction. If this is detected, the Producer |
| /// is invalidated. |
| /// |
| /// We use the Producer's IsLiveOut field to determine whether any potential |
| /// Consumers come after this store instruction. The IsLiveOut field is |
| /// initialized to true, and BoolFolding::init() sets IsLiveOut to false when it |
| /// sees the variable's definitive last use (indicating the variable is not in |
| /// the node's live-out set). Thus if we see here that IsLiveOut is false, we |
| /// know that there can be no consumers after the store, and therefore we know |
| /// the folding is safe despite the store instruction. |
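| /// |
| /// As an illustrative example of the hazard (not actual emitted code): |
| ///   b = icmp eq x, <mem>   ; producer with a memory operand |
| ///   store ..., <mem>       ; may clobber the loaded location |
| ///   br b, ...              ; consumer; folding would re-read <mem> here |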
| |
| void BoolFolding::invalidateProducersOnStore(const Inst *Instr) { |
| if (!Instr->isMemoryWrite()) |
| return; |
| for (auto &ProducerPair : Producers) { |
| if (!ProducerPair.second.IsLiveOut) |
| continue; |
| Inst *PInst = ProducerPair.second.Instr; |
| if (PInst == nullptr) |
| continue; |
| bool HasMemOperand = false; |
| const SizeT SrcSize = PInst->getSrcSize(); |
| for (SizeT I = 0; I < SrcSize; ++I) { |
| if (llvm::isa<X86OperandMem>(PInst->getSrc(I))) { |
| HasMemOperand = true; |
| break; |
| } |
| } |
| if (!HasMemOperand) |
| continue; |
| setInvalid(ProducerPair.first); |
| } |
| } |
| |
| void TargetX8632::initNodeForLowering(CfgNode *Node) { |
| FoldingInfo.init(Node); |
| FoldingInfo.dump(Func); |
| } |
| |
| TargetX8632::TargetX8632(Cfg *Func) : TargetX86(Func) {} |
| |
| void TargetX8632::staticInit(GlobalContext *Ctx) { |
| RegNumT::setLimit(RegX8632::Reg_NUM); |
| RegX8632::initRegisterSet(getFlags(), &TypeToRegisterSet, &RegisterAliases); |
| for (size_t i = 0; i < TypeToRegisterSet.size(); ++i) |
| TypeToRegisterSetUnfiltered[i] = TypeToRegisterSet[i]; |
| filterTypeToRegisterSet(Ctx, RegX8632::Reg_NUM, TypeToRegisterSet.data(), |
| TypeToRegisterSet.size(), RegX8632::getRegName, |
| getRegClassName); |
| } |
| |
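| /// Returns true if the constant should be emitted into a read-only data pool |
| /// rather than encoded inline. Only floating-point constants other than +0.0 |
| /// are pooled; +0.0 can be materialized cheaply (e.g. by zeroing an xmm |
| /// register), so pooling it is assumed not to be worthwhile. |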
| bool TargetX8632::shouldBePooled(const Constant *C) { |
| if (auto *ConstFloat = llvm::dyn_cast<ConstantFloat>(C)) { |
| return !Utils::isPositiveZero(ConstFloat->getValue()); |
| } |
| if (auto *ConstDouble = llvm::dyn_cast<ConstantDouble>(C)) { |
| return !Utils::isPositiveZero(ConstDouble->getValue()); |
| } |
| return false; |
| } |
| |
| Type TargetX8632::getPointerType() { return IceType_i32; } |
| |
| void TargetX8632::translateO2() { |
| TimerMarker T(TimerStack::TT_O2, Func); |
| |
| genTargetHelperCalls(); |
| Func->dump("After target helper call insertion"); |
| |
| // Merge Alloca instructions, and lay out the stack. |
| static constexpr bool SortAndCombineAllocas = true; |
| Func->processAllocas(SortAndCombineAllocas); |
| Func->dump("After Alloca processing"); |
| |
| // Run this early so it can be used to focus optimizations on potentially hot |
| // code. |
| // TODO(stichnot,ascull): currently this is only used for regalloc, not for |
| // the expensive high-level optimizations that could be focused on |
| // potentially hot code. |
| Func->generateLoopInfo(); |
| Func->dump("After loop analysis"); |
| if (getFlags().getLoopInvariantCodeMotion()) { |
| Func->loopInvariantCodeMotion(); |
| Func->dump("After LICM"); |
| } |
| |
| if (getFlags().getLocalCSE() != Ice::LCSE_Disabled) { |
| Func->localCSE(getFlags().getLocalCSE() == Ice::LCSE_EnabledSSA); |
| Func->dump("After Local CSE"); |
| Func->floatConstantCSE(); |
| } |
| if (getFlags().getEnableShortCircuit()) { |
| Func->shortCircuitJumps(); |
| Func->dump("After Short Circuiting"); |
| } |
| |
| if (!getFlags().getEnablePhiEdgeSplit()) { |
| // Lower Phi instructions. |
| Func->placePhiLoads(); |
| if (Func->hasError()) |
| return; |
| Func->placePhiStores(); |
| if (Func->hasError()) |
| return; |
| Func->deletePhis(); |
| if (Func->hasError()) |
| return; |
| Func->dump("After Phi lowering"); |
| } |
| |
| // Address mode optimization. |
| Func->getVMetadata()->init(VMK_SingleDefs); |
| Func->doAddressOpt(); |
| Func->materializeVectorShuffles(); |
| |
| // Find read-modify-write opportunities. Do this after address mode |
| // optimization so that doAddressOpt() doesn't need to be applied to RMW |
| // instructions as well. |
| findRMW(); |
| Func->dump("After RMW transform"); |
| |
| // Argument lowering |
| Func->doArgLowering(); |
| |
| // Target lowering. This requires liveness analysis for some parts of the |
| // lowering decisions, such as compare/branch fusing. If non-lightweight |
| // liveness analysis is used, the instructions need to be renumbered first. |
| // TODO: This renumbering should only be necessary if we're actually |
| // calculating live intervals, which we only do for register allocation. |
| Func->renumberInstructions(); |
| if (Func->hasError()) |
| return; |
| |
| // TODO: It should be sufficient to use the fastest liveness calculation, |
| // i.e. livenessLightweight(). However, for some reason that slows down the |
| // rest of the translation. Investigate. |
| Func->liveness(Liveness_Basic); |
| if (Func->hasError()) |
| return; |
| Func->dump("After x86 address mode opt"); |
| |
| doLoadOpt(); |
| |
| Func->genCode(); |
| if (Func->hasError()) |
| return; |
| Func->dump("After x86 codegen"); |
| splitBlockLocalVariables(Func); |
| |
| // Register allocation. This requires instruction renumbering and full |
| // liveness analysis. Loops must be identified before liveness so variable |
| // use weights are correct. |
| Func->renumberInstructions(); |
| if (Func->hasError()) |
| return; |
| Func->liveness(Liveness_Intervals); |
| if (Func->hasError()) |
| return; |
| // The post-codegen dump is done here, after liveness analysis and associated |
| // cleanup, to make the dump cleaner and more useful. |
| Func->dump("After initial x86 codegen"); |
| // Validate the live range computations. The expensive validation call is |
| // deliberately only made when assertions are enabled. |
| assert(Func->validateLiveness()); |
| Func->getVMetadata()->init(VMK_All); |
| regAlloc(RAK_Global); |
| if (Func->hasError()) |
| return; |
| Func->dump("After linear scan regalloc"); |
| |
| if (getFlags().getEnablePhiEdgeSplit()) { |
| Func->advancedPhiLowering(); |
| Func->dump("After advanced Phi lowering"); |
| } |
| |
| // Stack frame mapping. |
| Func->genFrame(); |
| if (Func->hasError()) |
| return; |
| Func->dump("After stack frame mapping"); |
| |
| Func->contractEmptyNodes(); |
| Func->reorderNodes(); |
| |
| // Branch optimization. This needs to be done just before code emission. In |
| // particular, no transformations that insert or reorder CfgNodes should be |
| // done after branch optimization. We go ahead and do it before nop insertion |
| // to reduce the amount of work needed when searching for opportunities. |
| Func->doBranchOpt(); |
| Func->dump("After branch optimization"); |
| } |
| |
| void TargetX8632::translateOm1() { |
| TimerMarker T(TimerStack::TT_Om1, Func); |
| |
| genTargetHelperCalls(); |
| |
| // Ideally, do not merge Alloca instructions, and lay out the stack. However, |
| // SortAndCombineAllocas is currently forced to true to work around a Win32 |
| // bug (see the TODO below). |
| static constexpr bool SortAndCombineAllocas = |
| true; // TODO(b/171222930): Fix Win32 bug when this is false |
| Func->processAllocas(SortAndCombineAllocas); |
| Func->dump("After Alloca processing"); |
| |
| Func->placePhiLoads(); |
| if (Func->hasError()) |
| return; |
| Func->placePhiStores(); |
| if (Func->hasError()) |
| return; |
| Func->deletePhis(); |
| if (Func->hasError()) |
| return; |
| Func->dump("After Phi lowering"); |
| |
| Func->doArgLowering(); |
| Func->genCode(); |
| if (Func->hasError()) |
| return; |
| Func->dump("After initial x86 codegen"); |
| |
| regAlloc(RAK_InfOnly); |
| if (Func->hasError()) |
| return; |
| Func->dump("After regalloc of infinite-weight variables"); |
| |
| Func->genFrame(); |
| if (Func->hasError()) |
| return; |
| Func->dump("After stack frame mapping"); |
| } |
| |
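| /// Returns true when the arithmetic instruction can be lowered as an x86 |
| /// read-modify-write operation on a memory destination: add, sub, and, or, |
| /// or xor on non-vector types. Shifts are currently excluded (see the TODO |
| /// below). |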
| inline bool canRMW(const InstArithmetic *Arith) { |
| Type Ty = Arith->getDest()->getType(); |
| // X86 vector instructions write to a register and have no RMW option. |
| if (isVectorType(Ty)) |
| return false; |
| bool isI64 = Ty == IceType_i64; |
| |
| switch (Arith->getOp()) { |
| // Not handled for lack of simple lowering: |
| // shift on i64 |
| // mul, udiv, urem, sdiv, srem, frem |
| // Not handled for lack of RMW instructions: |
| // fadd, fsub, fmul, fdiv (also vector types) |
| default: |
| return false; |
| case InstArithmetic::Add: |
| case InstArithmetic::Sub: |
| case InstArithmetic::And: |
| case InstArithmetic::Or: |
| case InstArithmetic::Xor: |
| return true; |
| case InstArithmetic::Shl: |
| case InstArithmetic::Lshr: |
| case InstArithmetic::Ashr: |
| return false; // TODO(stichnot): implement |
| return !isI64; |
| } |
| } |
| |
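| /// Conservatively determines whether A and B refer to the same memory |
| /// address: either they are the same operand, or they are X86OperandMem |
| /// operands with identical base, offset, index, shift, and segment-register |
| /// components. |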
| bool isSameMemAddressOperand(const Operand *A, const Operand *B) { |
| if (A == B) |
| return true; |
| if (auto *MemA = llvm::dyn_cast<X86OperandMem>(A)) { |
| if (auto *MemB = llvm::dyn_cast<X86OperandMem>(B)) { |
| return MemA->getBase() == MemB->getBase() && |
| MemA->getOffset() == MemB->getOffset() && |
| MemA->getIndex() == MemB->getIndex() && |
| MemA->getShift() == MemB->getShift() && |
| MemA->getSegmentRegister() == MemB->getSegmentRegister(); |
| } |
| } |
| return false; |
| } |
| |
| void TargetX8632::findRMW() { |
| TimerMarker _(TimerStack::TT_findRMW, Func); |
| Func->dump("Before RMW"); |
| if (Func->isVerbose(IceV_RMW)) |
| Func->getContext()->lockStr(); |
| for (CfgNode *Node : Func->getNodes()) { |
| // Walk through the instructions, considering each sequence of 3 |
| // instructions, and look for the particular RMW pattern. Note that this |
| // search can be "broken" (false negatives) if there are intervening |
| // deleted instructions, or intervening instructions that could be safely |
| // moved out of the way to reveal an RMW pattern. |
| auto E = Node->getInsts().end(); |
| auto I1 = E, I2 = E, I3 = Node->getInsts().begin(); |
| for (; I3 != E; I1 = I2, I2 = I3, ++I3) { |
| // Make I3 skip over deleted instructions. |
| while (I3 != E && I3->isDeleted()) |
| ++I3; |
| if (I1 == E || I2 == E || I3 == E) |
| continue; |
| assert(!I1->isDeleted()); |
| assert(!I2->isDeleted()); |
| assert(!I3->isDeleted()); |
| auto *Load = llvm::dyn_cast<InstLoad>(I1); |
| auto *Arith = llvm::dyn_cast<InstArithmetic>(I2); |
| auto *Store = llvm::dyn_cast<InstStore>(I3); |
| if (!Load || !Arith || !Store) |
| continue; |
| // Look for: |
| // a = Load addr |
| // b = <op> a, other |
| // Store b, addr |
| // Change to: |
| // a = Load addr |
| // b = <op> a, other |
| // x = FakeDef |
| // RMW <op>, addr, other, x |
| // b = Store b, addr, x |
| // Note that inferTwoAddress() makes sure setDestRedefined() gets called |
| // on the updated Store instruction, to avoid liveness problems later. |
| // |
| // With this transformation, the Store instruction acquires a Dest |
| // variable and is now subject to dead code elimination if there are no |
| // more uses of "b". Variable "x" is a beacon for determining whether the |
| // Store instruction gets dead-code eliminated. If the Store instruction |
| // is eliminated, then it must be the case that the RMW instruction ends |
| // x's live range, and therefore the RMW instruction will be retained and |
| // later lowered. On the other hand, if the RMW instruction does not end |
| // x's live range, then the Store instruction must still be present, and |
| // therefore the RMW instruction is ignored during lowering because it is |
| // redundant with the Store instruction. |
| // |
| // Note that if "a" has further uses, the RMW transformation may still |
| // trigger, resulting in two loads and one store, which is worse than the |
| // original one load and one store. However, this is probably rare, and |
| // caching probably keeps it just as fast. |
| if (!isSameMemAddressOperand(Load->getLoadAddress(), |
| Store->getStoreAddress())) |
| continue; |
| Operand *ArithSrcFromLoad = Arith->getSrc(0); |
| Operand *ArithSrcOther = Arith->getSrc(1); |
| if (ArithSrcFromLoad != Load->getDest()) { |
| if (!Arith->isCommutative() || ArithSrcOther != Load->getDest()) |
| continue; |
| std::swap(ArithSrcFromLoad, ArithSrcOther); |
| } |
| if (Arith->getDest() != Store->getData()) |
| continue; |
| if (!canRMW(Arith)) |
| continue; |
| if (Func->isVerbose(IceV_RMW)) { |
| Ostream &Str = Func->getContext()->getStrDump(); |
| Str << "Found RMW in " << Func->getFunctionName() << ":\n "; |
| Load->dump(Func); |
| Str << "\n "; |
| Arith->dump(Func); |
| Str << "\n "; |
| Store->dump(Func); |
| Str << "\n"; |
| } |
| Variable *Beacon = Func->makeVariable(IceType_i32); |
| Beacon->setMustNotHaveReg(); |
| Store->setRmwBeacon(Beacon); |
| auto *BeaconDef = InstFakeDef::create(Func, Beacon); |
| Node->getInsts().insert(I3, BeaconDef); |
| auto *RMW = |
| InstX86FakeRMW::create(Func, ArithSrcOther, Store->getStoreAddress(), |
| Beacon, Arith->getOp()); |
| Node->getInsts().insert(I3, RMW); |
| } |
| } |
| if (Func->isVerbose(IceV_RMW)) |
| Func->getContext()->unlockStr(); |
| } |
| |
| /// Value is in bytes. Return Value adjusted to the next highest multiple of |
| /// the stack alignment. |
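| /// For example, with a 16-byte stack alignment, a Value of 20 would be |
| /// rounded up to 32. |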
| uint32_t TargetX8632::applyStackAlignment(uint32_t Value) { |
| return Utils::applyAlignment(Value, X86_STACK_ALIGNMENT_BYTES); |
| } |
| |
| // Converts a ConstantInteger32 operand into its constant value, or |
| // MemoryOrderInvalid if the operand is not a ConstantInteger32. |
| inline uint64_t getConstantMemoryOrder(Operand *Opnd) { |
| if (auto *Integer = llvm::dyn_cast<ConstantInteger32>(Opnd)) |
| return Integer->getValue(); |
| return Intrinsics::MemoryOrderInvalid; |
| } |
| |
| /// Determines whether the dest of a Load instruction can be folded into one of |
| /// the src operands of a 2-operand instruction. This is true as long as the |
| /// load dest matches exactly one of the binary instruction's src operands. |
| /// Replaces Src0 or Src1 with LoadSrc if the answer is true. |
| inline bool canFoldLoadIntoBinaryInst(Operand *LoadSrc, Variable *LoadDest, |
| Operand *&Src0, Operand *&Src1) { |
| if (Src0 == LoadDest && Src1 != LoadDest) { |
| Src0 = LoadSrc; |
| return true; |
| } |
| if (Src0 != LoadDest && Src1 == LoadDest) { |
| Src1 = LoadSrc; |
| return true; |
| } |
| return false; |
| } |
| |
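| /// Folds a load into its single use when the use immediately follows the |
| /// load and ends the loaded variable's live range. As an illustrative sketch |
| /// (not a literal transcript of the IR), a sequence like |
| ///   a = load addr |
| ///   c = add b, a      ; ends a's live range |
| /// becomes |
| ///   c = add b, addr |
| /// so that lowering can use the memory operand directly. |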
| void TargetX8632::doLoadOpt() { |
| TimerMarker _(TimerStack::TT_loadOpt, Func); |
| for (CfgNode *Node : Func->getNodes()) { |
| Context.init(Node); |
| while (!Context.atEnd()) { |
| Variable *LoadDest = nullptr; |
| Operand *LoadSrc = nullptr; |
| Inst *CurInst = iteratorToInst(Context.getCur()); |
| Inst *Next = Context.getNextInst(); |
| // Determine whether the current instruction is a Load instruction or |
| // equivalent. |
| if (auto *Load = llvm::dyn_cast<InstLoad>(CurInst)) { |
| // An InstLoad qualifies unless it uses a 64-bit absolute address, |
| // which requires legalization to insert a copy to register. |
| // TODO(b/148272103): Fold these after legalization. |
| LoadDest = Load->getDest(); |
| constexpr bool DoLegalize = false; |
| LoadSrc = formMemoryOperand(Load->getLoadAddress(), LoadDest->getType(), |
| DoLegalize); |
| } else if (auto *Intrin = llvm::dyn_cast<InstIntrinsic>(CurInst)) { |
| // An AtomicLoad intrinsic qualifies as long as it has a valid memory |
| // ordering, and can be implemented in a single instruction (i.e., not |
| // i64 on x86-32). |
| Intrinsics::IntrinsicID ID = Intrin->getIntrinsicID(); |
| if (ID == Intrinsics::AtomicLoad && |
| (Intrin->getDest()->getType() != IceType_i64) && |
| Intrinsics::isMemoryOrderValid( |
| ID, getConstantMemoryOrder(Intrin->getArg(1)))) { |
| LoadDest = Intrin->getDest(); |
| constexpr bool DoLegalize = false; |
| LoadSrc = formMemoryOperand(Intrin->getArg(0), LoadDest->getType(), |
| DoLegalize); |
| } |
| } |
| // A Load instruction can be folded into the following instruction only |
| // if the following instruction ends the Load's Dest variable's live |
| // range. |
| if (LoadDest && Next && Next->isLastUse(LoadDest)) { |
| assert(LoadSrc); |
| Inst *NewInst = nullptr; |
| if (auto *Arith = llvm::dyn_cast<InstArithmetic>(Next)) { |
| Operand *Src0 = Arith->getSrc(0); |
| Operand *Src1 = Arith->getSrc(1); |
| if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) { |
| NewInst = InstArithmetic::create(Func, Arith->getOp(), |
| Arith->getDest(), Src0, Src1); |
| } |
| } else if (auto *Icmp = llvm::dyn_cast<InstIcmp>(Next)) { |
| Operand *Src0 = Icmp->getSrc(0); |
| Operand *Src1 = Icmp->getSrc(1); |
| if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) { |
| NewInst = InstIcmp::create(Func, Icmp->getCondition(), |
| Icmp->getDest(), Src0, Src1); |
| } |
| } else if (auto *Fcmp = llvm::dyn_cast<InstFcmp>(Next)) { |
| Operand *Src0 = Fcmp->getSrc(0); |
| Operand *Src1 = Fcmp->getSrc(1); |
| if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) { |
| NewInst = InstFcmp::create(Func, Fcmp->getCondition(), |
| Fcmp->getDest(), Src0, Src1); |
| } |
| } else if (auto *Select = llvm::dyn_cast<InstSelect>(Next)) { |
| Operand *Src0 = Select->getTrueOperand(); |
| Operand *Src1 = Select->getFalseOperand(); |
| if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) { |
| NewInst = InstSelect::create(Func, Select->getDest(), |
| Select->getCondition(), Src0, Src1); |
| } |
| } else if (auto *Cast = llvm::dyn_cast<InstCast>(Next)) { |
| // The load dest can always be folded into a Cast instruction. |
| auto *Src0 = llvm::dyn_cast<Variable>(Cast->getSrc(0)); |
| if (Src0 == LoadDest) { |
| NewInst = InstCast::create(Func, Cast->getCastKind(), |
| Cast->getDest(), LoadSrc); |
| } |
| } |
| if (NewInst) { |
| CurInst->setDeleted(); |
| Next->setDeleted(); |
| Context.insert(NewInst); |
| // Update NewInst->LiveRangesEnded so that target lowering may |
| // benefit. Also update NewInst->HasSideEffects. |
| NewInst->spliceLivenessInfo(Next, CurInst); |
| } |
| } |
| Context.advanceCur(); |
| Context.advanceNext(); |
| } |
| } |
| Func->dump("After load optimization"); |
| } |
| |
| bool TargetX8632::doBranchOpt(Inst *I, const CfgNode *NextNode) { |
| if (auto *Br = llvm::dyn_cast<InstX86Br>(I)) { |
| return Br->optimizeBranch(NextNode); |
| } |
| return false; |
| } |
| |
| Variable *TargetX8632::getPhysicalRegister(RegNumT RegNum, Type Ty) { |
| if (Ty == IceType_void) |
| Ty = IceType_i32; |
| if (PhysicalRegisters[Ty].empty()) |
| PhysicalRegisters[Ty].resize(RegX8632::Reg_NUM); |
| assert(unsigned(RegNum) < PhysicalRegisters[Ty].size()); |
| Variable *Reg = PhysicalRegisters[Ty][RegNum]; |
| if (Reg == nullptr) { |
| Reg = Func->makeVariable(Ty); |
| Reg->setRegNum(RegNum); |
| PhysicalRegisters[Ty][RegNum] = Reg; |
| // Specially mark a named physical register as an "argument" so that it is |
| // considered live upon function entry. Otherwise it's possible to get |
| // liveness validation errors for saving callee-save registers. |
| Func->addImplicitArg(Reg); |
| // Don't bother tracking the live range of a named physical register. |
| Reg->setIgnoreLiveness(); |
| } |
| assert(RegX8632::getGprForType(Ty, RegNum) == RegNum); |
| return Reg; |
| } |
| |
| const char *TargetX8632::getRegName(RegNumT RegNum, Type Ty) const { |
| return RegX8632::getRegName(RegX8632::getGprForType(Ty, RegNum)); |
| } |
| |
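| /// Emits the assembly operand for Var: its register name if it was assigned |
| /// a register, otherwise its stack slot in "Offset(%basereg)" form (using the |
| /// frame or stack pointer as the base when none is recorded). |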
| void TargetX8632::emitVariable(const Variable *Var) const { |
| if (!BuildDefs::dump()) |
| return; |
| Ostream &Str = Ctx->getStrEmit(); |
| if (Var->hasReg()) { |
| Str << "%" << getRegName(Var->getRegNum(), Var->getType()); |
| return; |
| } |
| if (Var->mustHaveReg()) { |
| llvm::report_fatal_error("Infinite-weight Variable (" + Var->getName() + |
| ") has no register assigned - function " + |
| Func->getFunctionName()); |
| } |
| const int32_t Offset = Var->getStackOffset(); |
| auto BaseRegNum = Var->getBaseRegNum(); |
| if (BaseRegNum.hasNoValue()) |
| BaseRegNum = getFrameOrStackReg(); |
| |
| // Print in the form "Offset(%reg)", omitting Offset when it is 0. |
| if (getFlags().getDecorateAsm()) { |
| Str << Var->getSymbolicStackOffset(); |
| } else if (Offset != 0) { |
| Str << Offset; |
| } |
| const Type FrameSPTy = WordType; |
| Str << "(%" << getRegName(BaseRegNum, FrameSPTy) << ")"; |
| } |
| |
| void TargetX8632::addProlog(CfgNode *Node) { |
| // Stack frame layout: |
| // |
| // +------------------------+ ^ + |
| // | 1. return address | | |
| // +------------------------+ v - |
| // | 2. preserved registers | |
| // +------------------------+ <--- BasePointer (if used) |
| // | 3. padding | |
| // +------------------------+ |
| // | 4. global spill area | |
| // +------------------------+ |
| // | 5. padding | |
| // +------------------------+ |
| // | 6. local spill area | |
| // +------------------------+ |
| // | 7. padding | |
| // +------------------------+ |
| // | 7.5 shadow (WinX64) | |
| // +------------------------+ |
| // | 8. allocas | |
| // +------------------------+ |
| // | 9. padding | |
| // +------------------------+ |
| // | 10. out args | |
| // +------------------------+ <--- StackPointer |
| // |
| // The following variables record the size in bytes of the given areas: |
| // * X86_RET_IP_SIZE_BYTES: area 1 |
| // * PreservedRegsSizeBytes: area 2 |
| // * SpillAreaPaddingBytes: area 3 |
| // * GlobalsSize: area 4 |
| // * LocalsSlotsPaddingBytes: area 5 |
| // * GlobalsAndSubsequentPaddingSize: areas 4 - 5 |
| // * LocalsSpillAreaSize: area 6 |
| // * FixedAllocaSizeBytes: areas 7 - 8 |
| // * SpillAreaSizeBytes: areas 3 - 10 |
| // * maxOutArgsSizeBytes(): areas 9 - 10 |
| |
| // Determine stack frame offsets for each Variable without a register |
| // assignment. This can be done as one variable per stack slot. Or, do |
| // coalescing by running the register allocator again with an infinite set of |
| // registers (as a side effect, this gives variables a second chance at |
| // physical register assignment). |
| // |
| // A middle ground approach is to leverage sparsity and allocate one block of |
| // space on the frame for globals (variables with multi-block lifetime), and |
| // one block to share for locals (single-block lifetime). |
| |
| // StackPointer: points just past return address of calling function |
| |
| Context.init(Node); |
| Context.setInsertPoint(Context.getCur()); |
| |
| SmallBitVector CalleeSaves = getRegisterSet(RegSet_CalleeSave, RegSet_None); |
| RegsUsed = SmallBitVector(CalleeSaves.size()); |
| VarList SortedSpilledVariables, VariablesLinkedToSpillSlots; |
| size_t GlobalsSize = 0; |
| // If there is a separate locals area, this represents that area. Otherwise |
| // it counts any variable not counted by GlobalsSize. |
| SpillAreaSizeBytes = 0; |
| // If there is a separate locals area, this specifies the alignment for it. |
| uint32_t LocalsSlotsAlignmentBytes = 0; |
| // The entire spill locations area gets aligned to the largest natural |
| // alignment of the variables that have a spill slot. |
| uint32_t SpillAreaAlignmentBytes = 0; |
| // A spill slot linked to a variable with a stack slot should reuse that |
| // stack slot. |
| std::function<bool(Variable *)> TargetVarHook = |
| [&VariablesLinkedToSpillSlots](Variable *Var) { |
| // TODO(stichnot): Refactor this into the base class. |
| Variable *Root = Var->getLinkedToStackRoot(); |
| if (Root != nullptr) { |
| assert(!Root->hasReg()); |
| if (!Root->hasReg()) { |
| VariablesLinkedToSpillSlots.push_back(Var); |
| return true; |
| } |
| } |
| return false; |
| }; |
| |
| // Compute the list of spilled variables and bounds for GlobalsSize, etc. |
| getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize, |
| &SpillAreaSizeBytes, &SpillAreaAlignmentBytes, |
| &LocalsSlotsAlignmentBytes, TargetVarHook); |
| uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes; |
| SpillAreaSizeBytes += GlobalsSize; |
| |
| // Add push instructions for preserved registers. |
| uint32_t NumCallee = 0; |
| size_t PreservedRegsSizeBytes = 0; |
| SmallBitVector Pushed(CalleeSaves.size()); |
| for (RegNumT i : RegNumBVIter(CalleeSaves)) { |
| const auto Canonical = RegX8632::getBaseReg(i); |
| assert(Canonical == RegX8632::getBaseReg(Canonical)); |
| if (RegsUsed[i]) { |
| Pushed[Canonical] = true; |
| } |
| } |
| for (RegNumT RegNum : RegNumBVIter(Pushed)) { |
| assert(RegNum == RegX8632::getBaseReg(RegNum)); |
| ++NumCallee; |
| if (RegX8632::isXmm(RegNum)) { |
| PreservedRegsSizeBytes += 16; |
| } else { |
| PreservedRegsSizeBytes += typeWidthInBytes(WordType); |
| } |
| _push_reg(RegNum); |
| } |
| Ctx->statsUpdateRegistersSaved(NumCallee); |
| |
| // StackPointer: points past preserved registers at start of spill area |
| |
| // Generate "push frameptr; mov frameptr, stackptr" |
| if (IsEbpBasedFrame) { |
| assert( |
| (RegsUsed & getRegisterSet(RegSet_FramePointer, RegSet_None)).count() == |
| 0); |
| PreservedRegsSizeBytes += typeWidthInBytes(WordType); |
| _link_bp(); |
| } |
| |
| // Align the variables area. SpillAreaPaddingBytes is the size of the region |
| // after the preserved registers and before the spill areas. |
| // LocalsSlotsPaddingBytes is the amount of padding between the globals and |
| // locals area if they are separate. |
| assert(LocalsSlotsAlignmentBytes <= SpillAreaAlignmentBytes); |
| uint32_t SpillAreaPaddingBytes = 0; |
| uint32_t LocalsSlotsPaddingBytes = 0; |
| alignStackSpillAreas(X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes, |
| SpillAreaAlignmentBytes, GlobalsSize, |
| LocalsSlotsAlignmentBytes, &SpillAreaPaddingBytes, |
| &LocalsSlotsPaddingBytes); |
| SpillAreaSizeBytes += SpillAreaPaddingBytes + LocalsSlotsPaddingBytes; |
| uint32_t GlobalsAndSubsequentPaddingSize = |
| GlobalsSize + LocalsSlotsPaddingBytes; |
| |
| // Functions returning scalar floating point types may need to convert values |
| // from an in-register xmm value to the top of the x87 floating point stack. |
| // This is done by a movp[sd] and an fld[sd]. Ensure there is enough scratch |
| // space on the stack for this. |
| const Type ReturnType = Func->getReturnType(); |
| if (isScalarFloatingType(ReturnType)) { |
| // Avoid misaligned double-precision load/store. |
| RequiredStackAlignment = |
| std::max<size_t>(RequiredStackAlignment, X86_STACK_ALIGNMENT_BYTES); |
| SpillAreaSizeBytes = |
| std::max(typeWidthInBytesOnStack(ReturnType), SpillAreaSizeBytes); |
| } |
| |
| RequiredStackAlignment = |
| std::max<size_t>(RequiredStackAlignment, SpillAreaAlignmentBytes); |
| |
| if (PrologEmitsFixedAllocas) { |
| RequiredStackAlignment = |
| std::max(RequiredStackAlignment, FixedAllocaAlignBytes); |
| } |
| |
| // Combine fixed allocations into SpillAreaSizeBytes if we are emitting the |
| // fixed allocations in the prolog. |
| if (PrologEmitsFixedAllocas) |
| SpillAreaSizeBytes += FixedAllocaSizeBytes; |
| |
| // Entering the function has made the stack pointer unaligned. Re-align it by |
| // adjusting the stack size. |
| // Note that StackOffset does not include spill area. It's the offset from the |
| // base stack pointer (ebp), whether we set it or not, to the first stack |
| // arg (if any). StackSize, on the other hand, does include the spill area. |
| const uint32_t StackOffset = X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes; |
| uint32_t StackSize = Utils::applyAlignment(StackOffset + SpillAreaSizeBytes, |
| RequiredStackAlignment); |
| StackSize = Utils::applyAlignment(StackSize + maxOutArgsSizeBytes(), |
| RequiredStackAlignment); |
| SpillAreaSizeBytes = StackSize - StackOffset; // Adjust for alignment, if any |
| |
| if (SpillAreaSizeBytes) { |
| auto *Func = Node->getCfg(); |
| if (SpillAreaSizeBytes > Func->getStackSizeLimit()) { |
| Func->setError("Stack size limit exceeded"); |
| } |
| |
| emitStackProbe(SpillAreaSizeBytes); |
| |
| // Generate "sub stackptr, SpillAreaSizeBytes" |
| _sub_sp(Ctx->getConstantInt32(SpillAreaSizeBytes)); |
| } |
| |
| // StackPointer: points just past the spill area (end of stack frame) |
| |
| // If the required alignment is greater than the stack pointer's guaranteed |
| // alignment, align the stack pointer accordingly. |
| if (RequiredStackAlignment > X86_STACK_ALIGNMENT_BYTES) { |
| assert(IsEbpBasedFrame); |
| _and(getPhysicalRegister(getStackReg(), WordType), |
| Ctx->getConstantInt32(-RequiredStackAlignment)); |
| } |
| |
| // StackPointer: may have just been offset for alignment |
| |
| // Account for known-frame-offset alloca instructions that were not already |
| // combined into the prolog. |
| if (!PrologEmitsFixedAllocas) |
| SpillAreaSizeBytes += FixedAllocaSizeBytes; |
| |
| Ctx->statsUpdateFrameBytes(SpillAreaSizeBytes); |
| |
| // Fill in stack offsets for stack args, and copy args into registers for |
| // those that were register-allocated. Args are pushed right to left, so |
| // Arg[0] is closest to the stack/frame pointer. |
| RegNumT FrameOrStackReg = IsEbpBasedFrame ? getFrameReg() : getStackReg(); |
| Variable *FramePtr = getPhysicalRegister(FrameOrStackReg, WordType); |
| size_t BasicFrameOffset = StackOffset; |
| if (!IsEbpBasedFrame) |
| BasicFrameOffset += SpillAreaSizeBytes; |
| |
| const VarList &Args = Func->getArgs(); |
| size_t InArgsSizeBytes = 0; |
| unsigned NumXmmArgs = 0; |
| unsigned NumGPRArgs = 0; |
| for (SizeT i = 0, NumArgs = Args.size(); i < NumArgs; ++i) { |
| Variable *Arg = Args[i]; |
| // Skip arguments passed in registers. |
| if (isVectorType(Arg->getType())) { |
| if (RegX8632::getRegisterForXmmArgNum( |
| RegX8632::getArgIndex(i, NumXmmArgs)) |
| .hasValue()) { |
| ++NumXmmArgs; |
| continue; |
| } |
| } else if (!isScalarFloatingType(Arg->getType())) { |
| assert(isScalarIntegerType(Arg->getType())); |
| if (RegX8632::getRegisterForGprArgNum( |
| WordType, RegX8632::getArgIndex(i, NumGPRArgs)) |
| .hasValue()) { |
| ++NumGPRArgs; |
| continue; |
| } |
| } |
| // For esp-based frames where the allocas are done outside the prolog, the |
| // esp value may not stabilize to its home value until after all the |
| // fixed-size alloca instructions have executed. In this case, a stack |
| // adjustment is needed when accessing in-args in order to copy them into |
| // registers. |
| size_t StackAdjBytes = 0; |
| if (!IsEbpBasedFrame && !PrologEmitsFixedAllocas) |
| StackAdjBytes -= FixedAllocaSizeBytes; |
| finishArgumentLowering(Arg, FramePtr, BasicFrameOffset, StackAdjBytes, |
| InArgsSizeBytes); |
| } |
| |
| // Fill in stack offsets for locals. |
| assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes, |
| SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize, |
| IsEbpBasedFrame && !needsStackPointerAlignment()); |
| // Assign stack offsets to variables that have been linked to spilled |
| // variables. |
| for (Variable *Var : VariablesLinkedToSpillSlots) { |
| const Variable *Root = Var->getLinkedToStackRoot(); |
| assert(Root != nullptr); |
| Var->setStackOffset(Root->getStackOffset()); |
| |
| // If the stack root variable is an arg, make this variable an arg too so |
| // that stackVarToAsmAddress uses the correct base pointer (e.g. ebp on |
| // x86). |
| Var->setIsArg(Root->getIsArg()); |
| } |
| this->HasComputedFrame = true; |
| |
| if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) { |
| OstreamLocker L(Func->getContext()); |
| Ostream &Str = Func->getContext()->getStrDump(); |
| |
| Str << "Stack layout:\n"; |
| uint32_t EspAdjustmentPaddingSize = |
| SpillAreaSizeBytes - LocalsSpillAreaSize - |
| GlobalsAndSubsequentPaddingSize - SpillAreaPaddingBytes - |
| maxOutArgsSizeBytes(); |
| Str << " in-args = " << InArgsSizeBytes << " bytes\n" |
| << " return address = " << X86_RET_IP_SIZE_BYTES << " bytes\n" |
| << " preserved registers = " << PreservedRegsSizeBytes << " bytes\n" |
| << " spill area padding = " << SpillAreaPaddingBytes << " bytes\n" |
| << " globals spill area = " << GlobalsSize << " bytes\n" |
| << " globals-locals spill areas intermediate padding = " |
| << GlobalsAndSubsequentPaddingSize - GlobalsSize << " bytes\n" |
| << " locals spill area = " << LocalsSpillAreaSize << " bytes\n" |
| << " esp alignment padding = " << EspAdjustmentPaddingSize |
| << " bytes\n"; |
| |
| Str << "Stack details:\n" |
| << " esp adjustment = " << SpillAreaSizeBytes << " bytes\n" |
| << " spill area alignment = " << SpillAreaAlignmentBytes << " bytes\n" |
| << " outgoing args size = " << maxOutArgsSizeBytes() << " bytes\n" |
| << " locals spill area alignment = " << LocalsSlotsAlignmentBytes |
| << " bytes\n" |
| << " is ebp based = " << IsEbpBasedFrame << "\n"; |
| } |
| } |
| |
| /// Helper function for addProlog(). |
| /// |
| /// This assumes Arg is an argument passed on the stack. This sets the frame |
| /// offset for Arg and updates InArgsSizeBytes according to Arg's width. For an |
| /// I64 arg that has been split into Lo and Hi components, it calls itself |
| /// recursively on the components, taking care to handle Lo first because of the |
| /// little-endian architecture. Lastly, this function generates an instruction |
| /// to copy Arg into its assigned register if applicable. |
| |
| void TargetX8632::finishArgumentLowering(Variable *Arg, Variable *FramePtr, |
| size_t BasicFrameOffset, |
| size_t StackAdjBytes, |
| size_t &InArgsSizeBytes) { |
| if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) { |
| Variable *Lo = Arg64On32->getLo(); |
| Variable *Hi = Arg64On32->getHi(); |
| finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, StackAdjBytes, |
| InArgsSizeBytes); |
| finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, StackAdjBytes, |
| InArgsSizeBytes); |
| return; |
| } |
| Type Ty = Arg->getType(); |
| if (isVectorType(Ty)) { |
| InArgsSizeBytes = applyStackAlignment(InArgsSizeBytes); |
| } |
| Arg->setStackOffset(BasicFrameOffset + InArgsSizeBytes); |
| InArgsSizeBytes += typeWidthInBytesOnStack(Ty); |
| if (Arg->hasReg()) { |
| assert(Ty != IceType_i64); |
| auto *Mem = X86OperandMem::create( |
| Func, Ty, FramePtr, |
| Ctx->getConstantInt32(Arg->getStackOffset() + StackAdjBytes)); |
| if (isVectorType(Arg->getType())) { |
| _movp(Arg, Mem); |
| } else { |
| _mov(Arg, Mem); |
| } |
| // This argument-copying instruction uses an explicit X86OperandMem |
| // operand instead of a Variable, so its fill-from-stack operation has to |
| // be tracked separately for statistics. |
| Ctx->statsUpdateFills(); |
| } |
| } |
| |
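| /// Inserts the epilog immediately before the node's ret instruction (if any): |
| /// restores the stack pointer (either by unlinking ebp or by adding back |
| /// SpillAreaSizeBytes) and pops the preserved registers in reverse order. |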
| void TargetX8632::addEpilog(CfgNode *Node) { |
| InstList &Insts = Node->getInsts(); |
| InstList::reverse_iterator RI, E; |
| for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) { |
| if (llvm::isa<Insts::Ret>(*RI)) |
| break; |
| } |
| if (RI == E) |
| return; |
| |
| // Convert the reverse_iterator position into its corresponding (forward) |
| // iterator position. |
| InstList::iterator InsertPoint = reverseToForwardIterator(RI); |
| --InsertPoint; |
| Context.init(Node); |
| Context.setInsertPoint(InsertPoint); |
| |
| if (IsEbpBasedFrame) { |
| _unlink_bp(); |
| } else { |
| // add stackptr, SpillAreaSizeBytes |
| if (SpillAreaSizeBytes != 0) { |
| _add_sp(Ctx->getConstantInt32(SpillAreaSizeBytes)); |
| } |
| } |
| |
| // Add pop instructions for preserved registers. |
| SmallBitVector CalleeSaves = getRegisterSet(RegSet_CalleeSave, RegSet_None); |
| SmallBitVector Popped(CalleeSaves.size()); |
| for (int32_t i = CalleeSaves.size() - 1; i >= 0; --i) { |
| const auto RegNum = RegNumT::fromInt(i); |
| if (RegNum == getFrameReg() && IsEbpBasedFrame) |
| continue; |
| const RegNumT Canonical = RegX8632::getBaseReg(RegNum); |
| if (CalleeSaves[i] && RegsUsed[i]) { |
| Popped[Canonical] = true; |
| } |
| } |
| for (int32_t i = Popped.size() - 1; i >= 0; --i) { |
| if (!Popped[i]) |
| continue; |
| const auto RegNum = RegNumT::fromInt(i); |
| assert(RegNum == RegX8632::getBaseReg(RegNum)); |
| _pop_reg(RegNum); |
| } |
| } |
| |
| Type TargetX8632::stackSlotType() { return WordType; } |
| |
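| /// Returns the low-order 32 bits of the given i64/f64 operand: the Lo half of |
| /// a split Variable64On32, a truncated i32 immediate for a ConstantInteger64, |
| /// or an i32 memory operand at the same address. |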
| Operand *TargetX8632::loOperand(Operand *Operand) { |
| assert(Operand->getType() == IceType_i64 || |
| Operand->getType() == IceType_f64); |
| if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64) |
| return Operand; |
| if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand)) |
| return Var64On32->getLo(); |
| if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { |
| auto *ConstInt = llvm::dyn_cast<ConstantInteger32>( |
| Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue()))); |
| // Check if we need to blind/pool the constant. |
| return legalize(ConstInt); |
| } |
| if (auto *Mem = llvm::dyn_cast<X86OperandMem>(Operand)) { |
| auto *MemOperand = X86OperandMem::create( |
| Func, IceType_i32, Mem->getBase(), Mem->getOffset(), Mem->getIndex(), |
| Mem->getShift(), Mem->getSegmentRegister(), Mem->getIsRebased()); |
| // If the offset should be randomized or pooled, create the mem operand with |
| // the blinded/pooled constant; otherwise, return it as an ordinary mem |
| // operand. |
| return legalize(MemOperand); |
| } |
| llvm_unreachable("Unsupported operand type"); |
| return nullptr; |
| } |
| |
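| /// Returns the high-order 32 bits of the given i64/f64 operand: the Hi half |
| /// of a split Variable64On32, the upper 32 bits of a ConstantInteger64, or an |
| /// i32 memory operand whose offset is advanced by 4 bytes. |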
| Operand *TargetX8632::hiOperand(Operand *Operand) { |
| assert(Operand->getType() == IceType_i64 || |
| Operand->getType() == IceType_f64); |
| if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64) |
| return Operand; |
| if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand)) |
| return Var64On32->getHi(); |
| if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { |
| auto *ConstInt = llvm::dyn_cast<ConstantInteger32>( |
| Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue() >> 32))); |
| // Check if we need to blind/pool the constant. |
| return legalize(ConstInt); |
| } |
| if (auto *Mem = llvm::dyn_cast<X86OperandMem>(Operand)) { |
| Constant *Offset = Mem->getOffset(); |
| if (Offset == nullptr) { |
| Offset = Ctx->getConstantInt32(4); |
| } else if (auto *IntOffset = llvm::dyn_cast<ConstantInteger32>(Offset)) { |
| Offset = Ctx->getConstantInt32(4 + IntOffset->getValue()); |
| } else if (auto *SymOffset = llvm::dyn_cast<ConstantRelocatable>(Offset)) { |
| assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4)); |
| Offset = |
| Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName()); |
| } |
| auto *MemOperand = X86OperandMem::create( |
| Func, IceType_i32, Mem->getBase(), Offset, Mem->getIndex(), |
| Mem->getShift(), Mem->getSegmentRegister(), Mem->getIsRebased()); |
| // Test if the Offset is an eligible i32 constant for randomization and |
| // pooling. Blind/pool it if it is; otherwise return it as an ordinary mem |
| // operand. |
| return legalize(MemOperand); |
| } |
| llvm_unreachable("Unsupported operand type"); |
| return nullptr; |
| } |
| |
| SmallBitVector TargetX8632::getRegisterSet(RegSetMask Include, |
| RegSetMask Exclude) const { |
| return RegX8632::getRegisterSet(getFlags(), Include, Exclude); |
| } |
| |
| void TargetX8632::lowerAlloca(const InstAlloca *Instr) { |
| // Conservatively require the stack to be aligned. Some stack adjustment |
| // operations implemented below assume that the stack is aligned before the |
| // alloca. All the alloca code ensures that the stack alignment is preserved |
| // after the alloca. The stack alignment restriction can be relaxed in some |
| // cases. |
| RequiredStackAlignment = |
| std::max<size_t>(RequiredStackAlignment, X86_STACK_ALIGNMENT_BYTES); |
| |
| // For default align=0, set it to the real value 1, to avoid any |
| // bit-manipulation problems below. |
| const uint32_t AlignmentParam = std::max(1u, Instr->getAlignInBytes()); |
| |
| // LLVM enforces power of 2 alignment. |
| assert(llvm::isPowerOf2_32(AlignmentParam)); |
| assert(llvm::isPowerOf2_32(X86_STACK_ALIGNMENT_BYTES)); |
| |
| const uint32_t Alignment = |
| std::max(AlignmentParam, X86_STACK_ALIGNMENT_BYTES); |
| const bool OverAligned = Alignment > X86_STACK_ALIGNMENT_BYTES; |
| const bool OptM1 = Func->getOptLevel() == Opt_m1; |
| const bool AllocaWithKnownOffset = Instr->getKnownFrameOffset(); |
| const bool UseFramePointer = |
| hasFramePointer() || OverAligned || !AllocaWithKnownOffset || OptM1; |
| |
| if (UseFramePointer) |
| setHasFramePointer(); |
| |
| Variable *esp = getPhysicalRegister(getStackReg(), WordType); |
| if (OverAligned) { |
| _and(esp, Ctx->getConstantInt32(-Alignment)); |
| } |
| |
| Variable *Dest = Instr->getDest(); |
| Operand *TotalSize = legalize(Instr->getSizeInBytes()); |
| |
| if (const auto *ConstantTotalSize = |
| llvm::dyn_cast<ConstantInteger32>(TotalSize)) { |
| const uint32_t Value = |
| Utils::applyAlignment(ConstantTotalSize->getValue(), Alignment); |
| if (UseFramePointer) { |
| _sub_sp(Ctx->getConstantInt32(Value)); |
| } else { |
| // If we don't need a Frame Pointer, this alloca has a known offset to the |
| // stack pointer. We don't need to adjust the stack pointer, nor assign any |
| // value to Dest, as Dest is rematerializable. |
| assert(Dest->isRematerializable()); |
| FixedAllocaSizeBytes += Value; |
| Context.insert<InstFakeDef>(Dest); |
| } |
| } else { |
| // Non-constant sizes need to be adjusted to the next highest multiple of |
| // the required alignment at runtime. |
| Variable *T = makeReg(IceType_i32); |
| _mov(T, TotalSize); |
| _add(T, Ctx->getConstantInt32(Alignment - 1)); |
| _and(T, Ctx->getConstantInt32(-Alignment)); |
| _sub_sp(T); |
| } |
| // Add enough to the returned address to account for the out args area. |
| uint32_t OutArgsSize = maxOutArgsSizeBytes(); |
| if (OutArgsSize > 0) { |
| Variable *T = makeReg(Dest->getType()); |
| auto *CalculateOperand = X86OperandMem::create( |
| Func, IceType_void, esp, Ctx->getConstantInt(IceType_i32, OutArgsSize)); |
| _lea(T, CalculateOperand); |
| _mov(Dest, T); |
| } else { |
| _mov(Dest, esp); |
| } |
| } |
| |
| void TargetX8632::lowerArguments() { |
| const bool OptM1 = Func->getOptLevel() == Opt_m1; |
| VarList &Args = Func->getArgs(); |
| unsigned NumXmmArgs = 0; |
| bool XmmSlotsRemain = true; |
| unsigned NumGprArgs = 0; |
| bool GprSlotsRemain = true; |
| |
| Context.init(Func->getEntryNode()); |
| Context.setInsertPoint(Context.getCur()); |
| |
| for (SizeT i = 0, End = Args.size(); |
| i < End && (XmmSlotsRemain || GprSlotsRemain); ++i) { |
| Variable *Arg = Args[i]; |
| Type Ty = Arg->getType(); |
| Variable *RegisterArg = nullptr; |
| RegNumT RegNum; |
| if (isVectorType(Ty)) { |
| RegNum = RegX8632::getRegisterForXmmArgNum( |
| RegX8632::getArgIndex(i, NumXmmArgs)); |
| if (RegNum.hasNoValue()) { |
| XmmSlotsRemain = false; |
| continue; |
| } |
| ++NumXmmArgs; |
| RegisterArg = Func->makeVariable(Ty); |
| } else if (isScalarFloatingType(Ty)) { |
| continue; |
| } else if (isScalarIntegerType(Ty)) { |
| RegNum = RegX8632::getRegisterForGprArgNum( |
| Ty, RegX8632::getArgIndex(i, NumGprArgs)); |
| if (RegNum.hasNoValue()) { |
| GprSlotsRemain = false; |
| continue; |
| } |
| ++NumGprArgs; |
| RegisterArg = Func->makeVariable(Ty); |
| } |
| assert(RegNum.hasValue()); |
| assert(RegisterArg != nullptr); |
| // Replace Arg in the argument list with the home register. Then generate |
| // an instruction in the prolog to copy the home register to the assigned |
| // location of Arg. |
| if (BuildDefs::dump()) |
| RegisterArg->setName(Func, "home_reg:" + Arg->getName()); |
| RegisterArg->setRegNum(RegNum); |
| RegisterArg->setIsArg(); |
| Arg->setIsArg(false); |
| |
| Args[i] = RegisterArg; |
| // When not Om1, do the assignment through a temporary, instead of directly |
| // from the pre-colored variable, so that a subsequent availabilityGet() |
| // call has a chance to work. (In Om1, don't bother creating extra |
| // instructions with extra variables to register-allocate.) |
| if (OptM1) { |
| Context.insert<InstAssign>(Arg, RegisterArg); |
| } else { |
| Variable *Tmp = makeReg(RegisterArg->getType()); |
| Context.insert<InstAssign>(Tmp, RegisterArg); |
| Context.insert<InstAssign>(Arg, Tmp); |
| } |
| } |
| if (!OptM1) |
| Context.availabilityUpdate(); |
| } |
| |
| /// Strength-reduce scalar integer multiplication by a constant (for i32 or |
| /// narrower) for certain constants. The lea instruction can be used to multiply |
| /// by 3, 5, or 9, and the shl instruction can be used to multiply by powers of |
| /// 2. These can be combined such that e.g. multiplying by 100 can be done as 2 |
| /// lea-based multiplies by 5, combined with left-shifting by 2. |
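| /// For example (illustrative only), multiplying by 100 = 5 * 5 * 4 can be |
| /// lowered roughly as: |
| ///   t = src |
| ///   t = lea [t + 4*t]   ; t *= 5 |
| ///   t = lea [t + 4*t]   ; t *= 5 |
| ///   t = shl t, 2        ; t *= 4 |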
| |
| bool TargetX8632::optimizeScalarMul(Variable *Dest, Operand *Src0, |
| int32_t Src1) { |
| // Disable this optimization for Om1 and O0, just to keep things simple |
| // there. |
| if (Func->getOptLevel() < Opt_1) |
| return false; |
| Type Ty = Dest->getType(); |
| if (Src1 == -1) { |
| Variable *T = nullptr; |
| _mov(T, Src0); |
| _neg(T); |
| _mov(Dest, T); |
| return true; |
| } |
| if (Src1 == 0) { |
| _mov(Dest, Ctx->getConstantZero(Ty)); |
| return true; |
| } |
| if (Src1 == 1) { |
| Variable *T = nullptr; |
| _mov(T, Src0); |
| _mov(Dest, T); |
| return true; |
| } |
| // Don't bother with the edge case where Src1 == MININT. |
| if (Src1 == -Src1) |
| return false; |
| const bool Src1IsNegative = Src1 < 0; |
| if (Src1IsNegative) |
| Src1 = -Src1; |
| uint32_t Count9 = 0; |
| uint32_t Count5 = 0; |
| uint32_t Count3 = 0; |
| uint32_t Count2 = 0; |
| uint32_t CountOps = 0; |
| while (Src1 > 1) { |
| if (Src1 % 9 == 0) { |
| ++CountOps; |
| ++Count9; |
| Src1 /= 9; |
| } else if (Src1 % 5 == 0) { |
| ++CountOps; |
| ++Count5; |
| Src1 /= 5; |
| } else if (Src1 % 3 == 0) { |
| ++CountOps; |
| ++Count3; |
| Src1 /= 3; |
| } else if (Src1 % 2 == 0) { |
| if (Count2 == 0) |
| ++CountOps; |
| ++Count2; |
| Src1 /= 2; |
| } else { |
| return false; |
| } |
| } |
| // The lea optimization only works for the i32 type here, not i8 or i16. |
| if (Ty != IceType_i32 && (Count3 || Count5 || Count9)) |
| return false; |
| // Limit the number of lea/shl operations for a single multiply, to a |
| // somewhat arbitrary choice of 3. |
| constexpr uint32_t MaxOpsForOptimizedMul = 3; |
| if (CountOps > MaxOpsForOptimizedMul) |
| return false; |
| Variable *T = makeReg(WordType); |
| if (typeWidthInBytes(Src0->getType()) < typeWidthInBytes(T->getType())) { |
| Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem); |
| _movzx(T, Src0RM); |
| } else { |
| _mov(T, Src0); |
| } |
| Constant *Zero = Ctx->getConstantZero(IceType_i32); |
| for (uint32_t i = 0; i < Count9; ++i) { |
| constexpr uint16_t Shift = 3; // log2(9-1) |
| _lea(T, X86OperandMem::create(Func, IceType_void, T, Zero, T, Shift)); |
| } |
| for (uint32_t i = 0; i < Count5; ++i) { |
| constexpr uint16_t Shift = 2; // log2(5-1) |
| _lea(T, X86OperandMem::create(Func, IceType_void, T, Zero, T, Shift)); |
| } |
| for (uint32_t i = 0; i < Count3; ++i) { |
| constexpr uint16_t Shift = 1; // log2(3-1) |
| _lea(T, X86OperandMem::create(Func, IceType_void, T, Zero, T, Shift)); |
| } |
| if (Count2) { |
| _shl(T, Ctx->getConstantInt(Ty, Count2)); |
| } |
| if (Src1IsNegative) |
| _neg(T); |
| _mov(Dest, T); |
| return true; |
| } |
| |
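| /// Lowers a 64-bit shift given the 32-bit halves of the shifted value and the |
| /// low half of the shift amount. Constant shift amounts are specialized for |
| /// the <32, ==32, and >32 cases using shld/shrd and plain shifts; for a |
| /// non-constant amount, the count is loaded into cl and a test of bit 5 |
| /// (0x20) selects between the "small shift" and "shift by >= 32" paths. |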
| void TargetX8632::lowerShift64(InstArithmetic::OpKind Op, Operand *Src0Lo, |
| Operand *Src0Hi, Operand *Src1Lo, |
| Variable *DestLo, Variable *DestHi) { |
| // TODO: Refactor the similarities between Shl, Lshr, and Ashr. |
| Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr; |
| Constant *Zero = Ctx->getConstantZero(IceType_i32); |
| Constant *SignExtend = Ctx->getConstantInt32(0x1f); |
| if (auto *ConstantShiftAmount = llvm::dyn_cast<ConstantInteger32>(Src1Lo)) { |
| uint32_t ShiftAmount = ConstantShiftAmount->getValue(); |
| if (ShiftAmount > 32) { |
| Constant *ReducedShift = Ctx->getConstantInt32(ShiftAmount - 32); |
| switch (Op) { |
| default: |
| assert(0 && "non-shift op"); |
| break; |
| case InstArithmetic::Shl: { |
| // a=b<<c ==> |
| // t2 = b.lo |
| // t2 = shl t2, ShiftAmount-32 |
| // t3 = t2 |
| // t2 = 0 |
| _mov(T_2, Src0Lo); |
| _shl(T_2, ReducedShift); |
| _mov(DestHi, T_2); |
| _mov(DestLo, Zero); |
| } break; |
| case InstArithmetic::Lshr: { |
| // a=b>>c (unsigned) ==> |
| // t2 = b.hi |
| // t2 = shr t2, ShiftAmount-32 |
| // a.lo = t2 |
| // a.hi = 0 |
| _mov(T_2, Src0Hi); |
| _shr(T_2, ReducedShift); |
| _mov(DestLo, T_2); |
| _mov(DestHi, Zero); |
| } break; |
| case InstArithmetic::Ashr: { |
| // a=b>>c (signed) ==> |
| // t3 = b.hi |
| // t3 = sar t3, 0x1f |
| // t2 = b.hi |
| // t2 = shrd t2, t3, ShiftAmount-32 |
| // a.lo = t2 |
| // a.hi = t3 |
| _mov(T_3, Src0Hi); |
| _sar(T_3, SignExtend); |
| _mov(T_2, Src0Hi); |
| _shrd(T_2, T_3, ReducedShift); |
| _mov(DestLo, T_2); |
| _mov(DestHi, T_3); |
| } break; |
| } |
| } else if (ShiftAmount == 32) { |
| switch (Op) { |
| default: |
| assert(0 && "non-shift op"); |
| break; |
| case InstArithmetic::Shl: { |
| // a=b<<c ==> |
| // t2 = b.lo |
| // a.hi = t2 |
| // a.lo = 0 |
| _mov(T_2, Src0Lo); |
| _mov(DestHi, T_2); |
| _mov(DestLo, Zero); |
| } break; |
| case InstArithmetic::Lshr: { |
| // a=b>>c (unsigned) ==> |
| // t2 = b.hi |
| // a.lo = t2 |
| // a.hi = 0 |
| _mov(T_2, Src0Hi); |
| _mov(DestLo, T_2); |
| _mov(DestHi, Zero); |
| } break; |
| case InstArithmetic::Ashr: { |
| // a=b>>c (signed) ==> |
| // t2 = b.hi |
| // a.lo = t2 |
| // t3 = b.hi |
| // t3 = sar t3, 0x1f |
| // a.hi = t3 |
| _mov(T_2, Src0Hi); |
| _mov(DestLo, T_2); |
| _mov(T_3, Src0Hi); |
| _sar(T_3, SignExtend); |
| _mov(DestHi, T_3); |
| } break; |
| } |
| } else { |
| // COMMON PREFIX OF: a=b SHIFT_OP c ==> |
| // t2 = b.lo |
| // t3 = b.hi |
| _mov(T_2, Src0Lo); |
| _mov(T_3, Src0Hi); |
| switch (Op) { |
| default: |
| assert(0 && "non-shift op"); |
| break; |
| case InstArithmetic::Shl: { |
| // a=b<<c ==> |
| // t3 = shld t3, t2, ShiftAmount |
| // t2 = shl t2, ShiftAmount |
| _shld(T_3, T_2, ConstantShiftAmount); |
| _shl(T_2, ConstantShiftAmount); |
| } break; |
| case InstArithmetic::Lshr: { |
| // a=b>>c (unsigned) ==> |
| // t2 = shrd t2, t3, ShiftAmount |
| // t3 = shr t3, ShiftAmount |
| _shrd(T_2, T_3, ConstantShiftAmount); |
| _shr(T_3, ConstantShiftAmount); |
| } break; |
| case InstArithmetic::Ashr: { |
| // a=b>>c (signed) ==> |
| // t2 = shrd t2, t3, ShiftAmount |
| // t3 = sar t3, ShiftAmount |
| _shrd(T_2, T_3, ConstantShiftAmount); |
| _sar(T_3, ConstantShiftAmount); |
| } break; |
| } |
| // COMMON SUFFIX OF: a=b SHIFT_OP c ==> |
| // a.lo = t2 |
| // a.hi = t3 |
| _mov(DestLo, T_2); |
| _mov(DestHi, T_3); |
| } |
| } else { |
| // NON-CONSTANT CASES. |
| Constant *BitTest = Ctx->getConstantInt32(0x20); |
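| // The hardware shifts only use the low 5 bits of cl, so a 64-bit shift by |
| // c >= 32 cannot be expressed with a single shld/shl (or shrd/shr, shrd/sar) |
| // pair. The "test t1, 0x20" below checks bit 5 of the shift amount at run |
| // time; when it is set, the fall-through code swaps the halves (zeroing or |
| // sign-extending as appropriate) before the label. For example |
| // (illustrative), a left shift by 40 shifts both halves by 40 & 31 == 8 and |
| // then moves t2 into t3 and zeroes t2. |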
| InstX86Label *Label = InstX86Label::create(Func, this); |
| // COMMON PREFIX OF: a=b SHIFT_OP c ==> |
| // t1:ecx = c.lo & 0xff |
| // t2 = b.lo |
| // t3 = b.hi |
| T_1 = copyToReg8(Src1Lo, RegX8632::Reg_cl); |
| _mov(T_2, Src0Lo); |
| _mov(T_3, Src0Hi); |
| switch (Op) { |
| default: |
| assert(0 && "non-shift op"); |
| break; |
| case InstArithmetic::Shl: { |
| // a=b<<c ==> |
| // t3 = shld t3, t2, t1 |
| // t2 = shl t2, t1 |
| // test t1, 0x20 |
| // je L1 |
| // use(t3) |
| // t3 = t2 |
| // t2 = 0 |
| _shld(T_3, T_2, T_1); |
| _shl(T_2, T_1); |
| _test(T_1, BitTest); |
| _br(CondX86::Br_e, Label); |
| // T_2 and T_3 are being assigned again because of the intra-block control |
| // flow, so we need to use _redefined to avoid liveness problems. |
| _redefined(_mov(T_3, T_2)); |
| _redefined(_mov(T_2, Zero)); |
| } break; |
| case InstArithmetic::Lshr: { |
| // a=b>>c (unsigned) ==> |
| // t2 = shrd t2, t3, t1 |
| // t3 = shr t3, t1 |
| // test t1, 0x20 |
| // je L1 |
| // use(t2) |
| // t2 = t3 |
| // t3 = 0 |
| _shrd(T_2, T_3, T_1); |
| _shr(T_3, T_1); |
| _test(T_1, BitTest); |
| _br(CondX86::Br_e, Label); |
| // T_2 and T_3 are being assigned again because of the intra-block control |
| // flow, so we need to use _redefined to avoid liveness problems. |
| _redefined(_mov(T_2, T_3)); |
| _redefined(_mov(T_3, Zero)); |
| } break; |
| case InstArithmetic::Ashr: { |
| // a=b>>c (signed) ==> |
| // t2 = shrd t2, t3, t1 |
| // t3 = sar t3, t1 |
| // test t1, 0x20 |
| // je L1 |
| // use(t2) |
| // t2 = t3 |
| // t3 = sar t3, 0x1f |
| Constant *SignExtend = Ctx->getConstantInt32(0x1f); |
| _shrd(T_2, T_3, T_1); |
| _sar(T_3, T_1); |
| _test(T_1, BitTest); |
| _br(CondX86::Br_e, Label); |
| // T_2 and T_3 are being assigned again because of the intra-block control |
| // flow, so T_2 needs to use _redefined to avoid liveness problems. T_3 |
| // doesn't need special treatment because it is reassigned via _sar |
| // instead of _mov. |
| _redefined(_mov(T_2, T_3)); |
| _sar(T_3, SignExtend); |
| } break; |
| } |
| // COMMON SUFFIX OF: a=b SHIFT_OP c ==> |
| // L1: |
| // a.lo = t2 |
| // a.hi = t3 |
| Context.insert(Label); |
| _mov(DestLo, T_2); |
| _mov(DestHi, T_3); |
| } |
| } |
| |
| void TargetX8632::lowerArithmetic(const InstArithmetic *Instr) { |
| Variable *Dest = Instr->getDest(); |
| if (Dest->isRematerializable()) { |
| Context.insert<InstFakeDef>(Dest); |
| return; |
| } |
| Type Ty = Dest->getType(); |
| Operand *Src0 = legalize(Instr->getSrc(0)); |
| Operand *Src1 = legalize(Instr->getSrc(1)); |
| if (Instr->isCommutative()) { |
| uint32_t SwapCount = 0; |
| if (!llvm::isa<Variable>(Src0) && llvm::isa<Variable>(Src1)) { |
| std::swap(Src0, Src1); |
| ++SwapCount; |
| } |
| if (llvm::isa<Constant>(Src0) && !llvm::isa<Constant>(Src1)) { |
| std::swap(Src0, Src1); |
| ++SwapCount; |
| } |
| // Improve two-address code patterns by avoiding a copy to the dest |
| // register when one of the source operands ends its lifetime here. |
| if (!Instr->isLastUse(Src0) && Instr->isLastUse(Src1)) { |
| std::swap(Src0, Src1); |
| ++SwapCount; |
| } |
| assert(SwapCount <= 1); |
| (void)SwapCount; |
| } |
| if (Ty == IceType_i64) { |
| // These x86-32 helper-call-involved instructions are lowered in this separate |
| // switch because loOperand() and hiOperand() may insert redundant instructions |
| // for constant blinding and pooling, and such redundant instructions fail |
| // liveness analysis under the -Om1 setting. In addition, these operands do |
| // not need to be split with loOperand() and hiOperand() in order to be used. |
| switch (Instr->getOp()) { |
| case InstArithmetic::Udiv: |
| case InstArithmetic::Sdiv: |
| case InstArithmetic::Urem: |
| case InstArithmetic::Srem: |
| llvm::report_fatal_error("Helper call was expected"); |
| return; |
| default: |
| break; |
| } |
| |
| auto *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
| auto *DestHi = llvm::cast<Variable>(hiOperand(Dest)); |
| Operand *Src0Lo = loOperand(Src0); |
| Operand *Src0Hi = hiOperand(Src0); |
| Operand *Src1Lo = loOperand(Src1); |
| Operand *Src1Hi = hiOperand(Src1); |
| Variable *T_Lo = nullptr, *T_Hi = nullptr; |
| switch (Instr->getOp()) { |
| case InstArithmetic::_num: |
| llvm_unreachable("Unknown arithmetic operator"); |
| break; |
| case InstArithmetic::Add: |
| _mov(T_Lo, Src0Lo); |
| _add(T_Lo, Src1Lo); |
| _mov(DestLo, T_Lo); |
| _mov(T_Hi, Src0Hi); |
| _adc(T_Hi, Src1Hi); |
| _mov(DestHi, T_Hi); |
| break; |
| case InstArithmetic::And: |
| _mov(T_Lo, Src0Lo); |
| _and(T_Lo, Src1Lo); |
| _mov(DestLo, T_Lo); |
| _mov(T_Hi, Src0Hi); |
| _and(T_Hi, Src1Hi); |
| _mov(DestHi, T_Hi); |
| break; |
| case InstArithmetic::Or: |
| _mov(T_Lo, Src0Lo); |
| _or(T_Lo, Src1Lo); |
| _mov(DestLo, T_Lo); |
| _mov(T_Hi, Src0Hi); |
| _or(T_Hi, Src1Hi); |
| _mov(DestHi, T_Hi); |
| break; |
| case InstArithmetic::Xor: |
| _mov(T_Lo, Src0Lo); |
| _xor(T_Lo, Src1Lo); |
| _mov(DestLo, T_Lo); |
| _mov(T_Hi, Src0Hi); |
| _xor(T_Hi, Src1Hi); |
| _mov(DestHi, T_Hi); |
| break; |
| case InstArithmetic::Sub: |
| _mov(T_Lo, Src0Lo); |
| _sub(T_Lo, Src1Lo); |
| _mov(DestLo, T_Lo); |
| _mov(T_Hi, Src0Hi); |
| _sbb(T_Hi, Src1Hi); |
| _mov(DestHi, T_Hi); |
| break; |
| case InstArithmetic::Mul: { |
| Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr; |
| Variable *T_4Lo = makeReg(IceType_i32, RegX8632::Reg_eax); |
| Variable *T_4Hi = makeReg(IceType_i32, RegX8632::Reg_edx); |
| // gcc does the following: |
| // a=b*c ==> |
| // t1 = b.hi; t1 *=(imul) c.lo |
| // t2 = c.hi; t2 *=(imul) b.lo |
| // t3:eax = b.lo |
| // t4.hi:edx,t4.lo:eax = t3:eax *(mul) c.lo |
| // a.lo = t4.lo |
| // t4.hi += t1 |
| // t4.hi += t2 |
| // a.hi = t4.hi |
| // The mul instruction cannot take an immediate operand. |
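| // This works because, modulo 2^64, |
| // (b.hi*2^32 + b.lo) * (c.hi*2^32 + c.lo) |
| // == b.lo*c.lo + 2^32 * (b.hi*c.lo + b.lo*c.hi), |
| // i.e. the full 64-bit product of the low halves (in edx:eax) plus the two |
| // truncated cross terms added into the high half. |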
| Src1Lo = legalize(Src1Lo, Legal_Reg | Legal_Mem); |
| _mov(T_1, Src0Hi); |
| _imul(T_1, Src1Lo); |
| _mov(T_3, Src0Lo, RegX8632::Reg_eax); |
| _mul(T_4Lo, T_3, Src1Lo); |
| // The mul instruction produces two dest variables, edx:eax. We create a |
| // fake definition of edx to account for this. |
| Context.insert<InstFakeDef>(T_4Hi, T_4Lo); |
| Context.insert<InstFakeUse>(T_4Hi); |
| _mov(DestLo, T_4Lo); |
| _add(T_4Hi, T_1); |
| _mov(T_2, Src1Hi); |
| Src0Lo = legalize(Src0Lo, Legal_Reg | Legal_Mem); |
| _imul(T_2, Src0Lo); |
| _add(T_4Hi, T_2); |
| _mov(DestHi, T_4Hi); |
| } break; |
| case InstArithmetic::Shl: |
| case InstArithmetic::Lshr: |
| case InstArithmetic::Ashr: |
| lowerShift64(Instr->getOp(), Src0Lo, Src0Hi, Src1Lo, DestLo, DestHi); |
| break; |
| case InstArithmetic::Fadd: |
| case InstArithmetic::Fsub: |
| case InstArithmetic::Fmul: |
| case InstArithmetic::Fdiv: |
| case InstArithmetic::Frem: |
| llvm_unreachable("FP instruction with i64 type"); |
| break; |
| case InstArithmetic::Udiv: |
| case InstArithmetic::Sdiv: |
| case InstArithmetic::Urem: |
| case InstArithmetic::Srem: |
| llvm_unreachable("Call-helper-involved instruction for i64 type \ |
| should have already been handled before"); |
| break; |
| } |
| return; |
| } |
| if (isVectorType(Ty)) { |
| // TODO: Trap on integer divide and integer modulo by zero. See: |
| // https://code.google.com/p/nativeclient/issues/detail?id=3899 |
| if (llvm::isa<X86OperandMem>(Src1)) |
| Src1 = legalizeToReg(Src1); |
| switch (Instr->getOp()) { |
| case InstArithmetic::_num: |
| llvm_unreachable("Unknown arithmetic operator"); |
| break; |
| case InstArithmetic::Add: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _padd(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::And: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _pand(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Or: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _por(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Xor: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _pxor(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Sub: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _psub(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Mul: { |
| bool TypesAreValidForPmull = Ty == IceType_v4i32 || Ty == IceType_v8i16; |
| bool InstructionSetIsValidForPmull = |
| Ty == IceType_v8i16 || InstructionSet >= SSE4_1; |
| if (TypesAreValidForPmull && InstructionSetIsValidForPmull) { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _pmull(T, Src0 == Src1 ? T : Src1); |
| _movp(Dest, T); |
| } else if (Ty == IceType_v4i32) { |
| // Lowering sequence: |
| // Note: The mask arguments have index 0 on the left. |
| // |
| // movups T1, Src0 |
| // pshufd T2, Src0, {1,0,3,0} |
| // pshufd T3, Src1, {1,0,3,0} |
| // # T1 = {Src0[0] * Src1[0], Src0[2] * Src1[2]} |
| // pmuludq T1, Src1 |
| // # T2 = {Src0[1] * Src1[1], Src0[3] * Src1[3]} |
| // pmuludq T2, T3 |
| // # T1 = {lo(T1[0]), lo(T1[2]), lo(T2[0]), lo(T2[2])} |
| // shufps T1, T2, {0,2,0,2} |
| // pshufd T4, T1, {0,2,1,3} |
| // movups Dest, T4 |
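| // |
| // pmuludq multiplies only the even-numbered (0 and 2) 32-bit lanes, producing |
| // two 64-bit products. Shuffling lanes {1,0,3,0} first moves each operand's |
| // odd lanes into even positions, so the second pmuludq computes the odd-lane |
| // products; shufps then gathers the four low 32-bit halves, and the final |
| // pshufd restores the original lane order. |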
| |
| // Mask that directs pshufd to create a vector with entries |
| // Src[1, 0, 3, 0] |
| constexpr unsigned Constant1030 = 0x31; |
| Constant *Mask1030 = Ctx->getConstantInt32(Constant1030); |
| // Mask that directs shufps to create a vector with entries |
| // Dest[0, 2], Src[0, 2] |
| constexpr unsigned Mask0202 = 0x88; |
| // Mask that directs pshufd to create a vector with entries |
| // Src[0, 2, 1, 3] |
| constexpr unsigned Mask0213 = 0xd8; |
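| // Each pshufd/shufps immediate packs four 2-bit lane selectors, least |
| // significant field first: 0x31 == 0b00'11'00'01 selects lanes {1,0,3,0}, |
| // 0x88 == 0b10'00'10'00 selects lanes {0,2} from each source, and |
| // 0xd8 == 0b11'01'10'00 selects lanes {0,2,1,3}. |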
| Variable *T1 = makeReg(IceType_v4i32); |
| Variable *T2 = makeReg(IceType_v4i32); |
| Variable *T3 = makeReg(IceType_v4i32); |
| Variable *T4 = makeReg(IceType_v4i32); |
| _movp(T1, Src0); |
| _pshufd(T2, Src0, Mask1030); |
| _pshufd(T3, Src1, Mask1030); |
| _pmuludq(T1, Src1); |
| _pmuludq(T2, T3); |
| _shufps(T1, T2, Ctx->getConstantInt32(Mask0202)); |
| _pshufd(T4, T1, Ctx->getConstantInt32(Mask0213)); |
| _movp(Dest, T4); |
| } else if (Ty == IceType_v16i8) { |
| llvm::report_fatal_error("Scalarized operation was expected"); |
| } else { |
| llvm::report_fatal_error("Invalid vector multiply type"); |
| } |
| } break; |
| case InstArithmetic::Shl: { |
| assert(llvm::isa<Constant>(Src1) && "Non-constant shift not scalarized"); |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _psll(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Lshr: { |
| assert(llvm::isa<Constant>(Src1) && "Non-constant shift not scalarized"); |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _psrl(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Ashr: { |
| assert(llvm::isa<Constant>(Src1) && "Non-constant shift not scalarized"); |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _psra(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Udiv: |
| case InstArithmetic::Urem: |
| case InstArithmetic::Sdiv: |
| case InstArithmetic::Srem: |
| llvm::report_fatal_error("Scalarized operation was expected"); |
| break; |
| case InstArithmetic::Fadd: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _addps(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Fsub: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _subps(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Fmul: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _mulps(T, Src0 == Src1 ? T : Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Fdiv: { |
| Variable *T = makeReg(Ty); |
| _movp(T, Src0); |
| _divps(T, Src1); |
| _movp(Dest, T); |
| } break; |
| case InstArithmetic::Frem: |
| llvm::report_fatal_error("Scalarized operation was expected"); |
| break; |
| } |
| return; |
| } |
| Variable *T_edx = nullptr; |
| Variable *T = nullptr; |
| switch (Instr->getOp()) { |
| case InstArithmetic::_num: |
| llvm_unreachable("Unknown arithmetic operator"); |
| break; |
| case InstArithmetic::Add: { |
| const bool ValidType = Ty == IceType_i32; |
| auto *Const = llvm::dyn_cast<Constant>(Instr->getSrc(1)); |
| const bool ValidKind = |
| Const != nullptr && (llvm::isa<ConstantInteger32>(Const) || |
| llvm::isa<ConstantRelocatable>(Const)); |
| if (getFlags().getAggressiveLea() && ValidType && ValidKind) { |
| auto *Var = legalizeToReg(Src0); |
| auto *Mem = X86OperandMem::create(Func, IceType_void, Var, Const); |
| T = makeReg(Ty); |
| _lea(T, Mem); |
| _mov(Dest, T); |
| break; |
| } |
| _mov(T, Src0); |
| _add(T, Src1); |
| _mov(Dest, T); |
| } break; |
| case InstArithmetic::And: |
| _mov(T, Src0); |
| _and(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Or: |
| _mov(T, Src0); |
| _or(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Xor: |
| _mov(T, Src0); |
| _xor(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Sub: |
| _mov(T, Src0); |
| _sub(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Mul: |
| if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) { |
| if (optimizeScalarMul(Dest, Src0, C->getValue())) |
| return; |
| } |
| // The 8-bit version of imul only allows the form "imul r/m8" where T must |
| // be in al. |
| if (isByteSizedArithType(Ty)) { |
| _mov(T, Src0, RegX8632::Reg_al); |
| Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| _imul(T, Src0 == Src1 ? T : Src1); |
| _mov(Dest, T); |
| } else if (auto *ImmConst = llvm::dyn_cast<ConstantInteger32>(Src1)) { |
| T = makeReg(Ty); |
| Src0 = legalize(Src0, Legal_Reg | Legal_Mem); |
| _imul_imm(T, Src0, ImmConst); |
| _mov(Dest, T); |
| } else { |
| _mov(T, Src0); |
| // No need to legalize Src1 to Reg | Mem because the Imm case is handled |
| // already by the ConstantInteger32 case above. |
| _imul(T, Src0 == Src1 ? T : Src1); |
| _mov(Dest, T); |
| } |
| break; |
| case InstArithmetic::Shl: |
| _mov(T, Src0); |
| if (!llvm::isa<ConstantInteger32>(Src1) && |
| !llvm::isa<ConstantInteger64>(Src1)) |
| Src1 = copyToReg8(Src1, RegX8632::Reg_cl); |
| _shl(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Lshr: |
| _mov(T, Src0); |
| if (!llvm::isa<ConstantInteger32>(Src1) && |
| !llvm::isa<ConstantInteger64>(Src1)) |
| Src1 = copyToReg8(Src1, RegX8632::Reg_cl); |
| _shr(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Ashr: |
| _mov(T, Src0); |
| if (!llvm::isa<ConstantInteger32>(Src1) && |
| !llvm::isa<ConstantInteger64>(Src1)) |
| Src1 = copyToReg8(Src1, RegX8632::Reg_cl); |
| _sar(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Udiv: { |
| // div and idiv are the few arithmetic operators that do not allow |
| // immediates as the operand. |
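| // The hardware div takes its dividend in edx:eax (dx:ax for i16, ax for i8) |
| // and leaves the quotient in eax/ax/al and the remainder in edx/dx/ah, which |
| // is why the operands below are pinned to those registers and the high half |
| // is zeroed first for an unsigned divide. |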
| Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| RegNumT Eax; |
| RegNumT Edx; |
| switch (Ty) { |
| default: |
| llvm::report_fatal_error("Bad type for udiv"); |
| case IceType_i32: |
| Eax = RegX8632::Reg_eax; |
| Edx = RegX8632::Reg_edx; |
| break; |
| case IceType_i16: |
| Eax = RegX8632::Reg_ax; |
| Edx = RegX8632::Reg_dx; |
| break; |
| case IceType_i8: |
| Eax = RegX8632::Reg_al; |
| Edx = RegX8632::Reg_ah; |
| break; |
| } |
| T_edx = makeReg(Ty, Edx); |
| _mov(T, Src0, Eax); |
| _mov(T_edx, Ctx->getConstantZero(Ty)); |
| _div(T_edx, Src1, T); |
| _redefined(Context.insert<InstFakeDef>(T, T_edx)); |
| _mov(Dest, T); |
| } break; |
| case InstArithmetic::Sdiv: |
| // TODO(stichnot): Enable this after doing better performance and cross |
| // testing. |
| if (false && Func->getOptLevel() >= Opt_1) { |
| // Optimize division by constant power of 2, but not for Om1 or O0, just |
| // to keep things simple there. |
| if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) { |
| const int32_t Divisor = C->getValue(); |
| const uint32_t UDivisor = Divisor; |
| if (Divisor > 0 && llvm::isPowerOf2_32(UDivisor)) { |
| uint32_t LogDiv = llvm::Log2_32(UDivisor); |
| // LLVM does the following for dest=src/(1<<log): |
| // t=src |
| // sar t,typewidth-1 // -1 if src is negative, 0 if not |
| // shr t,typewidth-log |
| // add t,src |
| // sar t,log |
| // dest=t |
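| // For example (illustrative), src = -7 with log = 2 gives: t = -7; sar 31 |
| // -> -1; shr 30 -> 3; add src -> -4; sar 2 -> -1, matching C's truncating |
| // division -7 / 4 == -1. |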
| uint32_t TypeWidth = X86_CHAR_BIT * typeWidthInBytes(Ty); |
| _mov(T, Src0); |
| // If for some reason we are dividing by 1, just treat it like an |
| // assignment. |
| if (LogDiv > 0) { |
| // The initial sar is unnecessary when dividing by 2. |
| if (LogDiv > 1) |
| _sar(T, Ctx->getConstantInt(Ty, TypeWidth - 1)); |
| _shr(T, Ctx->getConstantInt(Ty, TypeWidth - LogDiv)); |
| _add(T, Src0); |
| _sar(T, Ctx->getConstantInt(Ty, LogDiv)); |
| } |
| _mov(Dest, T); |
| return; |
| } |
| } |
| } |
| Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| switch (Ty) { |
| default: |
| llvm::report_fatal_error("Bad type for sdiv"); |
| case IceType_i32: |
| T_edx = makeReg(Ty, RegX8632::Reg_edx); |
| _mov(T, Src0, RegX8632::Reg_eax); |
| break; |
| case IceType_i16: |
| T_edx = makeReg(Ty, RegX8632::Reg_dx); |
| _mov(T, Src0, RegX8632::Reg_ax); |
| break; |
| case IceType_i8: |
| T_edx = makeReg(IceType_i16, RegX8632::Reg_ax); |
| _mov(T, Src0, RegX8632::Reg_al); |
| break; |
| } |
| _cbwdq(T_edx, T); |
| _idiv(T_edx, Src1, T); |
| _redefined(Context.insert<InstFakeDef>(T, T_edx)); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Urem: { |
| Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| RegNumT Eax; |
| RegNumT Edx; |
| switch (Ty) { |
| default: |
| llvm::report_fatal_error("Bad type for urem"); |
| case IceType_i32: |
| Eax = RegX8632::Reg_eax; |
| Edx = RegX8632::Reg_edx; |
| break; |
| case IceType_i16: |
| Eax = RegX8632::Reg_ax; |
| Edx = RegX8632::Reg_dx; |
| break; |
| case IceType_i8: |
| Eax = RegX8632::Reg_al; |
| Edx = RegX8632::Reg_ah; |
| break; |
| } |
| T_edx = makeReg(Ty, Edx); |
| _mov(T_edx, Ctx->getConstantZero(Ty)); |
| _mov(T, Src0, Eax); |
| _div(T, Src1, T_edx); |
| _redefined(Context.insert<InstFakeDef>(T_edx, T)); |
| if (Ty == IceType_i8) { |
| // Register ah must be moved into one of {al,bl,cl,dl} before it can be |
| // moved into a general 8-bit register. |
| auto *T_AhRcvr = makeReg(Ty); |
| T_AhRcvr->setRegClass(RCX86_IsAhRcvr); |
| _mov(T_AhRcvr, T_edx); |
| T_edx = T_AhRcvr; |
| } |
| _mov(Dest, T_edx); |
| } break; |
| case InstArithmetic::Srem: { |
| // TODO(stichnot): Enable this after doing better performance and cross |
| // testing. |
| if (false && Func->getOptLevel() >= Opt_1) { |
| // Optimize mod by constant power of 2, but not for Om1 or O0, just to |
| // keep things simple there. |
| if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) { |
| const int32_t Divisor = C->getValue(); |
| const uint32_t UDivisor = Divisor; |
| if (Divisor > 0 && llvm::isPowerOf2_32(UDivisor)) { |
| uint32_t LogDiv = llvm::Log2_32(UDivisor); |
| // LLVM does the following for dest=src%(1<<log): |
| // t=src |
| // sar t,typewidth-1 // -1 if src is negative, 0 if not |
| // shr t,typewidth-log |
| // add t,src |
| // and t, -(1<<log) |
| // sub t,src |
| // neg t |
| // dest=t |
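| // For example (illustrative), src = -7 with log = 2 gives: t = -7; sar 31 |
| // -> -1; shr 30 -> 3; add src -> -4; and -4 -> -4; sub src -> 3; neg -> -3, |
| // matching C's -7 % 4 == -3. |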
| uint32_t TypeWidth = X86_CHAR_BIT * typeWidthInBytes(Ty); |
| // If for some reason we are dividing by 1, just assign 0. |
| if (LogDiv == 0) { |
| _mov(Dest, Ctx->getConstantZero(Ty)); |
| return; |
| } |
| _mov(T, Src0); |
| // The initial sar is unnecessary when dividing by 2. |
| if (LogDiv > 1) |
| _sar(T, Ctx->getConstantInt(Ty, TypeWidth - 1)); |
| _shr(T, Ctx->getConstantInt(Ty, TypeWidth - LogDiv)); |
| _add(T, Src0); |
| _and(T, Ctx->getConstantInt(Ty, -(1 << LogDiv))); |
| _sub(T, Src0); |
| _neg(T); |
| _mov(Dest, T); |
| return; |
| } |
| } |
| } |
| Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| RegNumT Eax; |
| RegNumT Edx; |
| switch (Ty) { |
| default: |
| llvm::report_fatal_error("Bad type for srem"); |
| case IceType_i32: |
| Eax = RegX8632::Reg_eax; |
| Edx = RegX8632::Reg_edx; |
| break; |
| case IceType_i16: |
| Eax = RegX8632::Reg_ax; |
| Edx = RegX8632::Reg_dx; |
| break; |
| case IceType_i8: |
| Eax = RegX8632::Reg_al; |
| Edx = RegX8632::Reg_ah; |
| break; |
| } |
| T_edx = makeReg(Ty, Edx); |
| _mov(T, Src0, Eax); |
| _cbwdq(T_edx, T); |
| _idiv(T, Src1, T_edx); |
| _redefined(Context.insert<InstFakeDef>(T_edx, T)); |
| if (Ty == IceType_i8) { |
| // Register ah must be moved into one of {al,bl,cl,dl} before it can be |
| // moved into a general 8-bit register. |
| auto *T_AhRcvr = makeReg(Ty); |
| T_AhRcvr->setRegClass(RCX86_IsAhRcvr); |
| _mov(T_AhRcvr, T_edx); |
| T_edx = T_AhRcvr; |
| } |
| _mov(Dest, T_edx); |
| } break; |
| case InstArithmetic::Fadd: |
| _mov(T, Src0); |
| _addss(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Fsub: |
| _mov(T, Src0); |
| _subss(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Fmul: |
| _mov(T, Src0); |
| _mulss(T, Src0 == Src1 ? T : Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Fdiv: |
| _mov(T, Src0); |
| _divss(T, Src1); |
| _mov(Dest, T); |
| break; |
| case InstArithmetic::Frem: |
| llvm::report_fatal_error("Helper call was expected"); |
| break; |
| } |
| } |
| |
| void TargetX8632::lowerAssign(const InstAssign *Instr) { |
| Variable *Dest = Instr->getDest(); |
| if (Dest->isRematerializable()) { |
| Context.insert<InstFakeDef>(Dest); |
| return; |
| } |
| Operand *Src = Instr->getSrc(0); |
| assert(Dest->getType() == Src->getType()); |
| lowerMove(Dest, Src, false); |
| } |
| |
| void TargetX8632::lowerBr(const InstBr *Br) { |
| if (Br->isUnconditional()) { |
| _br(Br->getTargetUnconditional()); |
| return; |
| } |
| Operand *Cond = Br->getCondition(); |
| |
| // Handle folding opportunities: if the condition's producer (icmp, fcmp, or |
| // and/or) was marked for folding, lower it fused with this branch so its |
| // flags feed the conditional jump directly. |
| if (const Inst *Producer = FoldingInfo.getProducerFor(Cond)) { |
| assert(Producer->isDeleted()); |
| switch (BoolFolding::getProducerKind(Producer)) { |
| default: |
| break; |
| case BoolFolding::PK_Icmp32: |
| case BoolFolding::PK_Icmp64: { |
| lowerIcmpAndConsumer(llvm::cast<InstIcmp>(Producer), Br); |
| return; |
| } |
| case BoolFolding::PK_Fcmp: { |
| lowerFcmpAndConsumer(llvm::cast<InstFcmp>(Producer), Br); |
| return; |
| } |
| case BoolFolding::PK_Arith: { |
| lowerArithAndConsumer(llvm::cast<InstArithmetic>(Producer), Br); |
| return; |
| } |
| } |
| } |
| Operand *Src0 = legalize(Cond, Legal_Reg | Legal_Mem); |
| Constant *Zero = Ctx->getConstantZero(IceType_i32); |
| _cmp(Src0, Zero); |
| _br(CondX86::Br_ne, Br->getTargetTrue(), Br->getTargetFalse()); |
| } |
| |
| // constexprMax returns a (constexpr) max(S0, S1), and it is used for defining |
| // OperandList in lowerCall. std::max() would be the natural choice, but it is |
| // not constexpr before C++14 and so cannot be used here. |
| inline constexpr SizeT constexprMax(SizeT S0, SizeT S1) { |
| return S0 < S1 ? S1 : S0; |
| } |
| |
| void TargetX8632::lowerCall(const InstCall *Instr) { |
| // System V x86-32 calling convention lowering: |
| // |
| // * At the point before the call, the stack must be aligned to 16 bytes. |
| // |
| // * Non-register arguments are pushed onto the stack in right-to-left order, |
| // such that the left-most argument ends up on the top of the stack at the |
| // lowest memory address. |
| // |
| // * Stack arguments of vector type are aligned to start at the next highest |
| // multiple of 16 bytes. Other stack arguments are aligned to the next word |
| // size boundary (4 bytes on x86-32). |
| // |
| // This is compatible with the Microsoft x86-32 'cdecl' calling convention, |
| // which doesn't have a 16-byte stack alignment requirement. |
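| // |
| // As an illustrative example, if three arguments (i32, v4f32, i32) all end up |
| // on the stack (i.e. none of them is assigned to an argument register below), |
| // the first i32 is stored at esp+0, the vector is aligned up to esp+16, the |
| // second i32 lands at esp+32, and ParameterAreaSizeBytes is finally rounded |
| // up from 36 to 48. |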
| |
| RequiredStackAlignment = |
| std::max<size_t>(RequiredStackAlignment, X86_STACK_ALIGNMENT_BYTES); |
| |
| constexpr SizeT MaxOperands = |
| constexprMax(RegX8632::X86_MAX_XMM_ARGS, RegX8632::X86_MAX_GPR_ARGS); |
| using OperandList = llvm::SmallVector<Operand *, MaxOperands>; |
| |
| OperandList XmmArgs; |
| llvm::SmallVector<SizeT, MaxOperands> XmmArgIndices; |
| CfgVector<std::pair<const Type, Operand *>> GprArgs; |
| CfgVector<SizeT> GprArgIndices; |
| OperandList StackArgs, StackArgLocations; |
| uint32_t ParameterAreaSizeBytes = 0; |
| |
| // Classify each argument operand according to the location where the argument |
| // is passed. |
| for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) { |
| Operand *Arg = Instr->getArg(i); |
| const Type Ty = Arg->getType(); |
| // The PNaCl ABI requires the width of arguments to be at least 32 bits. |
| assert(typeWidthInBytes(Ty) >= 4); |
| if (isVectorType(Ty) && RegX8632::getRegisterForXmmArgNum( |
| RegX8632::getArgIndex(i, XmmArgs.size())) |
| .hasValue()) { |
| XmmArgs.push_back(Arg); |
| XmmArgIndices.push_back(i); |
| } else if (isScalarIntegerType(Ty) && |
| RegX8632::getRegisterForGprArgNum( |
| Ty, RegX8632::getArgIndex(i, GprArgs.size())) |
| .hasValue()) { |
| GprArgs.emplace_back(Ty, Arg); |
| GprArgIndices.push_back(i); |
| } else { |
| // Place on stack. |
| StackArgs.push_back(Arg); |
| if (isVectorType(Arg->getType())) { |
| ParameterAreaSizeBytes = applyStackAlignment(ParameterAreaSizeBytes); |
| } |
| Variable *esp = getPhysicalRegister(getStackReg(), WordType); |
| Constant *Loc = Ctx->getConstantInt32(ParameterAreaSizeBytes); |
| StackArgLocations.push_back(X86OperandMem::create(Func, Ty, esp, Loc)); |
| ParameterAreaSizeBytes += typeWidthInBytesOnStack(Arg->getType()); |
| } |
| } |
| // Ensure there is enough space for the fstp/movs for floating returns. |
| Variable *Dest = Instr->getDest(); |
| const Type DestTy = Dest ? Dest->getType() : IceType_void; |
| if (isScalarFloatingType(DestTy)) { |
| ParameterAreaSizeBytes = |
| std::max(static_cast<size_t>(ParameterAreaSizeBytes), |
| typeWidthInBytesOnStack(DestTy)); |
| } |
| // Adjust the parameter area so that the stack is aligned. It is assumed that |
| // the stack is already aligned at the start of the calling sequence. |
| ParameterAreaSizeBytes = applyStackAlignment(ParameterAreaSizeBytes); |
| assert(ParameterAreaSizeBytes <= maxOutArgsSizeBytes()); |
| // Copy arguments that are passed on the stack to the appropriate stack |
| // locations. We make sure legalize() is called on each argument at this |
| // point, to allow availabilityGet() to work. |
| for (SizeT i = 0, NumStackArgs = StackArgs.size(); i < NumStackArgs; ++i) { |
| lowerStore( |
| InstStore::create(Func, legalize(StackArgs[i]), StackArgLocations[i])); |
| } |
| // Copy arguments to be passed in registers to the appropriate registers. |
| for (SizeT i = 0, NumXmmArgs = XmmArgs.size(); i < NumXmmArgs; ++i) { |
| XmmArgs[i] = legalizeToReg(legalize(XmmArgs[i]), |
| RegX8632::getRegisterForXmmArgNum( |
| RegX8632::getArgIndex(XmmArgIndices[i], i))); |
| } |
| // Materialize moves for arguments passed in GPRs. |
| for (SizeT i = 0, NumGprArgs = GprArgs.size(); i < NumGprArgs; ++i) { |
| const Type SignatureTy = GprArgs[i].first; |
| Operand *Arg = |
| legalize(GprArgs[i].second, Legal_Default | Legal_Rematerializable); |
| GprArgs[i].second = legalizeToReg( |
| Arg, RegX8632::getRegisterForGprArgNum( |
| Arg->getType(), RegX8632::getArgIndex(GprArgIndices[i], i))); |
| assert(SignatureTy == IceType_i64 || SignatureTy == IceType_i32); |
| assert(SignatureTy == Arg->getType()); |
| (void)SignatureTy; |
| } |
| // Generate a FakeUse of register arguments so that they do not get dead code |
| // eliminated as a result of the FakeKill of scratch registers after the call. |
| // These need to be right before the call instruction. |
| for (auto *Arg : XmmArgs) { |
| Context.insert<InstFakeUse>(llvm::cast<Variable>(Arg)); |
| } |
| for (auto &ArgPair : GprArgs) { |
| Context.insert<InstFakeUse>(llvm::cast<Variable>(ArgPair.second)); |
| } |
| // Generate the call instruction. Assign its result to a temporary with high |
| // register allocation weight. |
| // ReturnReg doubles as ReturnRegLo as necessary. |
| Variable *ReturnReg = nullptr; |
| Variable *ReturnRegHi = nullptr; |
| if (Dest) { |
| switch (DestTy) { |
| case IceType_NUM: |
| case IceType_void: |
| case IceType_i1: |
| case IceType_i8: |
| case IceType_i16: |
| llvm::report_fatal_error("Invalid Call dest type"); |
| break; |
| case IceType_i32: |
| ReturnReg = makeReg(DestTy, RegX8632::Reg_eax); |
| break; |
| case IceType_i64: |
| ReturnReg = makeReg(IceType_i32, RegX8632::Reg_eax); |
| ReturnRegHi = makeReg(IceType_i32, RegX8632::Reg_edx); |
| break; |
| case IceType_f32: |
| case IceType_f64: |
| // Leave ReturnReg==ReturnRegHi==nullptr, and capture the result with |
| // the fstp instruction. |
| break; |
| case IceType_v4i1: |
| case IceType_v8i1: |
| case IceType_v16i1: |
| case IceType_v16i8: |
| case IceType_v8i16: |
| case IceType_v4i32: |
| case IceType_v4f32: |
| ReturnReg = makeReg(DestTy, RegX8632::Reg_xmm0); |
| break; |
| } |
| } |
| // Emit the call to the function. |
| Operand *CallTarget = |
| legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm | Legal_AddrAbs); |
| size_t NumVariadicFpArgs = Instr->isVariadic() ? XmmArgs.size() : 0; |
| Inst *NewCall = emitCallToTarget(CallTarget, ReturnReg, NumVariadicFpArgs); |
| // Keep the upper return register live; a 64-bit result is returned in edx:eax. |
| if (ReturnRegHi) |
| Context.insert<InstFakeDef>(ReturnRegHi); |
| // Mark the call as killing all the caller-save registers. |
| Context.insert<InstFakeKill>(NewCall); |
| // Handle x86-32 floating point returns. |
| if (Dest != nullptr && isScalarFloatingType(DestTy)) { |
| // Special treatment for an FP function which returns its result in st(0). |
| // If Dest ends up being a physical xmm register, the fstp emit code will |
| // route st(0) through the space reserved in the function argument area |
| // we allocated. |
| _fstp(Dest); |
| // Create a fake use of Dest in case it actually isn't used, because st(0) |
| // still needs to be popped. |
| Context.insert<InstFakeUse>(Dest); |
| } |
| // Generate a FakeUse to keep the call live if necessary. |
| if (Instr->hasSideEffects() && ReturnReg) { |
| Context.insert<InstFakeUse>(ReturnReg); |
| } |
| // Process the return value, if any. |
| if (Dest == nullptr) |
| return; |
| // Assign the result of the call to Dest. Route it through a temporary so |
| // that the local register availability peephole can be subsequently used. |
| Variable *Tmp = nullptr; |
| if (isVectorType(DestTy)) { |
| assert(ReturnReg && "Vector type requires a return register"); |
| Tmp = makeReg(DestTy); |
| _movp(Tmp, ReturnReg); |
| _movp(Dest, Tmp); |
| } else if (!isScalarFloatingType(DestTy)) { |
| assert(isScalarIntegerType(DestTy)); |
| assert(ReturnReg && "Integer type requires a return register"); |
| if (DestTy == IceType_i64) { |
| assert(ReturnRegHi && "64-bit type requires two return registers"); |
| auto *Dest64On32 = llvm::cast<Variable64On32>(Dest); |
| Variable *DestLo = Dest64On32->getLo(); |
| Variable *DestHi = Dest64On32->getHi(); |
| _mov(Tmp, ReturnReg); |
| _mov(DestLo, Tmp); |
| Variable *TmpHi = nullptr; |
| _mov(TmpHi, ReturnRegHi); |
| _mov(DestHi, TmpHi); |
| } else { |
| _mov(Tmp, ReturnReg); |
| _mov(Dest, Tmp); |
| } |
| } |
| } |
| |
| void TargetX8632::lowerCast(const InstCast *Instr) { |
| // a = cast(b) ==> t=cast(b); a=t; (link t->b, link a->t, no overlap) |
| InstCast::OpKind CastKind = Instr->getCastKind(); |
| Variable *Dest = Instr->getDest(); |
| Type DestTy = Dest->getType(); |
| switch (CastKind) { |
| default: |
| Func->setError("Cast type not supported"); |
| return; |
| case InstCast::Sext: { |
| // Src0RM is the source operand legalized to physical register or memory, |
| // but not immediate, since the relevant x86 native instructions don't |
| // allow an immediate operand. If the operand is an immediate, we could |
| // consider computing the strength-reduced result at translation time, but |
| // we're unlikely to see something like that in the bitcode that the |
| // optimizer wouldn't have already taken care of. |
| Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem); |
| if (isVectorType(DestTy)) { |
| if (DestTy == IceType_v16i8) { |
| // onemask = materialize(1,1,...); dst = (src & onemask) > 0 |
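| // (SSE provides no 8-bit vector shifts, so the shl/sar trick used for the |
| // wider element types below is not available for v16i8.) |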
| Variable *OneMask = makeVectorOfOnes(DestTy); |
| Variable *T = makeReg(DestTy); |
| _movp(T, Src0RM); |
| _pand(T, OneMask); |
| Variable *Zeros = makeVectorOfZeros(DestTy); |
| _pcmpgt(T, Zeros); |
| _movp(Dest, T); |
| } else { |
| // width = width(elty) - 1; dest = (src << width) >> width |
| SizeT ShiftAmount = |
| X86_CHAR_BIT * typeWidthInBytes(typeElementType(DestTy)) - 1; |
| Constant *ShiftConstant = Ctx->getConstantInt8(ShiftAmount); |
| Variable *T = makeReg(DestTy); |
| _movp(T, Src0RM); |
| _psll(T, ShiftConstant); |
| _psra(T, ShiftConstant); |
| _movp(Dest, T); |
| } |
| } else if (DestTy == IceType_i64) { |
| // t1=movsx src; t2=t1; t2=sar t2, 31; dst.lo=t1; dst.hi=t2 |
| Constant *Shift = Ctx->getConstantInt32(31); |
| auto *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
| auto *DestHi = llvm::cast<Variable>(hiOperand(Dest)); |
| Variable *T_Lo = makeReg(DestLo->getType()); |
| if (Src0RM->getType() == IceType_i32) { |
| _mov(T_Lo, Src0RM); |
| } else if (Src0RM->getType() == IceType_i1) { |
| _movzx(T_Lo, Src0RM); |
| _shl(T_Lo, Shift); |
| _sar(T_Lo, Shift); |
| } else { |
| _movsx(T_Lo, Src0RM); |
| } |
| _mov(DestLo, T_Lo); |
| Variable *T_Hi = nullptr; |
| _mov(T_Hi, T_Lo); |
| if (Src0RM->getType() != IceType_i1) |
| // For i1, the sar instruction is already done above. |
| _sar(T_Hi, Shift); |
| _mov(DestHi, T_Hi); |
| } else if (Src0RM->getType() == IceType_i1) { |
| // t1 = src |
| // shl t1, dst_bitwidth - 1 |
| // sar t1, dst_bitwidth - 1 |
| // dst = t1 |
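| // For example (illustrative), sign-extending the i1 value 1 to i32 shifts it |
| // to 0x80000000 and then arithmetic-shifts it back to 0xffffffff (-1). |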
| size_t DestBits = X86_CHAR_BIT * typeWidthInBytes(DestTy); |
| Constant *ShiftAmount = Ctx->getConstantInt32(DestBits - 1); |
| Variable *T = makeReg(DestTy); |
| if (typeWidthInBytes(DestTy) <= typeWidthInBytes(Src0RM->getType())) { |
| _mov(T, Src0RM); |
| } else { |
| // Widen the source using movsx or movzx. (It doesn't matter which one, |
| // since the following shl/sar overwrite the bits.) |
| _movzx(T, Src0RM); |
| } |
| _shl(T, ShiftAmount); |
| _sar(T, ShiftAmount); |
| _mov(Dest, T); |
| } else { |
| // t1 = movsx src; dst = t1 |
| Variable *T = makeReg(DestTy); |
| _movsx(T, Src0RM); |
| _mov(Dest, T); |
| } |
| break; |
| } |
| case InstCast::Zext: { |
| Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem); |
| if (isVectorType(DestTy)) { |
| // onemask = materialize(1,1,...); dest = onemask & src |
| Variable *OneMask = makeVectorOfOnes(DestTy); |
| Variable *T = makeReg(DestTy); |
|