//===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains the declarations of the Vectorization Plan base classes:
/// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
/// VPBlockBase, together implementing a Hierarchical CFG;
/// 2. Pure virtual VPRecipeBase serving as the base class for recipes contained
/// within VPBasicBlocks;
/// 3. VPInstruction, a concrete Recipe and VPUser modeling a single planned
/// instruction;
/// 4. The VPlan class holding a candidate for vectorization;
/// 5. The VPlanPrinter class providing a way to print a plan in dot format;
/// These are documented in docs/VectorizationPlan.rst.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
#define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
#include "VPlanValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/FMF.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
namespace llvm {
class BasicBlock;
class DominatorTree;
class InductionDescriptor;
class InnerLoopVectorizer;
class IRBuilderBase;
class LoopInfo;
class PredicatedScalarEvolution;
class raw_ostream;
class RecurrenceDescriptor;
class SCEV;
class Type;
class VPBasicBlock;
class VPRegionBlock;
class VPlan;
class VPReplicateRecipe;
class VPlanSlp;
class Value;
namespace Intrinsic {
typedef unsigned ID;
}
/// Returns a calculation for the total number of elements for a given \p VF.
/// For fixed width vectors this value is a constant, whereas for scalable
/// vectors it is an expression determined at runtime.
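/// For example (a sketch), for a fixed VF of 4 this returns the constant 4 of
/// type \p Ty, while for VF = vscale x 4 it returns a runtime expression
/// computing vscale * 4.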
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF);
/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
int64_t Step);
/// Return a SCEV describing the trip count of the loop represented by \p PSE,
/// using \p IdxTy as the type of the expression.
const SCEV *createTripCountSCEV(Type *IdxTy, PredicatedScalarEvolution &PSE);
/// A range of powers-of-2 vectorization factors with fixed start and
/// adjustable end. The range includes start and excludes end, e.g.:
/// [1, 9) = {1, 2, 4, 8}
struct VFRange {
// A power of 2.
const ElementCount Start;
// Need not be a power of 2. If End <= Start, the range is empty.
ElementCount End;
bool isEmpty() const {
return End.getKnownMinValue() <= Start.getKnownMinValue();
}
VFRange(const ElementCount &Start, const ElementCount &End)
: Start(Start), End(End) {
assert(Start.isScalable() == End.isScalable() &&
"Both Start and End should have the same scalable flag");
assert(isPowerOf2_32(Start.getKnownMinValue()) &&
"Expected Start to be a power of 2");
}
};
using VPlanPtr = std::unique_ptr<VPlan>;
/// In what follows, the term "input IR" refers to code that is fed into the
/// vectorizer whereas the term "output IR" refers to code that is generated by
/// the vectorizer.
/// VPLane provides a way to access lanes in both fixed width and scalable
/// vectors, where for the latter the lane index sometimes needs calculating
/// as a runtime expression.
class VPLane {
public:
/// Kind describes how to interpret Lane.
enum class Kind : uint8_t {
/// For First, Lane is the index into the first N elements of a
/// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
First,
/// For ScalableLast, Lane is the offset from the start of the last
/// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
/// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
/// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
ScalableLast
};
private:
/// in [0..VF)
unsigned Lane;
/// Indicates how the Lane should be interpreted, as described above.
Kind LaneKind;
public:
VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
static VPLane getLastLaneForVF(const ElementCount &VF) {
unsigned LaneOffset = VF.getKnownMinValue() - 1;
Kind LaneKind;
if (VF.isScalable())
// In this case 'LaneOffset' refers to the offset from the start of the
// last subvector with VF.getKnownMinValue() elements.
LaneKind = VPLane::Kind::ScalableLast;
else
LaneKind = VPLane::Kind::First;
return VPLane(LaneOffset, LaneKind);
}
/// Returns a compile-time known value for the lane index and asserts if the
/// lane can only be calculated at runtime.
unsigned getKnownLane() const {
assert(LaneKind == Kind::First);
return Lane;
}
/// Returns an expression describing the lane index that can be used at
/// runtime.
Value *getAsRuntimeExpr(IRBuilderBase &Builder, const ElementCount &VF) const;
/// Returns the Kind of lane offset.
Kind getKind() const { return LaneKind; }
/// Returns true if this is the first lane of the whole vector.
bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
/// Maps the lane to a cache index based on \p VF.
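/// For example, assuming VF = vscale x 4: a First lane 2 maps to index 2,
/// while a ScalableLast lane 2 maps to index 4 + 2 = 6, fitting within the
/// 2 * 4 slots reserved by getNumCachedLanes() for scalable VFs.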
unsigned mapToCacheIndex(const ElementCount &VF) const {
switch (LaneKind) {
case VPLane::Kind::ScalableLast:
assert(VF.isScalable() && Lane < VF.getKnownMinValue());
return VF.getKnownMinValue() + Lane;
default:
assert(Lane < VF.getKnownMinValue());
return Lane;
}
}
/// Returns the maximum number of lanes that we are able to consider
/// caching for \p VF.
static unsigned getNumCachedLanes(const ElementCount &VF) {
return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
}
};
/// VPIteration represents a single point in the iteration space of the output
/// (vectorized and/or unrolled) IR loop.
struct VPIteration {
/// in [0..UF)
unsigned Part;
VPLane Lane;
VPIteration(unsigned Part, unsigned Lane,
VPLane::Kind Kind = VPLane::Kind::First)
: Part(Part), Lane(Lane, Kind) {}
VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
};
/// VPTransformState holds information passed down when "executing" a VPlan,
/// needed for generating the output IR.
struct VPTransformState {
VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
DominatorTree *DT, IRBuilderBase &Builder,
InnerLoopVectorizer *ILV, VPlan *Plan)
: VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan),
LVer(nullptr) {}
/// The chosen Vectorization and Unroll Factors of the loop being vectorized.
ElementCount VF;
unsigned UF;
/// Hold the indices to generate specific scalar instructions. Null indicates
/// that all instances are to be generated, using either scalar or vector
/// instructions.
std::optional<VPIteration> Instance;
struct DataState {
/// A type for vectorized values in the new loop. Each value from the
/// original loop, when vectorized, is represented by UF vector values in
/// the new unrolled loop, where UF is the unroll factor.
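/// For example, with UF = 2 a widened value is held as two vector values,
/// one per unrolled part: {V.part0, V.part1}.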
typedef SmallVector<Value *, 2> PerPartValuesTy;
DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
} Data;
/// Get the generated Value for a given VPValue and a given Part. Note that
/// as some Defs are still created by ILV and managed in its ValueMap, this
/// method will delegate the call to ILV in such cases in order to provide
/// callers a consistent API.
/// \see set.
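/// A minimal usage sketch (hypothetical recipe code): after producing a
/// widened value NewV for a given Part, a recipe records it and users
/// retrieve it later:
///   State.set(Def, NewV, Part);
///   Value *V = State.get(Def, Part);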
Value *get(VPValue *Def, unsigned Part);
/// Get the generated Value for a given VPValue and given Part and Lane.
Value *get(VPValue *Def, const VPIteration &Instance);
bool hasVectorValue(VPValue *Def, unsigned Part) {
auto I = Data.PerPartOutput.find(Def);
return I != Data.PerPartOutput.end() && Part < I->second.size() &&
I->second[Part];
}
bool hasAnyVectorValue(VPValue *Def) const {
return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
}
bool hasScalarValue(VPValue *Def, VPIteration Instance) {
auto I = Data.PerPartScalars.find(Def);
if (I == Data.PerPartScalars.end())
return false;
unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
return Instance.Part < I->second.size() &&
CacheIdx < I->second[Instance.Part].size() &&
I->second[Instance.Part][CacheIdx];
}
/// Set the generated Value for a given VPValue and a given Part.
void set(VPValue *Def, Value *V, unsigned Part) {
if (!Data.PerPartOutput.count(Def)) {
DataState::PerPartValuesTy Entry(UF);
Data.PerPartOutput[Def] = Entry;
}
Data.PerPartOutput[Def][Part] = V;
}
/// Reset an existing vector value for \p Def and a given \p Part.
void reset(VPValue *Def, Value *V, unsigned Part) {
auto Iter = Data.PerPartOutput.find(Def);
assert(Iter != Data.PerPartOutput.end() &&
"need to overwrite existing value");
Iter->second[Part] = V;
}
/// Set the generated scalar \p V for \p Def and the given \p Instance.
void set(VPValue *Def, Value *V, const VPIteration &Instance) {
auto Iter = Data.PerPartScalars.insert({Def, {}});
auto &PerPartVec = Iter.first->second;
while (PerPartVec.size() <= Instance.Part)
PerPartVec.emplace_back();
auto &Scalars = PerPartVec[Instance.Part];
unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
while (Scalars.size() <= CacheIdx)
Scalars.push_back(nullptr);
assert(!Scalars[CacheIdx] && "should overwrite existing value");
Scalars[CacheIdx] = V;
}
/// Reset an existing scalar value for \p Def and a given \p Instance.
void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
auto Iter = Data.PerPartScalars.find(Def);
assert(Iter != Data.PerPartScalars.end() &&
"need to overwrite existing value");
assert(Instance.Part < Iter->second.size() &&
"need to overwrite existing value");
unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
assert(CacheIdx < Iter->second[Instance.Part].size() &&
"need to overwrite existing value");
Iter->second[Instance.Part][CacheIdx] = V;
}
/// Add additional metadata to \p To that was not present on \p Orig.
///
/// Currently this is used to add the noalias annotations based on the
/// inserted memchecks. Use this for instructions that are *cloned* into the
/// vector loop.
void addNewMetadata(Instruction *To, const Instruction *Orig);
/// Add metadata from one instruction to another.
///
/// This includes both the original MDs from \p From and additional ones (\see
/// addNewMetadata). Use this for *newly created* instructions in the vector
/// loop.
void addMetadata(Instruction *To, Instruction *From);
/// Similar to the previous function but it adds the metadata to a
/// vector of instructions.
void addMetadata(ArrayRef<Value *> To, Instruction *From);
/// Set the debug location in the builder using the debug location in \p V.
void setDebugLocFromInst(const Value *V);
/// Hold state information used when constructing the CFG of the output IR,
/// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
struct CFGState {
/// The previous VPBasicBlock visited. Initially set to null.
VPBasicBlock *PrevVPBB = nullptr;
/// The previous IR BasicBlock created or used. Initially set to the new
/// header BasicBlock.
BasicBlock *PrevBB = nullptr;
/// The last IR BasicBlock in the output IR. Set to the exit block of the
/// vector loop.
BasicBlock *ExitBB = nullptr;
/// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
/// of replication, maps the BasicBlock of the last replica created.
SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
CFGState() = default;
/// Returns the BasicBlock* mapped to the pre-header of the loop region
/// containing \p R.
BasicBlock *getPreheaderBBFor(VPRecipeBase *R);
} CFG;
/// Hold a pointer to LoopInfo to register new basic blocks in the loop.
LoopInfo *LI;
/// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
DominatorTree *DT;
/// Hold a reference to the IRBuilder used to generate output IR code.
IRBuilderBase &Builder;
VPValue2ValueTy VPValue2Value;
/// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
Value *CanonicalIV = nullptr;
/// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
InnerLoopVectorizer *ILV;
/// Pointer to the VPlan for which code is generated.
VPlan *Plan;
/// Holds recipes that may generate a poison value that is used after
/// vectorization, even when their operands are not poison.
SmallPtrSet<VPRecipeBase *, 16> MayGeneratePoisonRecipes;
/// The loop object for the current parent region, or nullptr.
Loop *CurrentVectorLoop = nullptr;
/// LoopVersioning. It's only set up (non-null) if memchecks were
/// used.
///
/// This is currently only used to add no-alias metadata based on the
/// memchecks. The actual versioning is performed manually.
std::unique_ptr<LoopVersioning> LVer;
};
/// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
/// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
class VPBlockBase {
friend class VPBlockUtils;
const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
/// An optional name for the block.
std::string Name;
/// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
/// it is a topmost VPBlockBase.
VPRegionBlock *Parent = nullptr;
/// List of predecessor blocks.
SmallVector<VPBlockBase *, 1> Predecessors;
/// List of successor blocks.
SmallVector<VPBlockBase *, 1> Successors;
/// VPlan containing the block. Can only be set on the entry block of the
/// plan.
VPlan *Plan = nullptr;
/// Add \p Successor as the last successor to this block.
void appendSuccessor(VPBlockBase *Successor) {
assert(Successor && "Cannot add nullptr successor!");
Successors.push_back(Successor);
}
/// Add \p Predecessor as the last predecessor to this block.
void appendPredecessor(VPBlockBase *Predecessor) {
assert(Predecessor && "Cannot add nullptr predecessor!");
Predecessors.push_back(Predecessor);
}
/// Remove \p Predecessor from the predecessors of this block.
void removePredecessor(VPBlockBase *Predecessor) {
auto Pos = find(Predecessors, Predecessor);
assert(Pos && "Predecessor does not exist");
Predecessors.erase(Pos);
}
/// Remove \p Successor from the successors of this block.
void removeSuccessor(VPBlockBase *Successor) {
auto Pos = find(Successors, Successor);
assert(Pos && "Successor does not exist");
Successors.erase(Pos);
}
protected:
VPBlockBase(const unsigned char SC, const std::string &N)
: SubclassID(SC), Name(N) {}
public:
/// An enumeration for keeping track of the concrete subclasses of VPBlockBase
/// that are actually instantiated. Values of this enumeration are kept in the
/// SubclassID field of the VPBlockBase objects. They are used for concrete
/// type identification.
using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
virtual ~VPBlockBase() = default;
const std::string &getName() const { return Name; }
void setName(const Twine &newName) { Name = newName.str(); }
/// \return an ID for the concrete type of this object.
/// This is used to implement the classof checks. This should not be used
/// for any other purpose, as the values may change as LLVM evolves.
unsigned getVPBlockID() const { return SubclassID; }
VPRegionBlock *getParent() { return Parent; }
const VPRegionBlock *getParent() const { return Parent; }
/// \return A pointer to the plan containing the current block.
VPlan *getPlan();
const VPlan *getPlan() const;
/// Sets the pointer of the plan containing the block. The block must be the
/// entry block into the VPlan.
void setPlan(VPlan *ParentPlan);
void setParent(VPRegionBlock *P) { Parent = P; }
/// \return the VPBasicBlock that is the entry of this VPBlockBase,
/// recursively, if the latter is a VPRegionBlock. Otherwise, if this
/// VPBlockBase is a VPBasicBlock, it is returned.
const VPBasicBlock *getEntryBasicBlock() const;
VPBasicBlock *getEntryBasicBlock();
/// \return the VPBasicBlock that is the exiting block of this VPBlockBase,
/// recursively, if the latter is a VPRegionBlock. Otherwise, if this
/// VPBlockBase is a VPBasicBlock, it is returned.
const VPBasicBlock *getExitingBasicBlock() const;
VPBasicBlock *getExitingBasicBlock();
const VPBlocksTy &getSuccessors() const { return Successors; }
VPBlocksTy &getSuccessors() { return Successors; }
iterator_range<VPBlockBase **> successors() { return Successors; }
const VPBlocksTy &getPredecessors() const { return Predecessors; }
VPBlocksTy &getPredecessors() { return Predecessors; }
/// \return the successor of this VPBlockBase if it has a single successor.
/// Otherwise return a null pointer.
VPBlockBase *getSingleSuccessor() const {
return (Successors.size() == 1 ? *Successors.begin() : nullptr);
}
/// \return the predecessor of this VPBlockBase if it has a single
/// predecessor. Otherwise return a null pointer.
VPBlockBase *getSinglePredecessor() const {
return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
}
size_t getNumSuccessors() const { return Successors.size(); }
size_t getNumPredecessors() const { return Predecessors.size(); }
/// An Enclosing Block of a block B is any block containing B, including B
/// itself. \return the closest enclosing block starting from "this", which
/// has successors. \return the root enclosing block if all enclosing blocks
/// have no successors.
VPBlockBase *getEnclosingBlockWithSuccessors();
/// \return the closest enclosing block starting from "this", which has
/// predecessors. \return the root enclosing block if all enclosing blocks
/// have no predecessors.
VPBlockBase *getEnclosingBlockWithPredecessors();
/// \return the successors either attached directly to this VPBlockBase or, if
/// this VPBlockBase is the exit block of a VPRegionBlock and has no
/// successors of its own, search recursively for the first enclosing
/// VPRegionBlock that has successors and return them. If no such
/// VPRegionBlock exists, return the (empty) successors of the topmost
/// VPBlockBase reached.
const VPBlocksTy &getHierarchicalSuccessors() {
return getEnclosingBlockWithSuccessors()->getSuccessors();
}
/// \return the hierarchical successor of this VPBlockBase if it has a single
/// hierarchical successor. Otherwise return a null pointer.
VPBlockBase *getSingleHierarchicalSuccessor() {
return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
}
/// \return the predecessors either attached directly to this VPBlockBase or,
/// if this VPBlockBase is the entry block of a VPRegionBlock and has no
/// predecessors of its own, search recursively for the first enclosing
/// VPRegionBlock that has predecessors and return them. If no such
/// VPRegionBlock exists, return the (empty) predecessors of the topmost
/// VPBlockBase reached.
const VPBlocksTy &getHierarchicalPredecessors() {
return getEnclosingBlockWithPredecessors()->getPredecessors();
}
/// \return the hierarchical predecessor of this VPBlockBase if it has a
/// single hierarchical predecessor. Otherwise return a null pointer.
VPBlockBase *getSingleHierarchicalPredecessor() {
return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
}
/// Set a given VPBlockBase \p Successor as the single successor of this
/// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
/// This VPBlockBase must have no successors.
void setOneSuccessor(VPBlockBase *Successor) {
assert(Successors.empty() && "Setting one successor when others exist.");
appendSuccessor(Successor);
}
/// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
/// successors of this VPBlockBase. This VPBlockBase is not added as
/// predecessor of \p IfTrue or \p IfFalse. This VPBlockBase must have no
/// successors.
void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse) {
assert(Successors.empty() && "Setting two successors when others exist.");
appendSuccessor(IfTrue);
appendSuccessor(IfFalse);
}
/// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
/// This VPBlockBase must have no predecessors. This VPBlockBase is not added
/// as successor of any VPBasicBlock in \p NewPreds.
void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
assert(Predecessors.empty() && "Block predecessors already set.");
for (auto *Pred : NewPreds)
appendPredecessor(Pred);
}
/// Remove all the predecessors of this block.
void clearPredecessors() { Predecessors.clear(); }
/// Remove all the successors of this block.
void clearSuccessors() { Successors.clear(); }
/// The method which generates the output IR that corresponds to this
/// VPBlockBase, thereby "executing" the VPlan.
virtual void execute(VPTransformState *State) = 0;
/// Delete all blocks reachable from a given VPBlockBase, inclusive.
static void deleteCFG(VPBlockBase *Entry);
/// Return true if it is legal to hoist instructions into this block.
bool isLegalToHoistInto() {
// There are currently no constraints that prevent an instruction from being
// hoisted into a VPBlockBase.
return true;
}
/// Replace all operands of VPUsers in the block with \p NewValue and also
/// replace all uses of VPValues defined in the block with \p NewValue.
virtual void dropAllReferences(VPValue *NewValue) = 0;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void printAsOperand(raw_ostream &OS, bool PrintType) const {
OS << getName();
}
/// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
/// with \p Indent. \p SlotTracker is used to print unnamed VPValues using
/// consecutive numbers.
///
/// Note that the numbering is applied to the whole VPlan, so printing
/// individual blocks is consistent with the whole VPlan printing.
virtual void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const = 0;
/// Print plain-text dump of this VPBlockBase to \p O.
void print(raw_ostream &O) const {
VPSlotTracker SlotTracker(getPlan());
print(O, "", SlotTracker);
}
/// Print the successors of this block to \p O, prefixing all lines with \p
/// Indent.
void printSuccessors(raw_ostream &O, const Twine &Indent) const;
/// Dump this VPBlockBase to dbgs().
LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
};
/// A value that is used outside the VPlan. The operand of the user needs to be
/// added to the associated LCSSA phi node.
class VPLiveOut : public VPUser {
PHINode *Phi;
public:
VPLiveOut(PHINode *Phi, VPValue *Op)
: VPUser({Op}, VPUser::VPUserID::LiveOut), Phi(Phi) {}
/// Fixup the wrapped LCSSA phi node in the unique exit block. This simply
/// means we need to add the appropriate incoming value from the middle
/// block as exiting edges from the scalar epilogue loop (if present) are
/// already in place, and we exit the vector loop exclusively to the middle
/// block.
void fixPhi(VPlan &Plan, VPTransformState &State);
/// Returns true if the VPLiveOut uses scalars of operand \p Op.
bool usesScalars(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
PHINode *getPhi() const { return Phi; }
};
/// VPRecipeBase is a base class modeling a sequence of one or more output IR
/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
public VPDef,
public VPUser {
friend VPBasicBlock;
friend class VPBlockUtils;
/// Each VPRecipe belongs to a single VPBasicBlock.
VPBasicBlock *Parent = nullptr;
public:
VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
: VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
template <typename IterT>
VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
: VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
virtual ~VPRecipeBase() = default;
/// \return the VPBasicBlock which this VPRecipe belongs to.
VPBasicBlock *getParent() { return Parent; }
const VPBasicBlock *getParent() const { return Parent; }
/// The method which generates the output IR instructions that correspond to
/// this VPRecipe, thereby "executing" the VPlan.
virtual void execute(VPTransformState &State) = 0;
/// Insert an unlinked recipe into a basic block immediately before
/// the specified recipe.
void insertBefore(VPRecipeBase *InsertPos);
/// Insert an unlinked recipe into \p BB immediately before the insertion
/// point \p IP.
void insertBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator IP);
/// Insert an unlinked Recipe into a basic block immediately after
/// the specified Recipe.
void insertAfter(VPRecipeBase *InsertPos);
/// Unlink this recipe from its current VPBasicBlock and insert it into
/// the VPBasicBlock that MovePos lives in, right after MovePos.
void moveAfter(VPRecipeBase *MovePos);
/// Unlink this recipe and insert into BB before I.
///
/// \pre I is a valid iterator into BB.
void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);
/// This method unlinks 'this' from the containing basic block, but does not
/// delete it.
void removeFromParent();
/// This method unlinks 'this' from the containing basic block and deletes it.
///
/// \returns an iterator pointing to the element after the erased one
iplist<VPRecipeBase>::iterator eraseFromParent();
/// Returns the underlying instruction, if the recipe is a VPValue, or nullptr
/// otherwise.
Instruction *getUnderlyingInstr() {
return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
}
const Instruction *getUnderlyingInstr() const {
return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
}
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPDef *D) {
// All VPDefs are also VPRecipeBases.
return true;
}
static inline bool classof(const VPUser *U) {
return U->getVPUserID() == VPUser::VPUserID::Recipe;
}
/// Returns true if the recipe may have side-effects.
bool mayHaveSideEffects() const;
/// Returns true for PHI-like recipes.
bool isPhi() const {
return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
}
/// Returns true if the recipe may read from memory.
bool mayReadFromMemory() const;
/// Returns true if the recipe may write to memory.
bool mayWriteToMemory() const;
/// Returns true if the recipe may read from or write to memory.
bool mayReadOrWriteMemory() const {
return mayReadFromMemory() || mayWriteToMemory();
}
};
// Helper macro to define common classof implementations for recipes.
#define VP_CLASSOF_IMPL(VPDefID) \
static inline bool classof(const VPDef *D) { \
return D->getVPDefID() == VPDefID; \
} \
static inline bool classof(const VPValue *V) { \
auto *R = V->getDefiningRecipe(); \
return R && R->getVPDefID() == VPDefID; \
} \
static inline bool classof(const VPUser *U) { \
auto *R = dyn_cast<VPRecipeBase>(U); \
return R && R->getVPDefID() == VPDefID; \
} \
static inline bool classof(const VPRecipeBase *R) { \
return R->getVPDefID() == VPDefID; \
}
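// For example, VP_CLASSOF_IMPL(VPDef::VPInstructionSC) below enables the
// usual isa<VPInstruction>(Def) and dyn_cast<VPInstruction>(U) queries.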
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While, like any Recipe, it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPValue {
friend class VPlanSlp;
public:
/// VPlan opcodes, extending LLVM IR with idiomatic instructions.
enum {
FirstOrderRecurrenceSplice =
Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
// values of a first-order recurrence.
Not,
ICmpULE,
SLPLoad,
SLPStore,
ActiveLaneMask,
CanonicalIVIncrement,
CanonicalIVIncrementNUW,
// The next two are similar to the above, but instead increment the
// canonical IV separately for each unrolled part.
CanonicalIVIncrementForPart,
CanonicalIVIncrementForPartNUW,
BranchOnCount,
BranchOnCond
};
private:
typedef unsigned char OpcodeTy;
OpcodeTy Opcode;
FastMathFlags FMF;
DebugLoc DL;
/// An optional name that can be used for the generated IR instruction.
const std::string Name;
/// Utility method serving execute(): generates a single instance of the
/// modeled instruction.
void generateInstruction(VPTransformState &State, unsigned Part);
protected:
void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }
public:
VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands, DebugLoc DL,
const Twine &Name = "")
: VPRecipeBase(VPDef::VPInstructionSC, Operands), VPValue(this),
Opcode(Opcode), DL(DL), Name(Name.str()) {}
VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands,
DebugLoc DL = {}, const Twine &Name = "")
: VPInstruction(Opcode, ArrayRef<VPValue *>(Operands), DL, Name) {}
VP_CLASSOF_IMPL(VPDef::VPInstructionSC)
VPInstruction *clone() const {
SmallVector<VPValue *, 2> Operands(operands());
return new VPInstruction(Opcode, Operands, DL, Name);
}
unsigned getOpcode() const { return Opcode; }
/// Generate the instruction.
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the VPInstruction to \p O.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
/// Print the VPInstruction to dbgs() (for debugging).
LLVM_DUMP_METHOD void dump() const;
#endif
/// Return true if this instruction may modify memory.
bool mayWriteToMemory() const {
// TODO: we can use attributes of the called function to rule out memory
// modifications.
return Opcode == Instruction::Store || Opcode == Instruction::Call ||
Opcode == Instruction::Invoke || Opcode == SLPStore;
}
bool hasResult() const {
// CallInst may or may not have a result, depending on the called function.
// Conservatively assume calls have results for now.
switch (getOpcode()) {
case Instruction::Ret:
case Instruction::Br:
case Instruction::Store:
case Instruction::Switch:
case Instruction::IndirectBr:
case Instruction::Resume:
case Instruction::CatchRet:
case Instruction::Unreachable:
case Instruction::Fence:
case Instruction::AtomicRMW:
case VPInstruction::BranchOnCond:
case VPInstruction::BranchOnCount:
return false;
default:
return true;
}
}
/// Set the fast-math flags.
void setFastMathFlags(FastMathFlags FMFNew);
/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
if (getOperand(0) != Op)
return false;
switch (getOpcode()) {
default:
return false;
case VPInstruction::ActiveLaneMask:
case VPInstruction::CanonicalIVIncrement:
case VPInstruction::CanonicalIVIncrementNUW:
case VPInstruction::CanonicalIVIncrementForPart:
case VPInstruction::CanonicalIVIncrementForPartNUW:
case VPInstruction::BranchOnCount:
return true;
}
llvm_unreachable("switch should return");
}
};
/// VPWidenRecipe is a recipe for producing a vector-type copy of its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
template <typename IterT>
VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
: VPRecipeBase(VPDef::VPWidenSC, Operands), VPValue(this, &I) {}
~VPWidenRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenSC)
/// Produce widened copies of all Ingredients.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
};
/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {
/// ID of the vector intrinsic to call when widening the call. If set to
/// Intrinsic::not_intrinsic, a library call will be used instead.
Intrinsic::ID VectorIntrinsicID;
public:
template <typename IterT>
VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments,
Intrinsic::ID VectorIntrinsicID)
: VPRecipeBase(VPDef::VPWidenCallSC, CallArguments), VPValue(this, &I),
VectorIntrinsicID(VectorIntrinsicID) {}
~VPWidenCallRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenCallSC)
/// Produce a widened version of the call instruction.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
};
/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {
/// Is the condition of the select loop invariant?
bool InvariantCond;
public:
template <typename IterT>
VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
bool InvariantCond)
: VPRecipeBase(VPDef::VPWidenSelectSC, Operands), VPValue(this, &I),
InvariantCond(InvariantCond) {}
~VPWidenSelectRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenSelectSC)
/// Produce a widened version of the select instruction.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
};
/// A recipe for handling GEP instructions.
class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
bool IsPtrLoopInvariant;
SmallBitVector IsIndexLoopInvariant;
public:
template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
: VPRecipeBase(VPDef::VPWidenGEPSC, Operands), VPValue(this, GEP),
IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
template <typename IterT>
VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
Loop *OrigLoop)
: VPRecipeBase(VPDef::VPWidenGEPSC, Operands), VPValue(this, GEP),
IsIndexLoopInvariant(GEP->getNumIndices(), false) {
IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
for (auto Index : enumerate(GEP->indices()))
IsIndexLoopInvariant[Index.index()] =
OrigLoop->isLoopInvariant(Index.value().get());
}
~VPWidenGEPRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenGEPSC)
/// Generate the gep nodes.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
};
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase, public VPValue {
PHINode *IV;
const InductionDescriptor &IndDesc;
bool NeedsVectorIV;
public:
VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
const InductionDescriptor &IndDesc,
bool NeedsVectorIV)
: VPRecipeBase(VPDef::VPWidenIntOrFpInductionSC, {Start, Step}),
VPValue(this, IV), IV(IV), IndDesc(IndDesc),
NeedsVectorIV(NeedsVectorIV) {}
VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
const InductionDescriptor &IndDesc,
TruncInst *Trunc, bool NeedsVectorIV)
: VPRecipeBase(VPDef::VPWidenIntOrFpInductionSC, {Start, Step}),
VPValue(this, Trunc), IV(IV), IndDesc(IndDesc),
NeedsVectorIV(NeedsVectorIV) {}
~VPWidenIntOrFpInductionRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenIntOrFpInductionSC)
/// Generate the vectorized and scalarized versions of the phi node as
/// needed by their users.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Returns the start value of the induction.
VPValue *getStartValue() { return getOperand(0); }
const VPValue *getStartValue() const { return getOperand(0); }
/// Returns the step value of the induction.
VPValue *getStepValue() { return getOperand(1); }
const VPValue *getStepValue() const { return getOperand(1); }
/// Returns the first defined value as a TruncInst, if it is one, or nullptr
/// otherwise.
TruncInst *getTruncInst() {
return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
}
const TruncInst *getTruncInst() const {
return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
}
PHINode *getPHINode() { return IV; }
/// Returns the induction descriptor for the recipe.
const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }
/// Returns true if the induction is canonical, i.e. starting at 0 and
/// incremented by UF * VF (= the original IV is incremented by 1).
bool isCanonical() const;
/// Returns the scalar type of the induction.
const Type *getScalarType() const {
const TruncInst *TruncI = getTruncInst();
return TruncI ? TruncI->getType() : IV->getType();
}
/// Returns true if a vector phi needs to be created for the induction.
bool needsVectorIV() const { return NeedsVectorIV; }
};
/// A pure virtual base class for all recipes modeling header phis, including
/// phis for first order recurrences, pointer inductions and reductions. The
/// start value is the first operand of the recipe and the incoming value from
/// the backedge is the second operand.
///
/// Inductions are modeled using the following sub-classes:
/// * VPCanonicalIVPHIRecipe: Canonical scalar induction of the vector loop,
/// starting at a specified value (zero for the main vector loop, the resume
/// value for the epilogue vector loop) and stepping by 1. The induction
/// controls exiting of the vector loop by comparing against the vector trip
/// count. Produces a single scalar PHI for the induction value per
/// iteration.
/// * VPWidenIntOrFpInductionRecipe: Generates vector values for integer and
/// floating point inductions with arbitrary start and step values. Produces
/// a vector PHI per-part.
/// * VPDerivedIVRecipe: Converts the canonical IV value to the corresponding
/// value of an IV with different start and step values. Produces a single
/// scalar value per iteration.
/// * VPScalarIVStepsRecipe: Generates scalar values per-lane based on a
/// canonical or derived induction.
/// * VPWidenPointerInductionRecipe: Generate vector and scalar values for a
/// pointer induction. Produces either a vector PHI per-part or scalar values
/// per-lane based on the canonical induction.
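/// For example (a sketch), an input IR induction starting at 5 with step 3
/// may be modeled by a VPDerivedIVRecipe computing 5 + 3 * CanonicalIV each
/// iteration, with a VPScalarIVStepsRecipe producing the per-lane scalar
/// steps for its users.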
class VPHeaderPHIRecipe : public VPRecipeBase, public VPValue {
protected:
VPHeaderPHIRecipe(unsigned char VPDefID, PHINode *Phi,
VPValue *Start = nullptr)
: VPRecipeBase(VPDefID, {}), VPValue(this, Phi) {
if (Start)
addOperand(Start);
}
public:
~VPHeaderPHIRecipe() override = default;
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPRecipeBase *B) {
return B->getVPDefID() >= VPDef::VPFirstHeaderPHISC &&
B->getVPDefID() <= VPDef::VPLastPHISC;
}
static inline bool classof(const VPValue *V) {
auto *B = V->getDefiningRecipe();
return B && B->getVPDefID() >= VPRecipeBase::VPFirstHeaderPHISC &&
B->getVPDefID() <= VPRecipeBase::VPLastPHISC;
}
/// Generate the phi nodes.
void execute(VPTransformState &State) override = 0;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override = 0;
#endif
/// Returns the start value of the phi, if one is set.
VPValue *getStartValue() {
return getNumOperands() == 0 ? nullptr : getOperand(0);
}
VPValue *getStartValue() const {
return getNumOperands() == 0 ? nullptr : getOperand(0);
}
/// Update the start value of the recipe.
void setStartValue(VPValue *V) { setOperand(0, V); }
/// Returns the incoming value from the loop backedge.
VPValue *getBackedgeValue() {
return getOperand(1);
}
/// Returns the backedge value as a recipe. The backedge value is guaranteed
/// to be a recipe.
VPRecipeBase &getBackedgeRecipe() {
return *getBackedgeValue()->getDefiningRecipe();
}
};
class VPWidenPointerInductionRecipe : public VPHeaderPHIRecipe {
const InductionDescriptor &IndDesc;
bool IsScalarAfterVectorization;
public:
/// Create a new VPWidenPointerInductionRecipe for \p Phi with start value \p
/// Start and step value \p Step.
VPWidenPointerInductionRecipe(PHINode *Phi, VPValue *Start, VPValue *Step,
const InductionDescriptor &IndDesc,
bool IsScalarAfterVectorization)
: VPHeaderPHIRecipe(VPDef::VPWidenPointerInductionSC, Phi),
IndDesc(IndDesc),
IsScalarAfterVectorization(IsScalarAfterVectorization) {
addOperand(Start);
addOperand(Step);
}
~VPWidenPointerInductionRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenPointerInductionSC)
/// Generate vector values for the pointer induction.
void execute(VPTransformState &State) override;
/// Returns true if only scalar values will be generated.
bool onlyScalarsGenerated(ElementCount VF);
/// Returns the induction descriptor for the recipe.
const InductionDescriptor &getInductionDescriptor() const { return IndDesc; }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
};
/// A recipe for handling header phis that are widened in the vector loop.
/// In the VPlan native path, all incoming VPValues & VPBasicBlock pairs are
/// managed in the recipe directly.
class VPWidenPHIRecipe : public VPHeaderPHIRecipe {
/// List of incoming blocks. Only used in the VPlan native path.
SmallVector<VPBasicBlock *, 2> IncomingBlocks;
public:
/// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr)
: VPHeaderPHIRecipe(VPDef::VPWidenPHISC, Phi) {
if (Start)
addOperand(Start);
}
~VPWidenPHIRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenPHISC)
/// Generate the phi/select nodes.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
addOperand(IncomingV);
IncomingBlocks.push_back(IncomingBlock);
}
/// Returns the \p I th incoming VPBasicBlock.
VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
/// Returns the \p I th incoming VPValue.
VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
};
/// A recipe for handling first-order recurrence phis. The start value is the
/// first operand of the recipe and the incoming value from the backedge is the
/// second operand.
struct VPFirstOrderRecurrencePHIRecipe : public VPHeaderPHIRecipe {
VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
: VPHeaderPHIRecipe(VPDef::VPFirstOrderRecurrencePHISC, Phi, &Start) {}
VP_CLASSOF_IMPL(VPDef::VPFirstOrderRecurrencePHISC)
static inline bool classof(const VPHeaderPHIRecipe *R) {
return R->getVPDefID() == VPDef::VPFirstOrderRecurrencePHISC;
}
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
};
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
class VPReductionPHIRecipe : public VPHeaderPHIRecipe {
/// Descriptor for the reduction.
const RecurrenceDescriptor &RdxDesc;
/// The phi is part of an in-loop reduction.
bool IsInLoop;
/// The phi is part of an ordered reduction. Requires IsInLoop to be true.
bool IsOrdered;
public:
/// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
/// RdxDesc.
VPReductionPHIRecipe(PHINode *Phi, const RecurrenceDescriptor &RdxDesc,
VPValue &Start, bool IsInLoop = false,
bool IsOrdered = false)
: VPHeaderPHIRecipe(VPDef::VPReductionPHISC, Phi, &Start),
RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
}
~VPReductionPHIRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPReductionPHISC)
static inline bool classof(const VPHeaderPHIRecipe *R) {
return R->getVPDefID() == VPDef::VPReductionPHISC;
}
/// Generate the phi/select nodes.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
const RecurrenceDescriptor &getRecurrenceDescriptor() const {
return RdxDesc;
}
/// Returns true if the phi is part of an ordered reduction.
bool isOrdered() const { return IsOrdered; }
/// Returns true if the phi is part of an in-loop reduction.
bool isInLoop() const { return IsInLoop; }
};
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
PHINode *Phi;
public:
/// The blend operation is a User of the incoming values and of their
/// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
/// might be incoming with a full mask for which there is no VPValue.
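/// For example (a sketch), operands [I0, M0, I1, M1] generate roughly a
/// select chain R = select(M1, I1, I0), with later incoming values taking
/// precedence via their masks.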
VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
: VPRecipeBase(VPDef::VPBlendSC, Operands), VPValue(this, Phi), Phi(Phi) {
assert(Operands.size() > 0 &&
((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
"Expected either a single incoming value or a positive even number "
"of operands");
}
VP_CLASSOF_IMPL(VPDef::VPBlendSC)
/// Return the number of incoming values, taking into account that a single
/// incoming value has no mask.
unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }
/// Return incoming value number \p Idx.
VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }
/// Return mask number \p Idx.
VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }
/// Generate the phi/select nodes.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
// Recursing through Blend recipes only, must terminate at header phis at the
// latest.
return all_of(users(),
[this](VPUser *U) { return U->onlyFirstLaneUsed(this); });
}
};
/// VPInterleaveRecipe is a recipe for transforming an interleave group of
/// loads or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
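/// For example (a sketch), a factor-2 load group reading A[2i] and A[2i+1]
/// becomes one wide load of 2 * VF elements followed by two shufflevectors
/// that de-interleave the even and odd lanes, one per group member.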
class VPInterleaveRecipe : public VPRecipeBase {
const InterleaveGroup<Instruction> *IG;
bool HasMask = false;
public:
VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
ArrayRef<VPValue *> StoredValues, VPValue *Mask)
: VPRecipeBase(VPDef::VPInterleaveSC, {Addr}), IG(IG) {
for (unsigned i = 0; i < IG->getFactor(); ++i)
if (Instruction *I = IG->getMember(i)) {
if (I->getType()->isVoidTy())
continue;
new VPValue(I, this);
}
for (auto *SV : StoredValues)
addOperand(SV);
if (Mask) {
HasMask = true;
addOperand(Mask);
}
}
~VPInterleaveRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPInterleaveSC)
/// Return the address accessed by this recipe.
VPValue *getAddr() const {
return getOperand(0); // Address is the 1st, mandatory operand.
}
/// Return the mask used by this recipe. Note that a full mask is represented
/// by a nullptr.
VPValue *getMask() const {
// Mask is optional and therefore the last operand.
return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
}
/// Return the VPValues stored by this interleave group. If it is a load
/// interleave group, return an empty ArrayRef.
ArrayRef<VPValue *> getStoredValues() const {
// The first operand is the address, followed by the stored values, followed
// by an optional mask.
return ArrayRef<VPValue *>(op_begin(), getNumOperands())
.slice(1, getNumStoreOperands());
}
/// Generate the wide load or store, and shuffles.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
/// Returns the number of stored operands of this interleave group. Returns 0
/// for load interleave groups.
unsigned getNumStoreOperands() const {
return getNumOperands() - (HasMask ? 2 : 1);
}
/// The recipe only uses the first lane of the address.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return Op == getAddr() && !llvm::is_contained(getStoredValues(), Op);
}
};
/// A recipe to represent in-loop reduction operations, performing a reduction
/// on a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
/// The recurrence descriptor for the reduction in question.
const RecurrenceDescriptor *RdxDesc;
/// Pointer to the TTI, needed to create the target reduction.
const TargetTransformInfo *TTI;
public:
VPReductionRecipe(const RecurrenceDescriptor *R, Instruction *I,
VPValue *ChainOp, VPValue *VecOp, VPValue *CondOp,
const TargetTransformInfo *TTI)
: VPRecipeBase(VPDef::VPReductionSC, {ChainOp, VecOp}), VPValue(this, I),
RdxDesc(R), TTI(TTI) {
if (CondOp)
addOperand(CondOp);
}
~VPReductionRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPReductionSC)
/// Generate the reduction in the loop
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// The VPValue of the scalar Chain being accumulated.
VPValue *getChainOp() const { return getOperand(0); }
/// The VPValue of the vector value to be reduced.
VPValue *getVecOp() const { return getOperand(1); }
/// The VPValue of the condition for the block.
VPValue *getCondOp() const {
return getNumOperands() > 2 ? getOperand(2) : nullptr;
}
};
/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to
/// be uniform, only one copy, for lane zero, will be generated.
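/// For example (a sketch), with VF = 4 and UF = 2 a non-uniform replicate
/// recipe emits 8 scalar copies (one per part and lane), while a uniform one
/// emits only a lane-zero copy per part.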
class VPReplicateRecipe : public VPRecipeBase, public VPValue {
/// Indicator if only a single replica per lane is needed.
bool IsUniform;
/// Indicator if the replicas are also predicated.
bool IsPredicated;
/// Indicator if the scalar values should also be packed into a vector.
bool AlsoPack;
public:
template <typename IterT>
VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
bool IsUniform, bool IsPredicated = false)
: VPRecipeBase(VPDef::VPReplicateSC, Operands), VPValue(this, I),
IsUniform(IsUniform), IsPredicated(IsPredicated) {
// Retain the previous behavior of predicateInstructions(), where an
// insert-element of a predicated instruction got hoisted into the
// predicated basic block iff it was its only user. This is achieved by
// having predicated instructions also pack their values into a vector by
// default unless they have a replicated user which uses their scalar value.
AlsoPack = IsPredicated && !I->use_empty();
}
~VPReplicateRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPReplicateSC)
/// Generate replicas of the desired Ingredient. Replicas will be generated
/// for all parts and lanes unless a specific part and lane are specified in
/// the \p State.
void execute(VPTransformState &State) override;
void setAlsoPack(bool Pack) { AlsoPack = Pack; }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
bool isUniform() const { return IsUniform; }
bool isPacked() const { return AlsoPack; }
bool isPredicated() const { return IsPredicated; }
/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return isUniform();
}
/// Returns true if the recipe uses scalars of operand \p Op.
bool usesScalars(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
};
/// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase {
public:
VPBranchOnMaskRecipe(VPValue *BlockInMask)
: VPRecipeBase(VPDef::VPBranchOnMaskSC, {}) {
if (BlockInMask) // nullptr means all-one mask.
addOperand(BlockInMask);
}
VP_CLASSOF_IMPL(VPDef::VPBranchOnMaskSC)
/// Generate the extraction of the appropriate bit from the block mask and the
/// conditional branch.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override {
O << Indent << "BRANCH-ON-MASK ";
if (VPValue *Mask = getMask())
Mask->printAsOperand(O, SlotTracker);
else
O << " All-One";
}
#endif
/// Return the mask used by this recipe. Note that a full mask is represented
/// by a nullptr.
VPValue *getMask() const {
assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
// Mask is optional.
return getNumOperands() == 1 ? getOperand(0) : nullptr;
}
/// Returns true if the recipe uses scalars of operand \p Op.
bool usesScalars(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
};
/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
/// Construct a VPPredInstPHIRecipe given \p PredV, whose value needs a phi
/// node after merging back from a Branch-on-Mask.
VPPredInstPHIRecipe(VPValue *PredV)
: VPRecipeBase(VPDef::VPPredInstPHISC, PredV), VPValue(this) {}
~VPPredInstPHIRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPPredInstPHISC)
/// Generates phi nodes for live-outs as needed to retain SSA form.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Returns true if the recipe uses scalars of operand \p Op.
bool usesScalars(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
};
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
Instruction &Ingredient;
// Whether the loaded-from / stored-to addresses are consecutive.
bool Consecutive;
// Whether the consecutive loaded/stored addresses are in reverse order.
bool Reverse;
void setMask(VPValue *Mask) {
if (!Mask)
return;
addOperand(Mask);
}
bool isMasked() const {
return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
}
public:
VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
bool Consecutive, bool Reverse)
: VPRecipeBase(VPDef::VPWidenMemoryInstructionSC, {Addr}),
Ingredient(Load), Consecutive(Consecutive), Reverse(Reverse) {
assert((Consecutive || !Reverse) && "Reverse implies consecutive");
new VPValue(this, &Load);
setMask(Mask);
}
VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
VPValue *StoredValue, VPValue *Mask,
bool Consecutive, bool Reverse)
: VPRecipeBase(VPDef::VPWidenMemoryInstructionSC, {Addr, StoredValue}),
Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
assert((Consecutive || !Reverse) && "Reverse implies consecutive");
setMask(Mask);
}
VP_CLASSOF_IMPL(VPDef::VPWidenMemoryInstructionSC)
/// Return the address accessed by this recipe.
VPValue *getAddr() const {
return getOperand(0); // Address is the 1st, mandatory operand.
}
/// Return the mask used by this recipe. Note that a full mask is represented
/// by a nullptr.
VPValue *getMask() const {
// Mask is optional and therefore the last operand.
return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
}
/// Returns true if this recipe is a store.
bool isStore() const { return isa<StoreInst>(Ingredient); }
/// Return the value stored by this recipe.
VPValue *getStoredValue() const {
assert(isStore() && "Stored value only available for store instructions");
return getOperand(1); // Stored value is the 2nd, mandatory operand.
}
// Return whether the loaded-from / stored-to addresses are consecutive.
bool isConsecutive() const { return Consecutive; }
// Return whether the consecutive loaded/stored addresses are in reverse
// order.
bool isReverse() const { return Reverse; }
/// Generate the wide load/store.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
// Widened, consecutive memory operations only demand the first lane of
// their address, unless the same operand is also stored. The latter can
// happen with opaque pointers.
return Op == getAddr() && isConsecutive() &&
(!isStore() || Op != getStoredValue());
}
Instruction &getIngredient() const { return Ingredient; }
};
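// Operand layout sketch, per the constructors above (the mask is optional and
// always last):
//   load:  {Addr}              or {Addr, Mask}
//   store: {Addr, StoredValue} or {Addr, StoredValue, Mask}
// This is why isMasked() checks for 2 (load) or 3 (store) operands.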
/// Recipe to expand a SCEV expression.
class VPExpandSCEVRecipe : public VPRecipeBase, public VPValue {
const SCEV *Expr;
ScalarEvolution &SE;
public:
VPExpandSCEVRecipe(const SCEV *Expr, ScalarEvolution &SE)
: VPRecipeBase(VPDef::VPExpandSCEVSC, {}), VPValue(this), Expr(Expr),
SE(SE) {}
~VPExpandSCEVRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPExpandSCEVSC)
/// Generate the expansion of the SCEV expression.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
const SCEV *getSCEV() const { return Expr; }
};
/// Canonical scalar induction phi of the vector loop, starting at the
/// specified start value (either 0, or the resume value when vectorizing the
/// epilogue loop). VPWidenCanonicalIVRecipe represents the vector version of
/// the canonical induction variable.
class VPCanonicalIVPHIRecipe : public VPHeaderPHIRecipe {
DebugLoc DL;
public:
VPCanonicalIVPHIRecipe(VPValue *StartV, DebugLoc DL)
: VPHeaderPHIRecipe(VPDef::VPCanonicalIVPHISC, nullptr, StartV), DL(DL) {}
~VPCanonicalIVPHIRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPCanonicalIVPHISC)
static inline bool classof(const VPHeaderPHIRecipe *D) {
return D->getVPDefID() == VPDef::VPCanonicalIVPHISC;
}
/// Generate the canonical scalar induction phi of the vector loop.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Returns the scalar type of the induction.
const Type *getScalarType() const {
return getOperand(0)->getLiveInIRValue()->getType();
}
/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
/// Check if the induction described by \p ID is canonical, i.e. has the same
/// start, step (of 1), and type as the canonical IV.
bool isCanonical(const InductionDescriptor &ID, Type *Ty) const;
};
/// A recipe for generating the active lane mask for the vector loop that is
/// used to predicate the vector operations.
/// TODO: It would be good to use the existing VPWidenPHIRecipe instead and
/// remove VPActiveLaneMaskPHIRecipe.
class VPActiveLaneMaskPHIRecipe : public VPHeaderPHIRecipe {
DebugLoc DL;
public:
VPActiveLaneMaskPHIRecipe(VPValue *StartMask, DebugLoc DL)
: VPHeaderPHIRecipe(VPDef::VPActiveLaneMaskPHISC, nullptr, StartMask),
DL(DL) {}
~VPActiveLaneMaskPHIRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPActiveLaneMaskPHISC)
static inline bool classof(const VPHeaderPHIRecipe *D) {
return D->getVPDefID() == VPDef::VPActiveLaneMaskPHISC;
}
/// Generate the active lane mask phi of the vector loop.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
};
/// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
public:
VPWidenCanonicalIVRecipe(VPCanonicalIVPHIRecipe *CanonicalIV)
: VPRecipeBase(VPDef::VPWidenCanonicalIVSC, {CanonicalIV}),
VPValue(this) {}
~VPWidenCanonicalIVRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPWidenCanonicalIVSC)
/// Generate a canonical vector induction variable of the vector loop, with
/// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
/// step = <VF*UF, VF*UF, ..., VF*UF>.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
/// Returns the scalar type of the induction.
const Type *getScalarType() const {
return cast<VPCanonicalIVPHIRecipe>(getOperand(0)->getDefiningRecipe())
->getScalarType();
}
};
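// Worked example (illustrative): with VF = 4 and UF = 2, part 0 of the widened
// canonical IV starts at <0, 1, 2, 3> and part 1 at <4, 5, 6, 7>; both parts
// advance by <8, 8, 8, 8> (= VF * UF) each vector iteration.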
/// A recipe for converting the canonical IV value to the corresponding value of
/// an IV with different start and step values, using Start + CanonicalIV *
/// Step.
class VPDerivedIVRecipe : public VPRecipeBase, public VPValue {
/// The type of the result value. It may be smaller than the type of the
/// induction and in this case it will get truncated to ResultTy.
Type *ResultTy;
/// Induction descriptor for the induction the canonical IV is transformed to.
const InductionDescriptor &IndDesc;
public:
VPDerivedIVRecipe(const InductionDescriptor &IndDesc, VPValue *Start,
VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step,
Type *ResultTy)
: VPRecipeBase(VPDef::VPDerivedIVSC, {Start, CanonicalIV, Step}),
VPValue(this), ResultTy(ResultTy), IndDesc(IndDesc) {}
~VPDerivedIVRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPDerivedIVSC)
/// Generate the transformed value of the induction at offset StartValue (1st
/// operand) + IV (2nd operand) * StepValue (3rd operand).
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
VPValue *getStartValue() const { return getOperand(0); }
VPValue *getCanonicalIV() const { return getOperand(1); }
VPValue *getStepValue() const { return getOperand(2); }
/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
};
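// Worked example (illustrative): for an induction with start 3 and step 5, the
// derived value at canonical IV 4 is 3 + 4 * 5 = 23; if ResultTy is narrower
// than the computation type, the result is truncated to ResultTy.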
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their scalar values.
class VPScalarIVStepsRecipe : public VPRecipeBase, public VPValue {
const InductionDescriptor &IndDesc;
public:
VPScalarIVStepsRecipe(const InductionDescriptor &IndDesc, VPValue *IV,
VPValue *Step)
: VPRecipeBase(VPDef::VPScalarIVStepsSC, {IV, Step}), VPValue(this),
IndDesc(IndDesc) {}
~VPScalarIVStepsRecipe() override = default;
VP_CLASSOF_IMPL(VPDef::VPScalarIVStepsSC)
/// Generate the scalarized versions of the phi node as needed by their users.
void execute(VPTransformState &State) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif
VPValue *getStepValue() const { return getOperand(1); }
/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
return true;
}
};
/// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
/// holds a sequence of zero or more VPRecipes, each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI
/// recipes.
class VPBasicBlock : public VPBlockBase {
public:
using RecipeListTy = iplist<VPRecipeBase>;
private:
/// The VPRecipes held in the order of output instructions to generate.
RecipeListTy Recipes;
public:
VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
: VPBlockBase(VPBasicBlockSC, Name.str()) {
if (Recipe)
appendRecipe(Recipe);
}
~VPBasicBlock() override {
while (!Recipes.empty())
Recipes.pop_back();
}
/// Instruction iterators...
using iterator = RecipeListTy::iterator;
using const_iterator = RecipeListTy::const_iterator;
using reverse_iterator = RecipeListTy::reverse_iterator;
using const_reverse_iterator = RecipeListTy::const_reverse_iterator;
//===--------------------------------------------------------------------===//
/// Recipe iterator methods
///
inline iterator begin() { return Recipes.begin(); }
inline const_iterator begin() const { return Recipes.begin(); }
inline iterator end() { return Recipes.end(); }
inline const_iterator end() const { return Recipes.end(); }
inline reverse_iterator rbegin() { return Recipes.rbegin(); }
inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
inline reverse_iterator rend() { return Recipes.rend(); }
inline const_reverse_iterator rend() const { return Recipes.rend(); }
inline size_t size() const { return Recipes.size(); }
inline bool empty() const { return Recipes.empty(); }
inline const VPRecipeBase &front() const { return Recipes.front(); }
inline VPRecipeBase &front() { return Recipes.front(); }
inline const VPRecipeBase &back() const { return Recipes.back(); }
inline VPRecipeBase &back() { return Recipes.back(); }
/// Returns a reference to the list of recipes.
RecipeListTy &getRecipeList() { return Recipes; }
/// Returns a pointer to a member of the recipe list.
static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
return &VPBasicBlock::Recipes;
}
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPBlockBase *V) {
return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
}
void insert(VPRecipeBase *Recipe, iterator InsertPt) {
assert(Recipe && "No recipe to append.");
assert(!Recipe->Parent && "Recipe already in VPlan");
Recipe->Parent = this;
Recipes.insert(InsertPt, Recipe);
}
/// Augment the existing recipes of a VPBasicBlock with an additional
/// \p Recipe as the last recipe.
void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }
/// The method which generates the output IR instructions that correspond to
/// this VPBasicBlock, thereby "executing" the VPlan.
void execute(VPTransformState *State) override;
/// Return the position of the first non-phi node recipe in the block.
iterator getFirstNonPhi();
/// Returns an iterator range over the PHI-like recipes in the block.
iterator_range<iterator> phis() {
return make_range(begin(), getFirstNonPhi());
}
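// Usage sketch (hypothetical): visit only the header phis of this block.
//   for (VPRecipeBase &R : VPBB->phis())
//     if (auto *CanIV = dyn_cast<VPCanonicalIVPHIRecipe>(&R))
//       ...;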
void dropAllReferences(VPValue *NewValue) override;
/// Split current block at \p SplitAt by inserting a new block between the
/// current block and its successors and moving all recipes starting at
/// SplitAt to the new block. Returns the new block.
VPBasicBlock *splitAt(iterator SplitAt);
VPRegionBlock *getEnclosingLoopRegion();
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
/// SlotTracker is used to print unnamed VPValues using consecutive numbers.
///
/// Note that the numbering is applied to the whole VPlan, so printing
/// individual blocks is consistent with the whole VPlan printing.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
using VPBlockBase::print; // Get the print(raw_ostream &O) version.
#endif
/// If the block has multiple successors, return the branch recipe terminating
/// the block. If it has no successors or only a single one, return nullptr.
VPRecipeBase *getTerminator();
const VPRecipeBase *getTerminator() const;
/// Returns true if the block is exiting its parent region.
bool isExiting() const;
private:
/// Create an IR BasicBlock to hold the output instructions generated by this
/// VPBasicBlock, and return it. Update the CFGState accordingly.
BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};
/// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
/// which form a Single-Entry-Single-Exiting subgraph of the output IR CFG.
/// A VPRegionBlock may indicate that its contents are to be replicated several
/// times. This is designed to support predicated scalarization, in which a
/// scalar if-then code structure needs to be generated VF * UF times. Having
/// this replication indicator helps to keep a single model for multiple
/// candidate VF's. The actual replication takes place only once the desired VF
/// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
/// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
VPBlockBase *Entry;
/// Hold the Single Exiting block of the SESE region modelled by the
/// VPRegionBlock.
VPBlockBase *Exiting;
/// An indicator whether this region is to generate multiple replicated
/// instances of output IR corresponding to its VPBlockBases.
bool IsReplicator;
public:
VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exiting,
const std::string &Name = "", bool IsReplicator = false)
: VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exiting(Exiting),
IsReplicator(IsReplicator) {
assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
assert(Exiting->getSuccessors().empty() && "Exit block has successors.");
Entry->setParent(this);
Exiting->setParent(this);
}
VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
: VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exiting(nullptr),
IsReplicator(IsReplicator) {}
~VPRegionBlock() override {
if (Entry) {
VPValue DummyValue;
Entry->dropAllReferences(&DummyValue);
deleteCFG(Entry);
}
}
/// Method to support type inquiry through isa, cast, and dyn_cast.
static inline bool classof(const VPBlockBase *V) {
return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
}
const VPBlockBase *getEntry() const { return Entry; }
VPBlockBase *getEntry() { return Entry; }
/// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
/// EntryBlock must have no predecessors.
void setEntry(VPBlockBase *EntryBlock) {
assert(EntryBlock->getPredecessors().empty() &&
"Entry block cannot have predecessors.");
Entry = EntryBlock;
EntryBlock->setParent(this);
}
const VPBlockBase *getExiting() const { return Exiting; }
VPBlockBase *getExiting() { return Exiting; }
/// Set \p ExitingBlock as the exiting VPBlockBase of this VPRegionBlock. \p
/// ExitingBlock must have no successors.
void setExiting(VPBlockBase *ExitingBlock) {
assert(ExitingBlock->getSuccessors().empty() &&
"Exit block cannot have successors.");
Exiting = ExitingBlock;
ExitingBlock->setParent(this);
}
/// Returns the pre-header VPBasicBlock of the loop region.
VPBasicBlock *getPreheaderVPBB() {
assert(!isReplicator() && "should only get pre-header of loop regions");
return getSinglePredecessor()->getExitingBasicBlock();
}
/// An indicator whether this region is to generate multiple replicated
/// instances of output IR corresponding to its VPBlockBases.
bool isReplicator() const { return IsReplicator; }
/// The method which generates the output IR instructions that correspond to
/// this VPRegionBlock, thereby "executing" the VPlan.
void execute(VPTransformState *State) override;
void dropAllReferences(VPValue *NewValue) override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
/// \p Indent. \p SlotTracker is used to print unnamed VPValues using
/// consecutive numbers.
///
/// Note that the numbering is applied to the whole VPlan, so printing
/// individual regions is consistent with the whole VPlan printing.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
using VPBlockBase::print; // Get the print(raw_ostream &O) version.
#endif
};
/// VPlan models a candidate for vectorization, encoding various decisions taken
/// to produce efficient output IR, including which branches, basic-blocks and
/// output IR instructions to generate, and their cost. VPlan holds a
/// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
/// VPBlock.
class VPlan {
friend class VPlanPrinter;
friend class VPSlotTracker;
/// Hold the single entry to the Hierarchical CFG of the VPlan.
VPBlockBase *Entry;
/// Holds the VFs applicable to this VPlan.
SmallSetVector<ElementCount, 2> VFs;
/// Holds the UFs applicable to this VPlan. If empty, the VPlan is valid for
/// any UF.
SmallSetVector<unsigned, 2> UFs;
/// Holds the name of the VPlan, for printing.
std::string Name;
/// Holds all the external definitions created for this VPlan. External
/// definitions must be immutable and hold a pointer to their underlying IR.
DenseMap<Value *, VPValue *> VPExternalDefs;
/// Represents the trip count of the original loop, for folding
/// the tail.
VPValue *TripCount = nullptr;
/// Represents the backedge taken count of the original loop, for folding
/// the tail. It equals TripCount - 1.
VPValue *BackedgeTakenCount = nullptr;
/// Represents the vector trip count.
VPValue VectorTripCount;
/// Holds a mapping between Values and their corresponding VPValue inside
/// VPlan.
Value2VPValueTy Value2VPValue;
/// Contains all VPValues that have been allocated by addVPValue directly and
/// need to be freed when the plan's destructor is called.
SmallVector<VPValue *, 16> VPValuesToFree;
/// Indicates whether it is safe to use the Value2VPValue mapping, or whether
/// the mapping cannot be used any longer because it is stale.
bool Value2VPValueEnabled = true;
/// Values used outside the plan.
MapVector<PHINode *, VPLiveOut *> LiveOuts;
public:
VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
if (Entry)
Entry->setPlan(this);
}
~VPlan();
/// Prepare the plan for execution, setting up the required live-in values.
void prepareToExecute(Value *TripCount, Value *VectorTripCount,
Value *CanonicalIVStartValue, VPTransformState &State,
bool IsEpilogueVectorization);
/// Generate the IR code for this VPlan.
void execute(VPTransformState *State);
VPBlockBase *getEntry() { return Entry; }
const VPBlockBase *getEntry() const { return Entry; }
VPBlockBase *setEntry(VPBlockBase *Block) {
Entry = Block;
Block->setPlan(this);
return Entry;
}
/// The trip count of the original loop.
VPValue *getOrCreateTripCount() {
if (!TripCount)
TripCount = new VPValue();
return TripCount;
}
/// The backedge taken count of the original loop.
VPValue *getOrCreateBackedgeTakenCount() {
if (!BackedgeTakenCount)
BackedgeTakenCount = new VPValue();
return BackedgeTakenCount;
}
/// The vector trip count.
VPValue &getVectorTripCount() { return VectorTripCount; }
/// Mark the plan to indicate that using Value2VPValue is not safe any
/// longer, because it may be stale.
void disableValue2VPValue() { Value2VPValueEnabled = false; }
void addVF(ElementCount VF) { VFs.insert(VF); }
void setVF(ElementCount VF) {
assert(hasVF(VF) && "Cannot set VF not already in plan");
VFs.clear();
VFs.insert(VF);
}
bool hasVF(ElementCount VF) { return VFs.count(VF); }
bool hasScalarVFOnly() const { return VFs.size() == 1 && VFs[0].isScalar(); }
bool hasUF(unsigned UF) const { return UFs.empty() || UFs.contains(UF); }
void setUF(unsigned UF) {
assert(hasUF(UF) && "Cannot set the UF not already in plan");
UFs.clear();
UFs.insert(UF);
}
/// Return a string with the name of the plan and the applicable VFs and UFs.
std::string getName() const;
void setName(const Twine &newName) { Name = newName.str(); }
/// Get the existing or add a new external definition for \p V.
VPValue *getOrAddExternalDef(Value *V) {
auto I = VPExternalDefs.insert({V, nullptr});
if (I.second)
I.first->second = new VPValue(V);
return I.first->second;
}
void addVPValue(Value *V) {
assert(Value2VPValueEnabled &&
"IR value to VPValue mapping may be out of date!");
assert(V && "Trying to add a null Value to VPlan");
assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
VPValue *VPV = new VPValue(V);
Value2VPValue[V] = VPV;
VPValuesToFree.push_back(VPV);
}
void addVPValue(Value *V, VPValue *VPV) {
assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
assert(V && "Trying to add a null Value to VPlan");
assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
Value2VPValue[V] = VPV;
}
/// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
/// checking whether it is safe to query VPValues using IR Values.
VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
"Value2VPValue mapping may be out of date!");
assert(V && "Trying to get the VPValue of a null Value");
assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
return Value2VPValue[V];
}
/// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
/// OverrideAllowed can be used to disable checking whether it is safe to
/// query VPValues using IR Values.
VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
"Value2VPValue mapping may be out of date!");
assert(V && "Trying to get or add the VPValue of a null Value");
if (!Value2VPValue.count(V))
addVPValue(V);
return getVPValue(V);
}
void removeVPValueFor(Value *V) {
assert(Value2VPValueEnabled &&
"IR value to VPValue mapping may be out of date!");
Value2VPValue.erase(V);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print this VPlan to \p O.
void print(raw_ostream &O) const;
/// Print this VPlan in DOT format to \p O.
void printDOT(raw_ostream &O) const;
/// Dump the plan to stderr (for debugging).
LLVM_DUMP_METHOD void dump() const;
#endif
/// Returns a range mapping the values in the range \p Operands to their
/// corresponding VPValues.
iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
mapToVPValues(User::op_range Operands) {
std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
return getOrAddVPValue(Op);
};
return map_range(Operands, Fn);
}
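// Usage sketch (hypothetical): materialize the mapped operands of an IR
// instruction into a vector of VPValues.
//   auto Range = Plan.mapToVPValues(Inst->operands());
//   SmallVector<VPValue *, 4> Ops(Range.begin(), Range.end());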
/// Returns the VPRegionBlock of the vector loop.
VPRegionBlock *getVectorLoopRegion() {
return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
}
const VPRegionBlock *getVectorLoopRegion() const {
return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
}
/// Returns the canonical induction recipe of the vector loop.
VPCanonicalIVPHIRecipe *getCanonicalIV() {
VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
if (EntryVPBB->empty()) {
// VPlan native path.
EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
}
return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
}
/// Find and return the VPActiveLaneMaskPHIRecipe from the header - there can
/// be at most one. If there isn't one, return nullptr.
VPActiveLaneMaskPHIRecipe *getActiveLaneMaskPhi();
void addLiveOut(PHINode *PN, VPValue *V);
void clearLiveOuts() {
for (auto &KV : LiveOuts)
delete KV.second;
LiveOuts.clear();
}
void removeLiveOut(PHINode *PN) {
delete LiveOuts[PN];
LiveOuts.erase(PN);
}
const MapVector<PHINode *, VPLiveOut *> &getLiveOuts() const {
return LiveOuts;
}
private:
/// Add to the given dominator tree the header block and every new basic block
/// that was created between it and the latch block, inclusive.
static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
BasicBlock *LoopPreHeaderBB,
BasicBlock *LoopExitBB);
};
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// VPlanPrinter prints a given VPlan to a given output stream. The printing is
/// indented and follows the dot format.
class VPlanPrinter {
raw_ostream &OS;
const VPlan &Plan;
unsigned Depth = 0;
unsigned TabWidth = 2;
std::string Indent;
unsigned BID = 0;
SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
VPSlotTracker SlotTracker;
/// Handle indentation.
void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
/// Print a given \p Block of the Plan.
void dumpBlock(const VPBlockBase *Block);
/// Print the information related to the CFG edges going out of a given
/// \p Block, followed by printing the successor blocks themselves.
void dumpEdges(const VPBlockBase *Block);
/// Print a given \p BasicBlock, including its VPRecipes, followed by printing
/// its successor blocks.
void dumpBasicBlock(const VPBasicBlock *BasicBlock);
/// Print a given \p Region of the Plan.
void dumpRegion(const VPRegionBlock *Region);
unsigned getOrCreateBID(const VPBlockBase *Block) {
return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
}
Twine getOrCreateName(const VPBlockBase *Block);
Twine getUID(const VPBlockBase *Block);
/// Print the information related to a CFG edge between two VPBlockBases.
void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
const Twine &Label);
public:
VPlanPrinter(raw_ostream &O, const VPlan &P)
: OS(O), Plan(P), SlotTracker(&P) {}
LLVM_DUMP_METHOD void dump();
};
struct VPlanIngredient {
const Value *V;
VPlanIngredient(const Value *V) : V(V) {}
void print(raw_ostream &O) const;
};
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
I.print(OS);
return OS;
}
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
Plan.print(OS);
return OS;
}
#endif
//===----------------------------------------------------------------------===//
// VPlan Utilities
//===----------------------------------------------------------------------===//
/// Class that provides utilities for VPBlockBases in VPlan.
class VPBlockUtils {
public:
VPBlockUtils() = delete;
/// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
/// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
/// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. \p BlockPtr's
/// successors are moved from \p BlockPtr to \p NewBlock. \p NewBlock must
/// have neither successors nor predecessors.
static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
assert(NewBlock->getSuccessors().empty() &&
NewBlock->getPredecessors().empty() &&
"Can't insert new block with predecessors or successors.");
NewBlock->setParent(BlockPtr->getParent());
SmallVector<VPBlockBase *> Succs(BlockPtr->successors());
for (VPBlockBase *Succ : Succs) {
disconnectBlocks(BlockPtr, Succ);
connectBlocks(NewBlock, Succ);
}
connectBlocks(BlockPtr, NewBlock);
}
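// Usage sketch (hypothetical blocks): splice a new block between VPBB and its
// current successors.
//   auto *NewBB = new VPBasicBlock("split");
//   VPBlockUtils::insertBlockAfter(NewBB, VPBB);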
/// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
/// BlockPtr. Add \p IfTrue and \p IfFalse as successors of \p BlockPtr and \p
/// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
/// parent to \p IfTrue and \p IfFalse. \p BlockPtr must have no successors
/// and \p IfTrue and \p IfFalse must have neither successors nor
/// predecessors.
static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
VPBlockBase *BlockPtr) {
assert(IfTrue->getSuccessors().empty() &&
"Can't insert IfTrue with successors.");
assert(IfFalse->getSuccessors().empty() &&
"Can't insert IfFalse with successors.");
BlockPtr->setTwoSuccessors(IfTrue, IfFalse);
IfTrue->setPredecessors({BlockPtr});
IfFalse->setPredecessors({BlockPtr});
IfTrue->setParent(BlockPtr->getParent());
IfFalse->setParent(BlockPtr->getParent());
}
/// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
/// the successors of \p From and \p From to the predecessors of \p To. Both
/// VPBlockBases must have the same parent, which can be null. Both
/// VPBlockBases can be already connected to other VPBlockBases.
static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
assert((From->getParent() == To->getParent()) &&
"Can't connect two blocks with different parents");
assert(From->getNumSuccessors() < 2 &&
"Blocks can't have more than two successors.");
From->appendSuccessor(To);
To->appendPredecessor(From);
}
/// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
/// from the successors of \p From and \p From from the predecessors of \p To.
static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
assert(To && "Successor to disconnect is null.");
From->removeSuccessor(To);
To->removePredecessor(From);
}
/// Return an iterator range over \p Range which only includes \p BlockTy
/// blocks. The accesses are cast to \p BlockTy.
template <typename BlockTy, typename T>
static auto blocksOnly(const T &Range) {
// Create BaseTy with correct const-ness based on BlockTy.
using BaseTy = std::conditional_t<std::is_const<BlockTy>::value,
const VPBlockBase, VPBlockBase>;
// We need to first create an iterator range over (const) BlockTy & instead
// of (const) BlockTy * for filter_range to work properly.
auto Mapped =
map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
auto Filter = make_filter_range(
Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
return cast<BlockTy>(&Block);
});
}
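// Usage sketch (hypothetical): given a range of VPBlockBase pointers, visit
// only the contained VPBasicBlocks, skipping region blocks.
//   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Blocks))
//     ...;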
};
class VPInterleavedAccessInfo {
DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
InterleaveGroupMap;
/// Type for mapping of instruction based interleave groups to VPInstruction
/// interleave groups.
using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
InterleaveGroup<VPInstruction> *>;
/// Recursively traverse \p Region and populate VPlan based interleave groups
/// based on \p IAI.
void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
InterleavedAccessInfo &IAI);
/// Recursively traverse \p Block and populate VPlan based interleave groups
/// based on \p IAI.
void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
InterleavedAccessInfo &IAI);
public:
VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);
~VPInterleavedAccessInfo() {
SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
// Avoid releasing a pointer twice.
for (auto &I : InterleaveGroupMap)
DelSet.insert(I.second);
for (auto *Ptr : DelSet)
delete Ptr;
}
/// Get the interleave group that \p Instr belongs to.
///
/// \returns nullptr if \p Instr does not belong to such a group.
InterleaveGroup<VPInstruction> *
getInterleaveGroup(VPInstruction *Instr) const {
return InterleaveGroupMap.lookup(Instr);
}
};
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
enum class OpMode { Failed, Load, Opcode };
/// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
/// DenseMap keys.
struct BundleDenseMapInfo {
static SmallVector<VPValue *, 4> getEmptyKey() {
return {reinterpret_cast<VPValue *>(-1)};
}
static SmallVector<VPValue *, 4> getTombstoneKey() {
return {reinterpret_cast<VPValue *>(-2)};
}
static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
}
static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
const SmallVector<VPValue *, 4> &RHS) {
return LHS == RHS;
}
};
/// Mapping of values in the original VPlan to a combined VPInstruction.
DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
BundleToCombined;
VPInterleavedAccessInfo &IAI;
/// Basic block to operate on. For now, only instructions in a single BB are
/// considered.
const VPBasicBlock &BB;
/// Indicates whether we managed to combine all visited instructions or not.
bool CompletelySLP = true;
/// Width of the widest combined bundle in bits.
unsigned WidestBundleBits = 0;
using MultiNodeOpTy =
typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;
// Input operand bundles for the current multi node. Each multi node operand
// bundle contains values not matching the multi node's opcode. They will
// be reordered in reorderMultiNodeOps, once we have completed building a
// multi node.
SmallVector<MultiNodeOpTy, 4> MultiNodeOps;
/// Indicates whether we are currently building a multi node.
bool MultiNodeActive = false;
/// Check if we can vectorize Operands together.
bool areVectorizable(ArrayRef<VPValue *> Operands) const;
/// Add combined instruction \p New for the bundle \p Operands.
void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);
/// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
VPInstruction *markFailed();
/// Reorder operands in the multi node to maximize sequential memory access
/// and commutative operations.
SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();
/// Choose the best candidate to use for the lane after \p Last. The set of
/// candidates to choose from are values with an opcode matching \p Last's
/// or loads consecutive to \p Last.
std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
SmallPtrSetImpl<VPValue *> &Candidates,
VPInterleavedAccessInfo &IAI);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print bundle \p Values to dbgs().
void dumpBundle(ArrayRef<VPValue *> Values);
#endif
public:
VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}
~VPlanSlp() = default;
/// Tries to build an SLP tree rooted at \p Operands and returns a
/// VPInstruction combining \p Operands, if they can be combined.
VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);
/// Return the width of the widest combined bundle in bits.
unsigned getWidestBundleBits() const { return WidestBundleBits; }
/// Return true if all visited instructions can be combined.
bool isCompletelySLP() const { return CompletelySLP; }
};
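// Usage sketch (hypothetical): attempt to combine a seed bundle of VPValues.
//   VPlanSlp Slp(VPIAI, *VPBB);
//   if (VPInstruction *Combined = Slp.buildGraph(Bundle))
//     ... // Combined; Slp.getWidestBundleBits() gives the resulting width.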
namespace vputils {
/// Returns true if only the first lane of \p Def is used.
bool onlyFirstLaneUsed(VPValue *Def);
/// Get or create a VPValue that corresponds to the expansion of \p Expr. If \p
/// Expr is a SCEVConstant or SCEVUnknown, return a VPValue wrapping the live-in
/// value. Otherwise return a VPExpandSCEVRecipe to expand \p Expr. If \p Plan's
/// pre-header already contains a recipe expanding \p Expr, return it. If not,
/// create a new one.
VPValue *getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
ScalarEvolution &SE);
/// Returns true if \p VPV is uniform after vectorization.
inline bool isUniformAfterVectorization(VPValue *VPV) {
// A value defined outside the vector region must be uniform after
// vectorization inside a vector region.
if (VPV->isDefinedOutsideVectorRegions())
return true;
VPRecipeBase *Def = VPV->getDefiningRecipe();
assert(Def && "Must have definition for value defined inside vector region");
if (auto Rep = dyn_cast<VPReplicateRecipe>(Def))
return Rep->isUniform();
return false;
}
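// Usage sketch (hypothetical): a uniform value only needs to be generated once
// per unrolled part rather than once per lane.
//   if (vputils::isUniformAfterVectorization(Def))
//     ...;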
} // end namespace vputils
} // end namespace llvm
#endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H