blob: f6d76ee09534f44d18f060c40013996e8a6a6945 [file] [log] [blame]
//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ARMFeatures.h"
#include "ARMBaseInstrInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMInstPrinter.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "TargetInfo/ARMTargetInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#define DEBUG_TYPE "asm-parser"
using namespace llvm;
namespace llvm {
extern const MCInstrDesc ARMInsts[];
} // end namespace llvm
namespace {
// Controls whether the parser accepts conditional instructions that appear
// outside of an explicit IT block, and whether it synthesizes implicit IT
// instructions for them in Thumb mode.
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };

static cl::opt<ImplicitItModeTy> ImplicitItMode(
    "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
    // NOTE: fixed typo in user-visible help text ("outdside" -> "outside").
    cl::desc("Allow conditional instructions outside of an IT block"),
    cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
                          "Accept in both ISAs, emit implicit ITs in Thumb"),
               clEnumValN(ImplicitItModeTy::Never, "never",
                          "Warn in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
                          "Accept in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
                          "Warn in ARM, emit implicit ITs in Thumb")));

// When set, the parser emits build attributes for the selected target (see
// the constructor's call to emitTargetAttributes).
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
                                        cl::init(false));

// Classification of a vector-lane suffix parsed on an operand: no lane
// qualifier, an all-lanes form, or a single indexed lane.
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
/// Extract the state bit for one slot of an IT block from its mask.
///
/// Position==0 means we're not in an IT block at all. Position==1 selects the
/// first state bit, which is always 0 (the mandatory leading "Then").
/// Position==2 selects the second state bit, stored at bit 3 of Mask, and so
/// on downwards. Shifting by (5 - Position) brings the requested slot's bit
/// (including the always-0 bit at bit 4) down to bit 0.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned Shift = 5 - Position;
  return (Mask & (1u << Shift)) >> Shift;
}
class UnwindContext {
using Locs = SmallVector<SMLoc, 4>;
MCAsmParser &Parser;
Locs FnStartLocs;
Locs CantUnwindLocs;
Locs PersonalityLocs;
Locs PersonalityIndexLocs;
Locs HandlerDataLocs;
int FPReg;
public:
UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
bool hasFnStart() const { return !FnStartLocs.empty(); }
bool cantUnwind() const { return !CantUnwindLocs.empty(); }
bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
bool hasPersonality() const {
return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
}
void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
void saveFPReg(int Reg) { FPReg = Reg; }
int getFPReg() const { return FPReg; }
void emitFnStartLocNotes() const {
for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
FI != FE; ++FI)
Parser.Note(*FI, ".fnstart was specified here");
}
void emitCantUnwindLocNotes() const {
for (Locs::const_iterator UI = CantUnwindLocs.begin(),
UE = CantUnwindLocs.end(); UI != UE; ++UI)
Parser.Note(*UI, ".cantunwind was specified here");
}
void emitHandlerDataLocNotes() const {
for (Locs::const_iterator HI = HandlerDataLocs.begin(),
HE = HandlerDataLocs.end(); HI != HE; ++HI)
Parser.Note(*HI, ".handlerdata was specified here");
}
void emitPersonalityLocNotes() const {
for (Locs::const_iterator PI = PersonalityLocs.begin(),
PE = PersonalityLocs.end(),
PII = PersonalityIndexLocs.begin(),
PIE = PersonalityIndexLocs.end();
PI != PE || PII != PIE;) {
if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
Parser.Note(*PI++, ".personality was specified here");
else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
Parser.Note(*PII++, ".personalityindex was specified here");
else
llvm_unreachable(".personality and .personalityindex cannot be "
"at the same location");
}
}
void reset() {
FnStartLocs = Locs();
CantUnwindLocs = Locs();
PersonalityLocs = Locs();
HandlerDataLocs = Locs();
PersonalityIndexLocs = Locs();
FPReg = ARM::SP;
}
};
/// Target-specific assembly parser for ARM/Thumb.  Tracks IT/VPT block state
/// while parsing, optionally synthesizes implicit IT instructions (see
/// ImplicitItMode), and handles the ARM-specific assembler directives.
class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI;
  UnwindContext UC;

  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases defined via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // Set when the next label parsed should be marked as a Thumb function
  // symbol (driven by .thumb_func).
  bool NextSymbolIsThumb;

  // Whether implicit IT emission is enabled for Thumb, per -arm-implicit-it.
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // Whether conditional instructions are accepted in ARM mode,
  // per -arm-implicit-it.
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  struct {
    ARMCC::CondCodes Cond;  // Condition for IT block.
    unsigned Mask:4;        // Condition mask for instructions.
                            // Starting at first 1 (from lsb).
                            //   '1'  condition as indicated in IT.
                            //   '0'  inverse of condition (else).
                            // Count of instructions in IT block is
                            // 4 - trailingzeroes(mask)
                            // Note that this does not have the same encoding
                            // as in the IT instruction, which also depends
                            // on the low bit of the condition code.

    unsigned CurPosition;   // Current position in parsing of IT
                            // block. In range [0,4], with 0 being the IT
                            // instruction itself. Initialized according to
                            // count of instructions in block.  ~0U if no
                            // active IT block.

    bool IsExplicit;        // true -  The IT instruction was present in the
                            //         input, we should not modify it.
                            // false - The IT instruction was added
                            //         implicitly, we can extend it if that
                            //         would be legal.
  } ITState;

  // Instructions queued up while an implicit IT block is still open; flushed
  // (preceded by the synthesized IT) by flushPendingInstructions().
  SmallVector<MCInst, 4> PendingConditionalInsts;

  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(ITState.Mask));
    Out.EmitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.EmitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }

  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // True when the instruction just parsed occupies the last slot of the
  // current IT block.
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = countTrailingZeros(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    // Keep the condition bits above the removed slot, then re-insert the
    // terminating 1 one position higher.
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }

  // Return the low-subreg of a given Q register.
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  // Get the condition code corresponding to the current IT block slot.
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
    return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      // The first slot's bit is implicit; flipping it means flipping the
      // block's base condition instead.
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond != ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }

  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL;
    ITState.Mask = 8;
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask.
  // The mask should be in the format used in ARMOperand and
  // MCOperand, with a 1 implying 'e', regardless of the low bit of
  // the condition.
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0;
    ITState.IsExplicit = true;
  }

  // Analogous (simpler) tracking for MVE VPT predication blocks.
  struct {
    unsigned Mask : 4;
    unsigned CurPosition; // ~0U when no VPT block is active.
  } VPTState;

  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }

  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = countTrailingZeros(VPTState.Mask);
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }

  // Diagnostic helpers forwarding to the generic parser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Error(L, Msg, Range);
  }

  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo, bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo);

  // Operand-parsing helpers (defined later in this file).
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);

  // Directive handlers (defined later in this file).
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                          unsigned &PredicationCode,
                          unsigned &VPTPredicationCode, bool &CarrySetting,
                          unsigned &ProcessorIMod, StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
                             StringRef FullInst, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode,
                             bool &CanAcceptVPTPredicationCode);
  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
                                     OperandVector &Operands);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[ARM::ModeThumb];
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasThumb() const {
    return getSTI().getFeatureBits()[ARM::HasV4TOps];
  }

  bool hasThumb2() const {
    return getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasV6Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6Ops];
  }

  bool hasV6T2Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
  }

  bool hasV6MOps() const {
    return getSTI().getFeatureBits()[ARM::HasV6MOps];
  }

  bool hasV7Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV7Ops];
  }

  bool hasV8Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV8Ops];
  }

  bool hasV8MBaseline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
  }

  bool hasV8MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
  }

  bool hasV8_1MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
  }

  bool hasMVE() const {
    return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
  }

  bool hasMVEFloat() const {
    return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
  }

  bool has8MSecExt() const {
    return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
  }

  bool hasARM() const {
    return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
  }

  bool hasDSP() const {
    return getSTI().getFeatureBits()[ARM::FeatureDSP];
  }

  bool hasD32() const {
    return getSTI().getFeatureBits()[ARM::FeatureD32];
  }

  bool hasV8_1aOps() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
  }

  bool hasRAS() const {
    return getSTI().getFeatureBits()[ARM::FeatureRAS];
  }

  // Toggle between ARM and Thumb mode, recomputing available features.
  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().getFeatureBits()[ARM::FeatureMClass];
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced from the generated matcher.
  OperandMatchResultTy parseITCondCode(OperandVector &);
  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
                                   int High);
  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(OperandVector &);
  OperandMatchResultTy parseShifterImm(OperandVector &);
  OperandMatchResultTy parseRotImm(OperandVector &);
  OperandMatchResultTy parseModImm(OperandVector &);
  OperandMatchResultTy parseBitfield(OperandVector &);
  OperandMatchResultTy parsePostIdxReg(OperandVector &);
  OperandMatchResultTy parseAM3Offset(OperandVector &);
  OperandMatchResultTy parseFPImm(OperandVector &);
  OperandMatchResultTy parseVectorList(OperandVector &);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);

  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool isITBlockTerminator(MCInst &Inst) const;
  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
                        bool Load, bool ARMMode, bool Writeback);

public:
  // Target-specific match result codes, extending the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII), UC(Parser) {
    MCAsmParserExtension::Initialize(Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    VPTState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  // A near-miss diagnostic message tied to a source location.
  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  void doBeforeLabelEmit(MCSymbol *Symbol) override;

  void onLabelParsed(MCSymbol *Symbol) override;
};
/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
class ARMOperand : public MCParsedAsmOperand {
enum KindTy {
k_CondCode,
k_VPTPred,
k_CCOut,
k_ITCondMask,
k_CoprocNum,
k_CoprocReg,
k_CoprocOption,
k_Immediate,
k_MemBarrierOpt,
k_InstSyncBarrierOpt,
k_TraceSyncBarrierOpt,
k_Memory,
k_PostIndexRegister,
k_MSRMask,
k_BankedReg,
k_ProcIFlags,
k_VectorIndex,
k_Register,
k_RegisterList,
k_RegisterListWithAPSR,
k_DPRRegisterList,
k_SPRRegisterList,
k_FPSRegisterListWithVPR,
k_FPDRegisterListWithVPR,
k_VectorList,
k_VectorListAllLanes,
k_VectorListIndexed,
k_ShiftedRegister,
k_ShiftedImmediate,
k_ShifterImmediate,
k_RotateImmediate,
k_ModifiedImmediate,
k_ConstantPoolImmediate,
k_BitfieldDescriptor,
k_Token,
} Kind;
SMLoc StartLoc, EndLoc, AlignmentLoc;
SmallVector<unsigned, 8> Registers;
struct CCOp {
ARMCC::CondCodes Val;
};
struct VCCOp {
ARMVCC::VPTCodes Val;
};
struct CopOp {
unsigned Val;
};
struct CoprocOptionOp {
unsigned Val;
};
struct ITMaskOp {
unsigned Mask:4;
};
struct MBOptOp {
ARM_MB::MemBOpt Val;
};
struct ISBOptOp {
ARM_ISB::InstSyncBOpt Val;
};
struct TSBOptOp {
ARM_TSB::TraceSyncBOpt Val;
};
struct IFlagsOp {
ARM_PROC::IFlags Val;
};
struct MMaskOp {
unsigned Val;
};
struct BankedRegOp {
unsigned Val;
};
struct TokOp {
const char *Data;
unsigned Length;
};
struct RegOp {
unsigned RegNum;
};
// A vector register list is a sequential list of 1 to 4 registers.
struct VectorListOp {
unsigned RegNum;
unsigned Count;
unsigned LaneIndex;
bool isDoubleSpaced;
};
struct VectorIndexOp {
unsigned Val;
};
struct ImmOp {
const MCExpr *Val;
};
/// Combined record for all forms of ARM address expressions.
struct MemoryOp {
unsigned BaseRegNum;
// Offset is in OffsetReg or OffsetImm. If both are zero, no offset
// was specified.
const MCConstantExpr *OffsetImm; // Offset immediate value
unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
unsigned ShiftImm; // shift for OffsetReg.
unsigned Alignment; // 0 = no alignment specified
// n = alignment in bytes (2, 4, 8, 16, or 32)
unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
};
struct PostIdxRegOp {
unsigned RegNum;
bool isAdd;
ARM_AM::ShiftOpc ShiftTy;
unsigned ShiftImm;
};
struct ShifterImmOp {
bool isASR;
unsigned Imm;
};
struct RegShiftedRegOp {
ARM_AM::ShiftOpc ShiftTy;
unsigned SrcReg;
unsigned ShiftReg;
unsigned ShiftImm;
};
struct RegShiftedImmOp {
ARM_AM::ShiftOpc ShiftTy;
unsigned SrcReg;
unsigned ShiftImm;
};
struct RotImmOp {
unsigned Imm;
};
struct ModImmOp {
unsigned Bits;
unsigned Rot;
};
struct BitfieldOp {
unsigned LSB;
unsigned Width;
};
union {
struct CCOp CC;
struct VCCOp VCC;
struct CopOp Cop;
struct CoprocOptionOp CoprocOption;
struct MBOptOp MBOpt;
struct ISBOptOp ISBOpt;
struct TSBOptOp TSBOpt;
struct ITMaskOp ITMask;
struct IFlagsOp IFlags;
struct MMaskOp MMask;
struct BankedRegOp BankedReg;
struct TokOp Tok;
struct RegOp Reg;
struct VectorListOp VectorList;
struct VectorIndexOp VectorIndex;
struct ImmOp Imm;
struct MemoryOp Memory;
struct PostIdxRegOp PostIdxReg;
struct ShifterImmOp ShifterImm;
struct RegShiftedRegOp RegShiftedReg;
struct RegShiftedImmOp RegShiftedImm;
struct RotImmOp RotImm;
struct ModImmOp ModImm;
struct BitfieldOp Bitfield;
};
public:
ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}

/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }

/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }

/// getLocRange - Get the range between the first and last token of this
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

/// getAlignmentLoc - Get the location of the Alignment token of this operand.
SMLoc getAlignmentLoc() const {
  assert(Kind == k_Memory && "Invalid access!");
  return AlignmentLoc;
}

// Kind-checked accessors for the union members.  Each asserts that the
// operand actually holds the requested kind before reading it.

ARMCC::CondCodes getCondCode() const {
  assert(Kind == k_CondCode && "Invalid access!");
  return CC.Val;
}

ARMVCC::VPTCodes getVPTPred() const {
  assert(isVPTPred() && "Invalid access!");
  return VCC.Val;
}

unsigned getCoproc() const {
  assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
  return Cop.Val;
}

StringRef getToken() const {
  assert(Kind == k_Token && "Invalid access!");
  return StringRef(Tok.Data, Tok.Length);
}

unsigned getReg() const override {
  assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
  return Reg.RegNum;
}

const SmallVectorImpl<unsigned> &getRegList() const {
  assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
          Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
          Kind == k_FPSRegisterListWithVPR ||
          Kind == k_FPDRegisterListWithVPR) &&
         "Invalid access!");
  return Registers;
}

const MCExpr *getImm() const {
  assert(isImm() && "Invalid access!");
  return Imm.Val;
}

const MCExpr *getConstantPoolImm() const {
  assert(isConstantPoolImm() && "Invalid access!");
  return Imm.Val;
}

unsigned getVectorIndex() const {
  assert(Kind == k_VectorIndex && "Invalid access!");
  return VectorIndex.Val;
}

ARM_MB::MemBOpt getMemBarrierOpt() const {
  assert(Kind == k_MemBarrierOpt && "Invalid access!");
  return MBOpt.Val;
}

ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
  assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
  return ISBOpt.Val;
}

ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
  assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
  return TSBOpt.Val;
}

ARM_PROC::IFlags getProcIFlags() const {
  assert(Kind == k_ProcIFlags && "Invalid access!");
  return IFlags.Val;
}

unsigned getMSRMask() const {
  assert(Kind == k_MSRMask && "Invalid access!");
  return MMask.Val;
}

unsigned getBankedReg() const {
  assert(Kind == k_BankedReg && "Invalid access!");
  return BankedReg.Val;
}

// Simple kind predicates.
bool isCoprocNum() const { return Kind == k_CoprocNum; }
bool isCoprocReg() const { return Kind == k_CoprocReg; }
bool isCoprocOption() const { return Kind == k_CoprocOption; }
bool isCondCode() const { return Kind == k_CondCode; }
bool isVPTPred() const { return Kind == k_VPTPred; }
bool isCCOut() const { return Kind == k_CCOut; }
bool isITMask() const { return Kind == k_ITCondMask; }
// Note: an IT condition code is stored as an ordinary k_CondCode operand.
bool isITCondCode() const { return Kind == k_CondCode; }
bool isImm() const override {
  return Kind == k_Immediate;
}
bool isARMBranchTarget() const {
if (!isImm()) return false;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
return CE->getValue() % 4 == 0;
return true;
}
bool isThumbBranchTarget() const {
if (!isImm()) return false;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
return CE->getValue() % 2 == 0;
return true;
}
// Checks whether this operand is an unsigned offset which fits in a field
// of specified width and scaled by a specific number of bits.
template<unsigned width, unsigned scale>
bool isUnsignedOffset() const {
  if (!isImm()) return false;
  // A symbolic reference is always acceptable; it becomes a fixup.
  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
    int64_t Val = CE->getValue();
    int64_t Align = 1LL << scale;
    int64_t Max = Align * ((1LL << width) - 1);
    return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
  }
  return false;
}

// Checks whether this operand is a signed offset which fits in a field
// of specified width and scaled by a specific number of bits.
template<unsigned width, unsigned scale>
bool isSignedOffset() const {
  if (!isImm()) return false;
  // A symbolic reference is always acceptable; it becomes a fixup.
  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
    int64_t Val = CE->getValue();
    int64_t Align = 1LL << scale;
    int64_t Max = Align * ((1LL << (width-1)) - 1);
    int64_t Min = -Align * (1LL << (width-1));
    return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
  }
  return false;
}

// Checks whether this operand is an offset suitable for the LE /
// LETP instructions in Arm v8.1M: an even, strictly negative constant
// no smaller than -4094 (or a symbolic reference).
bool isLEOffset() const {
  if (!isImm()) return false;
  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
    int64_t Val = CE->getValue();
    return Val < 0 && Val >= -4094 && (Val & 1) == 0;
  }
  return false;
}
// Checks whether this operand is a memory operand computed as an offset
// applied to PC. The offset may have 8 bits of magnitude and is represented
// with two bits of shift. Textually it may be either [pc, #imm], #imm or a
// relocatable expression...
bool isThumbMemPC() const {
  int64_t Val = 0;
  if (isImm()) {
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;
    Val = CE->getValue();
  }
  else if (isGPRMem()) {
    // Only a PC-relative immediate offset qualifies (no offset register).
    if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
    if(Memory.BaseRegNum != ARM::PC) return false;
    Val = Memory.OffsetImm->getValue();
  }
  else return false;
  // 4-byte aligned, in [0, 1020] (8 bits scaled by 4).
  return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
}

// True if the constant can be encoded as a VFP fp32 immediate.
bool isFPImm() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
  return Val != -1;
}

// Constant immediate in the inclusive range [N, M].
template<int64_t N, int64_t M>
bool isImmediate() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return Value >= N && Value <= M;
}

// Constant immediate in [N, M] that is also a multiple of 4.
template<int64_t N, int64_t M>
bool isImmediateS4() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return ((Value & 3) == 0) && Value >= N && Value <= M;
}

// Constant immediate in [N, M] that is also a multiple of 2.
template<int64_t N, int64_t M>
bool isImmediateS2() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return ((Value & 1) == 0) && Value >= N && Value <= M;
}
bool isFBits16() const {
return isImmediate<0, 17>();
}
bool isFBits32() const {
return isImmediate<1, 33>();
}
bool isImm8s4() const {
return isImmediateS4<-1020, 1020>();
}
bool isImm7s4() const {
return isImmediateS4<-508, 508>();
}
bool isImm7Shift0() const {
return isImmediate<-127, 127>();
}
bool isImm7Shift1() const {
return isImmediateS2<-255, 255>();
}
bool isImm7Shift2() const {
return isImmediateS4<-511, 511>();
}
bool isImm7() const {
return isImmediate<-127, 127>();
}
bool isImm0_1020s4() const {
return isImmediateS4<0, 1020>();
}
bool isImm0_508s4() const {
return isImmediateS4<0, 508>();
}
// A constant whose negation is a non-zero multiple of 4 in (0, 508].
bool isImm0_508s4Neg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = -CE->getValue();
// explicitly exclude zero. we want that to use the normal 0_508 version.
return ((Value & 3) == 0) && Value > 0 && Value <= 508;
}
// A constant whose 32-bit negation lands in (0, 4095].
bool isImm0_4095Neg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
// isImm0_4095Neg is used with 32-bit immediates only.
// 32-bit immediates are zero extended to 64-bit when parsed,
// thus simple -CE->getValue() results in a big negative number,
// not a small positive number as intended
if ((CE->getValue() >> 32) > 0) return false;
uint32_t Value = -static_cast<uint32_t>(CE->getValue());
return Value > 0 && Value < 4096;
}
// Constant immediate in [0, 7].
bool isImm0_7() const {
return isImmediate<0, 7>();
}
// Constant immediate in [1, 16].
bool isImm1_16() const {
return isImmediate<1, 16>();
}
// Constant immediate in [1, 32].
bool isImm1_32() const {
return isImmediate<1, 32>();
}
// Constant immediate in [8, 255].
bool isImm8_255() const {
return isImmediate<8, 255>();
}
// Constant in [256, 65535], or any non-constant expression (which will be
// resolved later via a fixup).
bool isImm256_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
if (!CE) return true;
int64_t Value = CE->getValue();
return Value >= 256 && Value < 65536;
}
// Constant in [0, 65535], or any non-constant expression (fixup later).
bool isImm0_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
if (!CE) return true;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 65536;
}
// Constant immediate in [0, 0x1000000].
// NOTE(review): upper bound is 0xffffff + 1 *inclusive* -- confirm this
// off-by-one is intentional before tightening.
bool isImm24bit() const {
return isImmediate<0, 0xffffff + 1>();
}
// Thumb shift-right amount: constant in [1, 33].
bool isImmThumbSR() const {
return isImmediate<1, 33>();
}
// True iff Value, once right-shifted by 'shift', fits in 8 bits and had no
// set bits below the shift point.
template<int shift>
bool isExpImmValue(uint64_t Value) const {
uint64_t mask = (1 << shift) - 1;
if ((Value & mask) != 0 || (Value >> shift) > 0xff)
return false;
return true;
}
// Constant immediate satisfying isExpImmValue<shift>.
template<int shift>
bool isExpImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
return isExpImmValue<shift>(CE->getValue());
}
// Constant immediate whose low-'size'-bit complement satisfies
// isExpImmValue<shift>.
template<int shift, int size>
bool isInvertedExpImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t OriginalValue = CE->getValue();
uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
return isExpImmValue<shift>(InvertedValue);
}
// PKH instruction LSL amount: constant in [0, 32].
bool isPKHLSLImm() const {
return isImmediate<0, 32>();
}
// PKH instruction ASR amount: constant in [0, 33].
bool isPKHASRImm() const {
return isImmediate<0, 33>();
}
// An ADR label operand: either a non-constant expression (label reference
// resolved by a fixup) or a constant that is ARM modified-immediate
// encodable directly or after negation.
bool isAdrLabel() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
// If it is a constant, it must fit into a modified immediate encoding.
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return (ARM_AM::getSOImmVal(Value) != -1 ||
ARM_AM::getSOImmVal(-Value) != -1);
}
// Thumb2 modified immediate: either a non-constant expression (excluding
// :upper16:/:lower16:, which must match isImm0_65535Expr instead) or a
// constant that has a T2 modified-immediate encoding.
bool isT2SOImm() const {
// If we have an immediate that's not a constant, treat it as an expression
// needing a fixup.
if (isImm() && !isa<MCConstantExpr>(getImm())) {
// We want to avoid matching :upper16: and :lower16: as we want these
// expressions to match in isImm0_65535Expr()
const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
}
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Value) != -1;
}
// Constant that is NOT T2-encodable itself but whose bitwise complement
// is (so the instruction can be flipped to its MVN/BIC form).
bool isT2SOImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Value) == -1 &&
ARM_AM::getT2SOImmVal(~Value) != -1;
}
// Constant that is NOT T2-encodable itself but whose negation is (so the
// instruction can be flipped to its ADD/SUB counterpart).
bool isT2SOImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
// Only use this when not representable as a plain so_imm.
return ARM_AM::getT2SOImmVal(Value) == -1 &&
ARM_AM::getT2SOImmVal(-Value) != -1;
}
// SETEND operand: the constant 0 or 1.
bool isSetEndImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value == 1 || Value == 0;
}
// Simple operand-kind discriminators.
bool isReg() const override { return Kind == k_Register; }
bool isRegList() const { return Kind == k_RegisterList; }
// A register list that may additionally contain APSR.
bool isRegListWithAPSR() const {
return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
}
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
bool isToken() const override { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
// Any memory operand: either GPR-based or MVE (Q-register-based).
bool isMem() const override {
return isGPRMem() || isMVEMem();
}
// Memory operand whose base may be a GPR or an MVE Q register, and whose
// register offset (if any) must be a Q register.
bool isMVEMem() const {
if (Kind != k_Memory)
return false;
if (Memory.BaseRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
return false;
if (Memory.OffsetRegNum &&
!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.OffsetRegNum))
return false;
return true;
}
// Memory operand whose base and register offset (if present) are both
// general-purpose registers.
bool isGPRMem() const {
if (Kind != k_Memory)
return false;
if (Memory.BaseRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
return false;
if (Memory.OffsetRegNum &&
!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
return false;
return true;
}
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
// Register shifted by a register; both must be GPRs.
bool isRegShiftedReg() const {
return Kind == k_ShiftedRegister &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
RegShiftedReg.SrcReg) &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
RegShiftedReg.ShiftReg);
}
// Register shifted by an immediate; the source must be a GPR.
bool isRegShiftedImm() const {
return Kind == k_ShiftedImmediate &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
RegShiftedImm.SrcReg);
}
bool isRotImm() const { return Kind == k_RotateImmediate; }
template<unsigned Min, unsigned Max>
bool isPowerTwoInRange() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
Value >= Min && Value <= Max;
}
bool isModImm() const { return Kind == k_ModifiedImmediate; }
// Constant whose bitwise complement is ARM modified-immediate encodable.
bool isModImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(~Value) != -1;
}
// Constant that is not modified-immediate encodable itself, but whose
// negation is.
bool isModImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(Value) == -1 &&
ARM_AM::getSOImmVal(-Value) != -1;
}
// Constant whose 32-bit negation lies in [1, 7].
bool isThumbModImmNeg1_7() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int32_t Value = -(int32_t)CE->getValue();
return 0 < Value && Value < 8;
}
// Constant whose 32-bit negation lies in [8, 255].
bool isThumbModImmNeg8_255() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int32_t Value = -(int32_t)CE->getValue();
return 7 < Value && Value < 256;
}
bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
// Post-indexed register (possibly shifted); the register must be a GPR.
bool isPostIdxRegShifted() const {
return Kind == k_PostIndexRegister &&
ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
}
// Post-indexed register with no shift applied.
bool isPostIdxReg() const {
return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
}
// GPR-based memory operand with no register or immediate offset; the
// alignment must match exactly unless alignOK is set.
bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// As isMemNoOffset, but the base must be in GPRnopc (any GPR except PC).
bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
Memory.BaseRegNum))
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// As isMemNoOffset, but the base must be in rGPR (no PC, no SP).
bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
Memory.BaseRegNum))
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// As isMemNoOffset, but the base must be a low register (tGPR).
bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
if (!isGPRMem())
return false;
if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
Memory.BaseRegNum))
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
(alignOK || Memory.Alignment == Alignment);
}
// PC-relative [pc, #imm] operand with a 12-bit signed offset.
bool isMemPCRelImm12() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base register must be PC.
if (Memory.BaseRegNum != ARM::PC)
return false;
// Immediate offset in range [-4095, 4095]. INT32_MIN is the parser's
// sentinel for #-0.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val > -4096 && Val < 4096) ||
(Val == std::numeric_limits<int32_t>::min());
}
// Aligned-memory predicates for NEON load/store alignment qualifiers.
// Each accepts either the exact alignment(s) listed or no alignment at
// all; the "Dup" variants mirror the plain ones for the all-lanes forms.
bool isAlignedMemory() const {
return isMemNoOffset(true);
}
bool isAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
bool isAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
return isMemNoOffset(false, 0);
}
bool isDupAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
return isMemNoOffset(false, 0);
}
bool isAlignedMemory64or128or256() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
return true;
if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
return true;
return isMemNoOffset(false, 0);
}
// ARM addressing mode 2: register offset, or immediate in [-4095, 4095].
bool isAddrMode2() const {
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val > -4096 && Val < 4096;
}
// Stand-alone AM2 offset immediate (post-indexed forms); INT32_MIN is the
// sentinel for #-0.
bool isAM2OffsetImm() const {
if (!isImm()) return false;
// Immediate offset in range [-4095, 4095].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -4096 && Val < 4096);
}
// ARM addressing mode 3: unshifted register offset, or immediate in
// [-255, 255]; a non-constant expression is accepted as a label fixup.
bool isAddrMode3() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// No shifts are legal for AM3.
if (Memory.ShiftType != ARM_AM::no_shift) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
// have to check for this too.
return (Val > -256 && Val < 256) ||
Val == std::numeric_limits<int32_t>::min();
}
// Stand-alone AM3 offset: an unshifted post-index register or an
// immediate in [-255, 255] (INT32_MIN encodes #-0).
bool isAM3Offset() const {
if (isPostIdxReg())
return true;
if (!isImm())
return false;
// Immediate offset in range [-255, 255].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
// Special case, #-0 is std::numeric_limits<int32_t>::min().
return (Val > -256 && Val < 256) ||
Val == std::numeric_limits<int32_t>::min();
}
// VFP addressing mode 5: immediate multiple of 4 in [-1020, 1020], no
// register offset; a non-constant expression is taken as a label fixup.
bool isAddrMode5() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-1020, 1020] and a multiple of 4.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
Val == std::numeric_limits<int32_t>::min();
}
// FP16 variant of AM5: immediate multiple of 2 in [-510, 510].
bool isAddrMode5FP16() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-510, 510] and a multiple of 2.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
Val == std::numeric_limits<int32_t>::min();
}
bool isMemTBB() const {
  // TBB operand: [Rn, Rm] with a plain register offset -- no negation,
  // no shift, no alignment qualifier.
  return isGPRMem() && Memory.OffsetRegNum && !Memory.isNegative &&
         Memory.ShiftType == ARM_AM::no_shift && Memory.Alignment == 0;
}
bool isMemTBH() const {
  // TBH operand: [Rn, Rm, lsl #1] -- register offset shifted left by
  // exactly one, no negation, no alignment qualifier.
  return isGPRMem() && Memory.OffsetRegNum && !Memory.isNegative &&
         Memory.ShiftType == ARM_AM::lsl && Memory.ShiftImm == 1 &&
         Memory.Alignment == 0;
}
bool isMemRegOffset() const {
  // Any register-offset memory operand without an alignment qualifier.
  return isGPRMem() && Memory.OffsetRegNum && Memory.Alignment == 0;
}
// Thumb2 register-offset memory operand: non-negated, non-PC base,
// optionally shifted by lsl #{0,1,2,3}.
bool isT2MemRegOffset() const {
if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
return false;
// Only lsl #{0, 1, 2, 3} allowed.
if (Memory.ShiftType == ARM_AM::no_shift)
return true;
if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
return false;
return true;
}
bool isMemThumbRR() const {
  // Thumb reg+reg addressing is simple. Just two registers, a base and
  // an offset. No shifts, negations or any other complicating factors.
  if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
      Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
    return false;
  // OffsetRegNum is known non-zero past the guard above, so the original
  // "!Memory.OffsetRegNum ||" disjunct here was dead; both registers must
  // simply be low registers.
  return isARMLowRegister(Memory.BaseRegNum) &&
         isARMLowRegister(Memory.OffsetRegNum);
}
// Thumb [Rn, #imm] with a low-register base and a 4-scaled offset.
bool isMemThumbRIs4() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 124].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 124 && (Val % 4) == 0;
}
// Thumb [Rn, #imm] with a low-register base and a 2-scaled offset.
bool isMemThumbRIs2() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 2 in range [0, 62].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 62 && (Val % 2) == 0;
}
// Thumb [Rn, #imm] with a low-register base and an unscaled offset.
bool isMemThumbRIs1() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 31].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 31;
}
// Thumb [sp, #imm] with a 4-scaled offset.
bool isMemThumbSPI() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
}
// [Rn, #imm] with a 4-scaled 8-bit signed offset ([-1020, 1020]); a
// non-constant expression is accepted as a label fixup.
bool isMemImm8s4Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// Special case, #-0 is std::numeric_limits<int32_t>::min().
return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
Val == std::numeric_limits<int32_t>::min();
}
// [Rn, #imm] with a 4-scaled 7-bit signed offset ([-508, 508]); the base
// may not be PC.
bool isMemImm7s4Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
Memory.BaseRegNum))
return false;
// Immediate offset a multiple of 4 in range [-508, 508].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// Special case, #-0 is INT32_MIN.
return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
}
// [Rn, #imm] with a non-negative 4-scaled offset in [0, 1020].
bool isMemImm0_1020s4Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
}
// [Rn, #imm] with an 8-bit signed offset ([-255, 255]); PC base is
// disallowed, INT32_MIN encodes #-0.
bool isMemImm8Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -256 && Val < 256);
}
// [Rn, #imm] where the offset is (a value in [-127, 127]) << Bits and the
// base register belongs to RegClassID.
template<unsigned Bits, unsigned RegClassID>
bool isMemImm7ShiftedOffset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
!ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
return false;
// Expect an immediate offset equal to an element of the range
// [-127, 127], shifted left by Bits.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
// INT32_MIN is a special-case value (indicating the encoding with
// zero offset and the subtract bit set)
if (Val == INT32_MIN)
return true;
unsigned Divisor = 1U << Bits;
// Check that the low bits are zero
if (Val % Divisor != 0)
return false;
// Check that the remaining offset is within range.
Val /= Divisor;
return (Val >= -127 && Val <= 127);
}
template <int shift> bool isMemRegRQOffset() const {
  // MVE [GPRnopc base, MQPR vector offset] operand, optionally with
  // uxtw #shift on the offset register. No immediate offset and no
  // alignment qualifier are permitted.
  // Compare the OffsetImm pointer against nullptr (was "!= 0"), matching
  // the convention used elsewhere in this file.
  if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
    return false;
  if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
          Memory.BaseRegNum))
    return false;
  if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
          Memory.OffsetRegNum))
    return false;
  // shift == 0 requires no shift at all; shift > 0 requires exactly
  // uxtw #shift.
  if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
    return false;
  if (shift > 0 &&
      (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
    return false;
  return true;
}
template <int shift> bool isMemRegQOffset() const {
  // MVE [MQPR base, #imm] operand: the immediate must be a multiple of
  // (1 << shift) whose shifted value fits in 7 bits; INT32_MIN encodes
  // the zero-offset-with-subtract form.
  if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
          Memory.BaseRegNum))
    return false;
  if (!Memory.OffsetImm)
    return true;
  static_assert(shift < 56,
                "Such that we don't shift by a value higher than 62");
  int64_t Val = Memory.OffsetImm->getValue();
  // The value must be a multiple of (1 << shift). Use a 64-bit one: the
  // static_assert above permits shift up to 55, for which the original
  // 32-bit "1U << shift" would be undefined behavior (shift >= 25
  // overflows unsigned int).
  if ((Val & ((1ULL << shift) - 1)) != 0)
    return false;
  // And be in the right range, depending on the amount that it is shifted
  // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set
  // separately. The shift must likewise be done in 64 bits.
  int64_t Range = (1ULL << (7 + shift)) - 1;
  return (Val == INT32_MIN) || (Val > -Range && Val < Range);
}
// [Rn, #imm] with an unsigned 8-bit offset in [0, 255].
bool isMemPosImm8Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val < 256;
}
// [Rn, #-imm] with a strictly negative offset in [-255, -1]; PC base is
// disallowed, INT32_MIN encodes #-0.
bool isMemNegImm8Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
// Immediate offset in range [-255, -1].
if (!Memory.OffsetImm) return false;
int64_t Val = Memory.OffsetImm->getValue();
return (Val == std::numeric_limits<int32_t>::min()) ||
(Val > -256 && Val < 0);
}
// [Rn, #imm] with an unsigned 12-bit offset in [0, 4095].
bool isMemUImm12Offset() const {
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= 0 && Val < 4096);
}
// [Rn, #imm] with a signed 12-bit offset ([-4095, 4095]); a non-constant
// expression is accepted as a label fixup, INT32_MIN encodes #-0.
bool isMemImm12Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val > -4096 && Val < 4096) ||
(Val == std::numeric_limits<int32_t>::min());
}
bool isConstPoolAsmImm() const {
// Delay processing of Constant Pool Immediate, this will turn into
// a constant. Match no other operand
return (isConstantPoolImm());
}
// Post-indexed 8-bit immediate in [-255, 255]; INT32_MIN encodes #-0.
bool isPostIdxImm8() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return (Val > -256 && Val < 256) ||
(Val == std::numeric_limits<int32_t>::min());
}
// Post-indexed 4-scaled immediate in [-1020, 1020]; INT32_MIN encodes #-0.
bool isPostIdxImm8s4() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
(Val == std::numeric_limits<int32_t>::min());
}
bool isMSRMask() const { return Kind == k_MSRMask; }
bool isBankedReg() const { return Kind == k_BankedReg; }
bool isProcIFlags() const { return Kind == k_ProcIFlags; }
// NEON operands.
// Vector list with consecutive D registers (d0, d1, ...).
bool isSingleSpacedVectorList() const {
return Kind == k_VectorList && !VectorList.isDoubleSpaced;
}
// Vector list with every-other D register (d0, d2, ...).
bool isDoubleSpacedVectorList() const {
return Kind == k_VectorList && VectorList.isDoubleSpaced;
}
// Single-spaced list of exactly one D register.
bool isVecListOneD() const {
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 1;
}
// MVE list of two Q registers.
bool isVecListTwoMQ() const {
return isSingleSpacedVectorList() && VectorList.Count == 2 &&
ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
VectorList.RegNum);
}
// Single-spaced list forming a D-register pair.
bool isVecListDPair() const {
if (!isSingleSpacedVectorList()) return false;
return (ARMMCRegisterClasses[ARM::DPairRegClassID]
.contains(VectorList.RegNum));
}
// Single-spaced list of three D registers.
bool isVecListThreeD() const {
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 3;
}
// Single-spaced list of four D registers.
bool isVecListFourD() const {
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 4;
}
// Double-spaced D-register pair (DPairSpc class).
bool isVecListDPairSpaced() const {
if (Kind != k_VectorList) return false;
if (isSingleSpacedVectorList()) return false;
return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
.contains(VectorList.RegNum));
}
// Double-spaced list of three registers.
bool isVecListThreeQ() const {
if (!isDoubleSpacedVectorList()) return false;
return VectorList.Count == 3;
}
// Double-spaced list of four registers.
bool isVecListFourQ() const {
if (!isDoubleSpacedVectorList()) return false;
return VectorList.Count == 4;
}
// MVE list of four Q registers.
bool isVecListFourMQ() const {
return isSingleSpacedVectorList() && VectorList.Count == 4 &&
ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
VectorList.RegNum);
}
// "All lanes" (e.g. vld1.32 {d0[]}) vector-list variants, single- and
// double-spaced, distinguished by register count or register class.
bool isSingleSpacedVectorAllLanes() const {
return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
}
bool isDoubleSpacedVectorAllLanes() const {
return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
}
bool isVecListOneDAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return VectorList.Count == 1;
}
bool isVecListDPairAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return (ARMMCRegisterClasses[ARM::DPairRegClassID]
.contains(VectorList.RegNum));
}
bool isVecListDPairSpacedAllLanes() const {
if (!isDoubleSpacedVectorAllLanes()) return false;
return VectorList.Count == 2;
}
bool isVecListThreeDAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return VectorList.Count == 3;
}
bool isVecListThreeQAllLanes() const {
if (!isDoubleSpacedVectorAllLanes()) return false;
return VectorList.Count == 3;
}
bool isVecListFourDAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return VectorList.Count == 4;
}
bool isVecListFourQAllLanes() const {
if (!isDoubleSpacedVectorAllLanes()) return false;
return VectorList.Count == 4;
}
// Lane-indexed vector-list variants (e.g. vld1.32 {d0[1]}). The maximum
// lane index depends on the element width: 7 for bytes, 3 for halfwords,
// 1 for words.
bool isSingleSpacedVectorIndexed() const {
return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
}
bool isDoubleSpacedVectorIndexed() const {
return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
}
bool isVecListOneDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
}
bool isVecListOneDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
}
bool isVecListOneDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
}
bool isVecListTwoDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
}
bool isVecListTwoDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}
bool isVecListTwoQWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}
bool isVecListTwoQHWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}
bool isVecListTwoDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}
bool isVecListThreeDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
}
bool isVecListThreeDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}
bool isVecListThreeQWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
}
bool isVecListThreeQHWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}
bool isVecListThreeDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
}
bool isVecListFourDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
}
bool isVecListFourDHWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
}
bool isVecListFourQWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
}
bool isVecListFourQHWordIndexed() const {
if (!isDoubleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
}
bool isVecListFourDWordIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
}
bool isVectorIndex() const { return Kind == k_VectorIndex; }
// Vector lane index strictly below NumLanes.
template <unsigned NumLanes>
bool isVectorIndexInRange() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val < NumLanes;
}
// 8/16/32/64-bit element lane indices: 8, 4, 2 and 1 lanes respectively.
bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
// Lane index equal to one of exactly two permitted values.
template<int PermittedValue, int OtherPermittedValue>
bool isMVEPairVectorIndex() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val == PermittedValue ||
VectorIndex.Val == OtherPermittedValue;
}
bool isNEONi8splat() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
int64_t Value = CE->getValue();
// i8 value splatted across 8 bytes. The immediate is just the 8 byte
// value.
return Value >= 0 && Value < 256;
}
// 16-bit splat immediate; byte-replicate patterns are deliberately
// excluded so they match the byte-replicate operand class instead.
bool isNEONi16splat() const {
if (isNEONByteReplicate(2))
return false; // Leave that for bytes replication and forbid by default.
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi16splat(Value);
}
// Constant whose 16-bit complement is a valid i16 splat (VMVN form).
bool isNEONi16splatNot() const {
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi16splat(~Value & 0xffff);
}
// 32-bit splat immediate; byte-replicate patterns are excluded as above.
bool isNEONi32splat() const {
if (isNEONByteReplicate(4))
return false; // Leave that for bytes replication and forbid by default.
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi32splat(Value);
}
// Constant whose complement is a valid i32 splat (VMVN form).
bool isNEONi32splatNot() const {
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
unsigned Value = CE->getValue();
return ARM_AM::isNEONi32splat(~Value);
}
static bool isValidNEONi32vmovImm(int64_t Value) {
  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
  const uint64_t Bits = static_cast<uint64_t>(Value);
  // Accept any value whose set bits all fall inside a single byte lane of
  // the low 32 bits (everything outside that lane, including the upper 32
  // bits, must be clear).
  for (unsigned Lane = 0; Lane < 4; ++Lane)
    if ((Bits & ~(0xffull << (8 * Lane))) == 0)
      return true;
  // The "00Xf" and "0Xff" forms: low byte(s) all-ones below one free byte.
  return (Bits & 0xffffffffffff00ffull) == 0xff ||
         (Bits & 0xffffffffff00ffffull) == 0xffff;
}
// True iff the constant immediate consists of NumElems identical
// Width-bit elements (optionally after bitwise inversion when Inv is
// set). Zero is rejected outright; 16- and 32-bit elements must
// additionally satisfy the per-width vmov constraints.
bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
assert((Width == 8 || Width == 16 || Width == 32) &&
"Invalid element width");
assert(NumElems * Width <= 64 && "Invalid result width");
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE)
return false;
int64_t Value = CE->getValue();
if (!Value)
return false; // Don't bother with zero.
if (Inv)
Value = ~Value;
uint64_t Mask = (1ull << Width) - 1;
uint64_t Elem = Value & Mask;
// A 16-bit element may only have one of its two bytes non-zero.
if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
return false;
if (Width == 32 && !isValidNEONi32vmovImm(Elem))
return false;
// Every subsequent Width-bit chunk must equal the first element.
for (unsigned i = 1; i < NumElems; ++i) {
Value >>= Width;
if ((Value & Mask) != Elem)
return false;
}
return true;
}
// True if the immediate is NumBytes identical bytes (no inversion).
bool isNEONByteReplicate(unsigned NumBytes) const {
  return isNEONReplicate(8, NumBytes, /*Inv=*/false);
}
// Sanity-checks the template arguments of the NEON replicate predicates:
// the source element width must be 8/16/32, the destination width 16/32/64,
// and the element must be strictly narrower than the result.
static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
         "Invalid source width");
  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
         "Invalid destination width");
  // The original message ("ToW is not less than FromW") described the
  // success condition; the failure is ToW not being greater than FromW.
  assert(FromW < ToW && "ToW is not greater than FromW");
}
// True if the immediate is a ToW-bit value built by replicating a FromW-bit
// element, suitable for a VMOV-style replicate encoding.
template <unsigned FromW, unsigned ToW>
bool isNEONmovReplicate() const {
  checkNeonReplicateArgs(FromW, ToW);
  // A full 64-bit splat is matched by the dedicated i64-splat predicate.
  if (ToW == 64 && isNEONi64splat())
    return false;
  constexpr unsigned NumElems = ToW / FromW;
  return isNEONReplicate(FromW, NumElems, /*Inv=*/false);
}
// Same as isNEONmovReplicate, but tests the bitwise inverse of the value.
template <unsigned FromW, unsigned ToW>
bool isNEONinvReplicate() const {
  checkNeonReplicateArgs(FromW, ToW);
  constexpr unsigned NumElems = ToW / FromW;
  return isNEONReplicate(FromW, NumElems, /*Inv=*/true);
}
bool isNEONi32vmov() const {
if (isNEONByteReplicate(4))
return false; // Let it to be classified as byte-replicate case.
if (!isImm())
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE)
return false;
return isValidNEONi32vmovImm(CE->getValue());
}
bool isNEONi32vmovNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
return isValidNEONi32vmovImm(~CE->getValue());
}
bool isNEONi64splat() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
uint64_t Value = CE->getValue();
// i64 value with each byte being either 0 or 0xff.
for (unsigned i = 0; i < 8; ++i, Value >>= 8)
if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
return true;
}
template<int64_t Angle, int64_t Remainder>
bool isComplexRotation() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t Value = CE->getValue();
return (Value % Angle == Remainder && Value <= 270);
}
bool isMVELongShift() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// Must be a constant.
if (!CE) return false;
uint64_t Value = CE->getValue();
return Value >= 1 && Value <= 32;
}
bool isMveSaturateOp() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t Value = CE->getValue();
return Value == 48 || Value == 64;
}
bool isITCondCodeNoAL() const {
if (!isITCondCode()) return false;
ARMCC::CondCodes CC = getCondCode();
return CC != ARMCC::AL;
}
bool isITCondCodeRestrictedI() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::EQ || CC == ARMCC::NE;
}
bool isITCondCodeRestrictedS() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
CC == ARMCC::GE;
}
bool isITCondCodeRestrictedU() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::HS || CC == ARMCC::HI;
}
bool isITCondCodeRestrictedFP() const {
if (!isITCondCode())
return false;
ARMCC::CondCodes CC = getCondCode();
return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
}
// Appends Expr to Inst, folding to a plain immediate where possible.
// A null expression stands for the immediate zero.
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
  if (!Expr) {
    Inst.addOperand(MCOperand::createImm(0));
    return;
  }
  if (const auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
    return;
  }
  Inst.addOperand(MCOperand::createExpr(Expr));
}
// Emits the ARM branch-target immediate/expression as one MCInst operand.
void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, getImm());
}
// Emits the Thumb branch-target immediate/expression as one MCInst operand.
void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, getImm());
}
// Emits a condition-code operand pair: the code itself, then the flag
// register it reads (none for the unconditional AL encoding).
void addCondCodeOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  ARMCC::CondCodes CC = getCondCode();
  Inst.addOperand(MCOperand::createImm(unsigned(CC)));
  Inst.addOperand(MCOperand::createReg(CC == ARMCC::AL ? 0 : ARM::CPSR));
}
// Emits a VPT predicate operand pair: the predicate code, then the
// predicate register it reads (none when unpredicated).
void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  auto Pred = getVPTPred();
  Inst.addOperand(MCOperand::createImm(unsigned(Pred)));
  Inst.addOperand(MCOperand::createReg(Pred == ARMVCC::None ? 0 : ARM::P0));
}
void addVPTPredROperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
addVPTPredNOperands(Inst, N-1);
unsigned RegNum;
if (getVPTPred() == ARMVCC::None) {
RegNum = 0;
} else {