//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "AArch64InstrInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
namespace {
enum class RegKind {
Scalar,
NeonVector,
SVEDataVector,
SVEPredicateVector
};
enum RegConstraintEqualityTy {
EqualsReg,
EqualsSuperReg,
EqualsSubReg
};
class AArch64AsmParser : public MCTargetAsmParser {
private:
StringRef Mnemonic; ///< Instruction mnemonic.
// Map of register aliases created via the .req directive.
StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
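// A MOVPRFX instruction acts as a constructive prefix to a destructive SVE
// instruction, e.g.:
//   movprfx z0, z7
//   fadd    z0.d, p0/m, z0.d, z1.d
// PrefixInfo records the prefix so validateInstruction() can check that the
// instruction that follows writes the movprfx destination and is allowed to
// be prefixed.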
class PrefixInfo {
public:
static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
PrefixInfo Prefix;
switch (Inst.getOpcode()) {
case AArch64::MOVPRFX_ZZ:
Prefix.Active = true;
Prefix.Dst = Inst.getOperand(0).getReg();
break;
case AArch64::MOVPRFX_ZPmZ_B:
case AArch64::MOVPRFX_ZPmZ_H:
case AArch64::MOVPRFX_ZPmZ_S:
case AArch64::MOVPRFX_ZPmZ_D:
Prefix.Active = true;
Prefix.Predicated = true;
Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
"No destructive element size set for movprfx");
Prefix.Dst = Inst.getOperand(0).getReg();
Prefix.Pg = Inst.getOperand(2).getReg();
break;
case AArch64::MOVPRFX_ZPzZ_B:
case AArch64::MOVPRFX_ZPzZ_H:
case AArch64::MOVPRFX_ZPzZ_S:
case AArch64::MOVPRFX_ZPzZ_D:
Prefix.Active = true;
Prefix.Predicated = true;
Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
"No destructive element size set for movprfx");
Prefix.Dst = Inst.getOperand(0).getReg();
Prefix.Pg = Inst.getOperand(1).getReg();
break;
default:
break;
}
return Prefix;
}
PrefixInfo() : Active(false), Predicated(false) {}
bool isActive() const { return Active; }
bool isPredicated() const { return Predicated; }
unsigned getElementSize() const {
assert(Predicated);
return ElementSize;
}
unsigned getDstReg() const { return Dst; }
unsigned getPgReg() const {
assert(Predicated);
return Pg;
}
private:
bool Active;
bool Predicated;
unsigned ElementSize;
unsigned Dst;
unsigned Pg;
} NextPrefix;
AArch64TargetStreamer &getTargetStreamer() {
MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
return static_cast<AArch64TargetStreamer &>(TS);
}
SMLoc getLoc() const { return getParser().getTok().getLoc(); }
bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
AArch64CC::CondCode parseCondCodeString(StringRef Cond);
bool parseCondCode(OperandVector &Operands, bool invertCondCode);
unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
bool parseRegister(OperandVector &Operands);
bool parseSymbolicImmVal(const MCExpr *&ImmVal);
bool parseNeonVectorList(OperandVector &Operands);
bool parseOptionalMulOperand(OperandVector &Operands);
bool parseOperand(OperandVector &Operands, bool isCondCode,
bool invertCondCode);
bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
OperandVector &Operands);
bool parseDirectiveArch(SMLoc L);
bool parseDirectiveArchExtension(SMLoc L);
bool parseDirectiveCPU(SMLoc L);
bool parseDirectiveInst(SMLoc L);
bool parseDirectiveTLSDescCall(SMLoc L);
bool parseDirectiveLOH(StringRef LOH, SMLoc L);
bool parseDirectiveLtorg(SMLoc L);
bool parseDirectiveReq(StringRef Name, SMLoc L);
bool parseDirectiveUnreq(SMLoc L);
bool parseDirectiveCFINegateRAState();
bool parseDirectiveCFIBKeyFrame();
bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
SmallVectorImpl<SMLoc> &Loc);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {
#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"
/// }
OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
RegKind MatchKind);
OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
template <bool IsSVEPrefetch = false>
OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
bool tryParseNeonVectorRegister(OperandVector &Operands);
OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
template <bool ParseShiftExtend,
RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
template <RegKind VectorKind>
OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
bool ExpectMatch = false);
OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
public:
enum AArch64MatchResultTy {
Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
};
bool IsILP32;
AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
const MCInstrInfo &MII, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI, MII) {
IsILP32 = Options.getABIName() == "ilp32";
MCAsmParserExtension::Initialize(Parser);
MCStreamer &S = getParser().getStreamer();
if (S.getTargetStreamer() == nullptr)
new AArch64TargetStreamer(S);
// Alias .hword/.word/.[dx]word to the target-independent
// .2byte/.4byte/.8byte directives as they have the same form and
// semantics:
// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
Parser.addAliasForDirective(".hword", ".2byte");
Parser.addAliasForDirective(".word", ".4byte");
Parser.addAliasForDirective(".dword", ".8byte");
Parser.addAliasForDirective(".xword", ".8byte");
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
}
bool regsEqual(const MCParsedAsmOperand &Op1,
const MCParsedAsmOperand &Op2) const override;
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
bool ParseDirective(AsmToken DirectiveID) override;
unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
unsigned Kind) override;
static bool classifySymbolRef(const MCExpr *Expr,
AArch64MCExpr::VariantKind &ELFRefKind,
MCSymbolRefExpr::VariantKind &DarwinRefKind,
int64_t &Addend);
};
/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
enum KindTy {
k_Immediate,
k_ShiftedImm,
k_CondCode,
k_Register,
k_VectorList,
k_VectorIndex,
k_Token,
k_SysReg,
k_SysCR,
k_Prefetch,
k_ShiftExtend,
k_FPImm,
k_Barrier,
k_PSBHint,
k_BTIHint,
} Kind;
SMLoc StartLoc, EndLoc;
struct TokOp {
const char *Data;
unsigned Length;
bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
};
// Separate shift/extend operand.
struct ShiftExtendOp {
AArch64_AM::ShiftExtendType Type;
unsigned Amount;
bool HasExplicitAmount;
};
struct RegOp {
unsigned RegNum;
RegKind Kind;
int ElementWidth;
// The register may be allowed as a different register class,
// e.g. for GPR64as32 or GPR32as64.
RegConstraintEqualityTy EqualityTy;
// In some cases the shift/extend needs to be explicitly parsed together
// with the register, rather than as a separate operand. This is needed
// for addressing modes where the instruction as a whole dictates the
// scaling/extend, rather than specific bits in the instruction.
// By parsing them as a single operand, we avoid the need to pass an
// extra operand in all CodeGen patterns (because all operands need to
// have an associated value), and we avoid the need to update TableGen to
// accept operands that have no associated bits in the instruction.
//
// An added benefit of parsing them together is that the assembler
// can give a sensible diagnostic if the scaling is not correct.
//
// The default is 'lsl #0' (HasExplicitAmount = false) if no
// ShiftExtend is specified.
ShiftExtendOp ShiftExtend;
};
struct VectorListOp {
unsigned RegNum;
unsigned Count;
unsigned NumElements;
unsigned ElementWidth;
RegKind RegisterKind;
};
struct VectorIndexOp {
unsigned Val;
};
struct ImmOp {
const MCExpr *Val;
};
struct ShiftedImmOp {
const MCExpr *Val;
unsigned ShiftAmount;
};
struct CondCodeOp {
AArch64CC::CondCode Code;
};
struct FPImmOp {
uint64_t Val; // APFloat value bitcasted to uint64_t.
bool IsExact; // describes whether parsed value was exact.
};
struct BarrierOp {
const char *Data;
unsigned Length;
unsigned Val; // Not the enum since not all values have names.
};
struct SysRegOp {
const char *Data;
unsigned Length;
uint32_t MRSReg;
uint32_t MSRReg;
uint32_t PStateField;
};
struct SysCRImmOp {
unsigned Val;
};
struct PrefetchOp {
const char *Data;
unsigned Length;
unsigned Val;
};
struct PSBHintOp {
const char *Data;
unsigned Length;
unsigned Val;
};
struct BTIHintOp {
const char *Data;
unsigned Length;
unsigned Val;
};
struct ExtendOp {
unsigned Val;
};
union {
struct TokOp Tok;
struct RegOp Reg;
struct VectorListOp VectorList;
struct VectorIndexOp VectorIndex;
struct ImmOp Imm;
struct ShiftedImmOp ShiftedImm;
struct CondCodeOp CondCode;
struct FPImmOp FPImm;
struct BarrierOp Barrier;
struct SysRegOp SysReg;
struct SysCRImmOp SysCRImm;
struct PrefetchOp Prefetch;
struct PSBHintOp PSBHint;
struct BTIHintOp BTIHint;
struct ShiftExtendOp ShiftExtend;
};
// Keep the MCContext around as the MCExprs may need to be manipulated during
// the add<>Operands() calls.
MCContext &Ctx;
public:
AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
Kind = o.Kind;
StartLoc = o.StartLoc;
EndLoc = o.EndLoc;
switch (Kind) {
case k_Token:
Tok = o.Tok;
break;
case k_Immediate:
Imm = o.Imm;
break;
case k_ShiftedImm:
ShiftedImm = o.ShiftedImm;
break;
case k_CondCode:
CondCode = o.CondCode;
break;
case k_FPImm:
FPImm = o.FPImm;
break;
case k_Barrier:
Barrier = o.Barrier;
break;
case k_Register:
Reg = o.Reg;
break;
case k_VectorList:
VectorList = o.VectorList;
break;
case k_VectorIndex:
VectorIndex = o.VectorIndex;
break;
case k_SysReg:
SysReg = o.SysReg;
break;
case k_SysCR:
SysCRImm = o.SysCRImm;
break;
case k_Prefetch:
Prefetch = o.Prefetch;
break;
case k_PSBHint:
PSBHint = o.PSBHint;
break;
case k_BTIHint:
BTIHint = o.BTIHint;
break;
case k_ShiftExtend:
ShiftExtend = o.ShiftExtend;
break;
}
}
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
bool isTokenSuffix() const {
assert(Kind == k_Token && "Invalid access!");
return Tok.IsSuffix;
}
const MCExpr *getImm() const {
assert(Kind == k_Immediate && "Invalid access!");
return Imm.Val;
}
const MCExpr *getShiftedImmVal() const {
assert(Kind == k_ShiftedImm && "Invalid access!");
return ShiftedImm.Val;
}
unsigned getShiftedImmShift() const {
assert(Kind == k_ShiftedImm && "Invalid access!");
return ShiftedImm.ShiftAmount;
}
AArch64CC::CondCode getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
return CondCode.Code;
}
APFloat getFPImm() const {
assert(Kind == k_FPImm && "Invalid access!");
return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
}
bool getFPImmIsExact() const {
assert(Kind == k_FPImm && "Invalid access!");
return FPImm.IsExact;
}
unsigned getBarrier() const {
assert(Kind == k_Barrier && "Invalid access!");
return Barrier.Val;
}
StringRef getBarrierName() const {
assert(Kind == k_Barrier && "Invalid access!");
return StringRef(Barrier.Data, Barrier.Length);
}
unsigned getReg() const override {
assert(Kind == k_Register && "Invalid access!");
return Reg.RegNum;
}
RegConstraintEqualityTy getRegEqualityTy() const {
assert(Kind == k_Register && "Invalid access!");
return Reg.EqualityTy;
}
unsigned getVectorListStart() const {
assert(Kind == k_VectorList && "Invalid access!");
return VectorList.RegNum;
}
unsigned getVectorListCount() const {
assert(Kind == k_VectorList && "Invalid access!");
return VectorList.Count;
}
unsigned getVectorIndex() const {
assert(Kind == k_VectorIndex && "Invalid access!");
return VectorIndex.Val;
}
StringRef getSysReg() const {
assert(Kind == k_SysReg && "Invalid access!");
return StringRef(SysReg.Data, SysReg.Length);
}
unsigned getSysCR() const {
assert(Kind == k_SysCR && "Invalid access!");
return SysCRImm.Val;
}
unsigned getPrefetch() const {
assert(Kind == k_Prefetch && "Invalid access!");
return Prefetch.Val;
}
unsigned getPSBHint() const {
assert(Kind == k_PSBHint && "Invalid access!");
return PSBHint.Val;
}
StringRef getPSBHintName() const {
assert(Kind == k_PSBHint && "Invalid access!");
return StringRef(PSBHint.Data, PSBHint.Length);
}
unsigned getBTIHint() const {
assert(Kind == k_BTIHint && "Invalid access!");
return BTIHint.Val;
}
StringRef getBTIHintName() const {
assert(Kind == k_BTIHint && "Invalid access!");
return StringRef(BTIHint.Data, BTIHint.Length);
}
StringRef getPrefetchName() const {
assert(Kind == k_Prefetch && "Invalid access!");
return StringRef(Prefetch.Data, Prefetch.Length);
}
AArch64_AM::ShiftExtendType getShiftExtendType() const {
if (Kind == k_ShiftExtend)
return ShiftExtend.Type;
if (Kind == k_Register)
return Reg.ShiftExtend.Type;
llvm_unreachable("Invalid access!");
}
unsigned getShiftExtendAmount() const {
if (Kind == k_ShiftExtend)
return ShiftExtend.Amount;
if (Kind == k_Register)
return Reg.ShiftExtend.Amount;
llvm_unreachable("Invalid access!");
}
bool hasShiftExtendAmount() const {
if (Kind == k_ShiftExtend)
return ShiftExtend.HasExplicitAmount;
if (Kind == k_Register)
return Reg.ShiftExtend.HasExplicitAmount;
llvm_unreachable("Invalid access!");
}
bool isImm() const override { return Kind == k_Immediate; }
bool isMem() const override { return false; }
bool isUImm6() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return false;
int64_t Val = MCE->getValue();
return (Val >= 0 && Val < 64);
}
template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
return isImmScaled<Bits, Scale>(true);
}
template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
return isImmScaled<Bits, Scale>(false);
}
template <int Bits, int Scale>
DiagnosticPredicate isImmScaled(bool Signed) const {
if (!isImm())
return DiagnosticPredicateTy::NoMatch;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return DiagnosticPredicateTy::NoMatch;
int64_t MinVal, MaxVal;
if (Signed) {
int64_t Shift = Bits - 1;
MinVal = (int64_t(1) << Shift) * -Scale;
MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
} else {
MinVal = 0;
MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
}
int64_t Val = MCE->getValue();
if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
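// Worked example: isSImmScaled<4, 16> would accept signed multiples of 16 in
// [-128, 112], since MinVal = -(1 << 3) * 16 = -128 and
// MaxVal = ((1 << 3) - 1) * 16 = 112.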
DiagnosticPredicate isSVEPattern() const {
if (!isImm())
return DiagnosticPredicateTy::NoMatch;
auto *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return DiagnosticPredicateTy::NoMatch;
int64_t Val = MCE->getValue();
if (Val >= 0 && Val < 32)
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
AArch64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
int64_t Addend;
if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
Addend)) {
// If we don't understand the expression, assume the best and
// let the fixup and relocation code deal with it.
return true;
}
if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
ELFRefKind == AArch64MCExpr::VK_LO12 ||
ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
// Note that we don't range-check the addend. It's adjusted modulo page
// size when converted, so there is no "out of range" condition when using
// @pageoff.
return true;
} else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
// @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
return Addend == 0;
}
return false;
}
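// For example, "ldr x0, [x1, :lo12:sym]" (ELF) and "ldr x0, [x1, sym@PAGEOFF]"
// (Mach-O) are accepted here; the addend is resolved by the fixup/relocation
// machinery rather than range-checked during parsing.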
template <int Scale> bool isUImm12Offset() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return isSymbolicUImm12Offset(getImm());
int64_t Val = MCE->getValue();
return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
}
template <int N, int M>
bool isImmInRange() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return false;
int64_t Val = MCE->getValue();
return (Val >= N && Val <= M);
}
// NOTE: Also used for isLogicalImmNot as anything that can be represented as
// a logical immediate can always be represented when inverted.
template <typename T>
bool isLogicalImm() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return false;
int64_t Val = MCE->getValue();
int64_t SVal = typename std::make_signed<T>::type(Val);
int64_t UVal = typename std::make_unsigned<T>::type(Val);
if (Val != SVal && Val != UVal)
return false;
return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
}
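// For example, 0x00ff00ff00ff00ff is a valid 64-bit logical immediate (a
// repeating element containing a single contiguous run of set bits), while
// 0x1234 is not encodable at any element size.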
bool isShiftedImm() const { return Kind == k_ShiftedImm; }
/// Returns the immediate value as a pair of (imm, shift) if the immediate is
/// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
/// immediate that can be shifted by 'Shift'.
template <unsigned Width>
Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
if (isShiftedImm() && Width == getShiftedImmShift())
if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
return std::make_pair(CE->getValue(), Width);
if (isImm())
if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
int64_t Val = CE->getValue();
if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
return std::make_pair(Val >> Width, Width);
else
return std::make_pair(Val, 0u);
}
return {};
}
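// Worked example for getShiftedVal<12>(): the plain immediate 0x456000 has
// its low 12 bits clear, so it yields the pair (0x456, 12); the immediate
// 0x456 yields (0x456, 0).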
bool isAddSubImm() const {
if (!isShiftedImm() && !isImm())
return false;
const MCExpr *Expr;
// An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
if (isShiftedImm()) {
unsigned Shift = ShiftedImm.ShiftAmount;
Expr = ShiftedImm.Val;
if (Shift != 0 && Shift != 12)
return false;
} else {
Expr = getImm();
}
AArch64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
int64_t Addend;
if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
DarwinRefKind, Addend)) {
return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
|| DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
|| (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
|| ELFRefKind == AArch64MCExpr::VK_LO12
|| ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
|| ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
|| ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
|| ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
|| ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
|| ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
|| ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
|| ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
|| ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
}
// If it's a constant, it should be a real immediate in range.
if (auto ShiftedVal = getShiftedVal<12>())
return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
// If it's an expression, we hope for the best and let the fixup/relocation
// code deal with it.
return true;
}
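// For example, "add x0, x1, #0x456000" is accepted: getShiftedVal<12> folds
// the constant to (0x456, 12), and 0x456 lies within [0, 0xfff].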
bool isAddSubImmNeg() const {
if (!isShiftedImm() && !isImm())
return false;
// Otherwise it should be a real negative immediate in range.
if (auto ShiftedVal = getShiftedVal<12>())
return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
return false;
}
// Signed value in the range -128 to +127. For element widths of
// 16 bits or higher it may also be a signed multiple of 256 in the
// range -32768 to +32512.
// For an element width of 8 bits a range of -128 to 255 is accepted,
// since a copy of a byte can be either signed or unsigned.
template <typename T>
DiagnosticPredicate isSVECpyImm() const {
if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
return DiagnosticPredicateTy::NoMatch;
bool IsByte =
std::is_same<int8_t, typename std::make_signed<T>::type>::value;
if (auto ShiftedImm = getShiftedVal<8>())
if (!(IsByte && ShiftedImm->second) &&
AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
<< ShiftedImm->second))
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
// Unsigned value in the range 0 to 255. For element widths of
// 16 bits or higher it may also be a signed multiple of 256 in the
// range 0 to 65280.
template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
return DiagnosticPredicateTy::NoMatch;
bool IsByte =
std::is_same<int8_t, typename std::make_signed<T>::type>::value;
if (auto ShiftedImm = getShiftedVal<8>())
if (!(IsByte && ShiftedImm->second) &&
AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
<< ShiftedImm->second))
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
if (isLogicalImm<T>() && !isSVECpyImm<T>())
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NoMatch;
}
bool isCondCode() const { return Kind == k_CondCode; }
bool isSIMDImmType10() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return false;
return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
}
template<int N>
bool isBranchTarget() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
return true;
int64_t Val = MCE->getValue();
if (Val & 0x3)
return false;
assert(N > 0 && "Branch target immediate cannot be 0 bits!");
return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
}
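// Worked example: isBranchTarget<14> (e.g. for TBZ/TBNZ) accepts word-aligned
// offsets in [-(1 << 13) << 2, ((1 << 13) - 1) << 2], i.e. -32768 to +32764
// bytes.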
bool
isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
if (!isImm())
return false;
AArch64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
int64_t Addend;
if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
DarwinRefKind, Addend)) {
return false;
}
if (DarwinRefKind != MCSymbolRefExpr::VK_None)
return false;
for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
if (ELFRefKind == AllowedModifiers[i])
return true;
}
return false;
}
bool isMovWSymbolG3() const {
return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
}
bool isMovWSymbolG2() const {
return isMovWSymbol(
{AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
AArch64MCExpr::VK_DTPREL_G2});
}
bool isMovWSymbolG1() const {
return isMovWSymbol(
{AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
}
bool isMovWSymbolG0() const {
return isMovWSymbol(
{AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
}
template<int RegWidth, int Shift>
bool isMOVZMovAlias() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t Value = CE->getValue();
return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
}
template<int RegWidth, int Shift>
bool isMOVNMovAlias() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t Value = CE->getValue();
return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
}
bool isFPImm() const {
return Kind == k_FPImm &&
AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
}
bool isBarrier() const { return Kind == k_Barrier; }
bool isSysReg() const { return Kind == k_SysReg; }
bool isMRSSystemRegister() const {
if (!isSysReg()) return false;
return SysReg.MRSReg != -1U;
}
bool isMSRSystemRegister() const {
if (!isSysReg()) return false;
return SysReg.MSRReg != -1U;
}
bool isSystemPStateFieldWithImm0_1() const {
if (!isSysReg()) return false;
return (SysReg.PStateField == AArch64PState::PAN ||
SysReg.PStateField == AArch64PState::DIT ||
SysReg.PStateField == AArch64PState::UAO ||
SysReg.PStateField == AArch64PState::SSBS);
}
bool isSystemPStateFieldWithImm0_15() const {
if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
return SysReg.PStateField != -1U;
}
bool isReg() const override {
return Kind == k_Register;
}
bool isScalarReg() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar;
}
bool isNeonVectorReg() const {
return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
}
bool isNeonVectorRegLo() const {
return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
Reg.RegNum);
}
template <unsigned Class> bool isSVEVectorReg() const {
RegKind RK;
switch (Class) {
case AArch64::ZPRRegClassID:
case AArch64::ZPR_3bRegClassID:
case AArch64::ZPR_4bRegClassID:
RK = RegKind::SVEDataVector;
break;
case AArch64::PPRRegClassID:
case AArch64::PPR_3bRegClassID:
RK = RegKind::SVEPredicateVector;
break;
default:
llvm_unreachable("Unsupport register class");
}
return (Kind == k_Register && Reg.Kind == RK) &&
AArch64MCRegisterClasses[Class].contains(getReg());
}
template <unsigned Class> bool isFPRasZPR() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[Class].contains(getReg());
}
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
return DiagnosticPredicateTy::NoMatch;
if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
template <int ElementWidth, unsigned Class>
DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
return DiagnosticPredicateTy::NoMatch;
if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
template <int ElementWidth, unsigned Class,
AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
bool ShiftWidthAlwaysSame>
DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
if (!VectorMatch.isMatch())
return DiagnosticPredicateTy::NoMatch;
// Give a more specific diagnostic when the user has explicitly typed in
// a shift-amount that does not match what is expected, but for which
// there is also an unscaled addressing mode (e.g. sxtw/uxtw).
bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
ShiftExtendTy == AArch64_AM::SXTW) &&
!ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
return DiagnosticPredicateTy::NoMatch;
if (MatchShift && ShiftExtendTy == getShiftExtendType())
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
bool isGPR32as64() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
}
bool isGPR64as32() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
}
bool isWSeqPair() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
Reg.RegNum);
}
bool isXSeqPair() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
Reg.RegNum);
}
template<int64_t Angle, int64_t Remainder>
DiagnosticPredicate isComplexRotation() const {
if (!isImm()) return DiagnosticPredicateTy::NoMatch;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return DiagnosticPredicateTy::NoMatch;
uint64_t Value = CE->getValue();
if (Value % Angle == Remainder && Value <= 270)
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
template <unsigned RegClassID> bool isGPR64() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[RegClassID].contains(getReg());
}
template <unsigned RegClassID, int ExtWidth>
DiagnosticPredicate isGPR64WithShiftExtend() const {
if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
return DiagnosticPredicateTy::NoMatch;
if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
getShiftExtendAmount() == Log2_32(ExtWidth / 8))
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
/// Is this a vector list with the type implicit (presumably attached to the
/// instruction itself)?
template <RegKind VectorKind, unsigned NumRegs>
bool isImplicitlyTypedVectorList() const {
return Kind == k_VectorList && VectorList.Count == NumRegs &&
VectorList.NumElements == 0 &&
VectorList.RegisterKind == VectorKind;
}
template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
unsigned ElementWidth>
bool isTypedVectorList() const {
if (Kind != k_VectorList)
return false;
if (VectorList.Count != NumRegs)
return false;
if (VectorList.RegisterKind != VectorKind)
return false;
if (VectorList.ElementWidth != ElementWidth)
return false;
return VectorList.NumElements == NumElements;
}
template <int Min, int Max>
DiagnosticPredicate isVectorIndex() const {
if (Kind != k_VectorIndex)
return DiagnosticPredicateTy::NoMatch;
if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
return DiagnosticPredicateTy::Match;
return DiagnosticPredicateTy::NearMatch;
}
bool isToken() const override { return Kind == k_Token; }
bool isTokenEqual(StringRef Str) const {
return Kind == k_Token && getToken() == Str;
}
bool isSysCR() const { return Kind == k_SysCR; }
bool isPrefetch() const { return Kind == k_Prefetch; }
bool isPSBHint() const { return Kind == k_PSBHint; }
bool isBTIHint() const { return Kind == k_BTIHint; }
bool isShiftExtend() const { return Kind == k_ShiftExtend; }
bool isShifter() const {
if (!isShiftExtend())
return false;
AArch64_AM::ShiftExtendType ST = getShiftExtendType();
return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
ST == AArch64_AM::MSL);
}
template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
if (Kind != k_FPImm)
return DiagnosticPredicateTy::NoMatch;
if (getFPImmIsExact()) {
// Lookup the immediate from table of supported immediates.
auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
assert(Desc && "Unknown enum value");
// Calculate its FP value.
APFloat RealVal(APFloat::IEEEdouble());
auto StatusOrErr =
RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
llvm_unreachable("FP immediate is not exact");
if (getFPImm().bitwiseIsEqual(RealVal))
return DiagnosticPredicateTy::Match;
}
return DiagnosticPredicateTy::NearMatch;
}
template <unsigned ImmA, unsigned ImmB>
DiagnosticPredicate isExactFPImm() const {
DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
if ((Res = isExactFPImm<ImmA>()))
return DiagnosticPredicateTy::Match;
if ((Res = isExactFPImm<ImmB>()))
return DiagnosticPredicateTy::Match;
return Res;
}
bool isExtend() const {
if (!isShiftExtend())
return false;
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
ET == AArch64_AM::LSL) &&
getShiftExtendAmount() <= 4;
}
bool isExtend64() const {
if (!isExtend())
return false;
// Make sure the extend expects a 32-bit source register.
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
}
bool isExtendLSL64() const {
if (!isExtend())
return false;
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
ET == AArch64_AM::LSL) &&
getShiftExtendAmount() <= 4;
}
template<int Width> bool isMemXExtend() const {
if (!isExtend())
return false;
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
(getShiftExtendAmount() == Log2_32(Width / 8) ||
getShiftExtendAmount() == 0);
}
template<int Width> bool isMemWExtend() const {
if (!isExtend())
return false;
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
(getShiftExtendAmount() == Log2_32(Width / 8) ||
getShiftExtendAmount() == 0);
}
template <unsigned width>
bool isArithmeticShifter() const {
if (!isShifter())
return false;
// An arithmetic shifter is LSL, LSR, or ASR.
AArch64_AM::ShiftExtendType ST = getShiftExtendType();
return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
}
template <unsigned width>
bool isLogicalShifter() const {
if (!isShifter())
return false;
// A logical shifter is LSL, LSR, ASR or ROR.
AArch64_AM::ShiftExtendType ST = getShiftExtendType();
return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
getShiftExtendAmount() < width;
}
bool isMovImm32Shifter() const {
if (!isShifter())
return false;
// A 32-bit MOVi shifter is LSL of 0 or 16.
AArch64_AM::ShiftExtendType ST = getShiftExtendType();
if (ST != AArch64_AM::LSL)
return false;
uint64_t Val = getShiftExtendAmount();
return (Val == 0 || Val == 16);
}
bool isMovImm64Shifter() const {
if (!isShifter())
return false;
// A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
AArch64_AM::ShiftExtendType ST = getShiftExtendType();
if (ST != AArch64_AM::LSL)
return false;
uint64_t Val = getShiftExtendAmount();
return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
}
bool isLogicalVecShifter() const {
if (!isShifter())
return false;
// A logical vector shifter is a left shift by 0, 8, 16, or 24.
unsigned Shift = getShiftExtendAmount();
return getShiftExtendType() == AArch64_AM::LSL &&
(Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
}
bool isLogicalVecHalfWordShifter() const {
if (!isLogicalVecShifter())
return false;
// A logical vector half-word shifter is a left shift by 0 or 8.
unsigned Shift = getShiftExtendAmount();
return getShiftExtendType() == AArch64_AM::LSL &&
(Shift == 0 || Shift == 8);
}
bool isMoveVecShifter() const {
if (!isShiftExtend())
return false;
// A move vector shifter is an MSL shift of 8 or 16.
unsigned Shift = getShiftExtendAmount();
return getShiftExtendType() == AArch64_AM::MSL &&
(Shift == 8 || Shift == 16);
}
// Fallback unscaled operands are for aliases of LDR/STR that fall back
// to LDUR/STUR when the offset is not legal for the former but is for
// the latter. As such, in addition to checking for being a legal unscaled
// address, also check that it is not a legal scaled address. This avoids
// ambiguity in the matcher.
template<int Width>
bool isSImm9OffsetFB() const {
return isSImm<9>() && !isUImm12Offset<Width / 8>();
}
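// For example, "ldr x0, [x1, #-8]" cannot use the scaled unsigned-offset
// form (the offset is negative), but -8 is a valid simm9, so the mnemonic
// falls back to "ldur x0, [x1, #-8]".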
bool isAdrpLabel() const {
// Validation was handled during parsing, so we just sanity check that
// something didn't go haywire.
if (!isImm())
return false;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
int64_t Val = CE->getValue();
int64_t Min = - (4096 * (1LL << (21 - 1)));
int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
return (Val % 4096) == 0 && Val >= Min && Val <= Max;
}
return true;
}
bool isAdrLabel() const {
// Validation was handled during parsing, so we just sanity check that
// something didn't go haywire.
if (!isImm())
return false;
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
int64_t Val = CE->getValue();
int64_t Min = - (1LL << (21 - 1));
int64_t Max = ((1LL << (21 - 1)) - 1);
return Val >= Min && Val <= Max;
}
return true;
}
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
if (!Expr)
Inst.addOperand(MCOperand::createImm(0));
else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::createImm(CE->getValue()));
else
Inst.addOperand(MCOperand::createExpr(Expr));
}
void addRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getReg()));
}
void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
assert(
AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
const MCRegisterInfo *RI = Ctx.getRegisterInfo();
uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
RI->getEncodingValue(getReg()));
Inst.addOperand(MCOperand::createReg(Reg));
}
void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
assert(
AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
const MCRegisterInfo *RI = Ctx.getRegisterInfo();
uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
RI->getEncodingValue(getReg()));
Inst.addOperand(MCOperand::createReg(Reg));
}
template <int Width>
void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
unsigned Base;
switch (Width) {
case 8: Base = AArch64::B0; break;
case 16: Base = AArch64::H0; break;
case 32: Base = AArch64::S0; break;
case 64: Base = AArch64::D0; break;
case 128: Base = AArch64::Q0; break;
default:
llvm_unreachable("Unsupported width");
}
Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
}
void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
assert(
AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
}
void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
assert(
AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
Inst.addOperand(MCOperand::createReg(getReg()));
}
void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(getReg()));
}
enum VecListIndexType {
VecListIdx_DReg = 0,
VecListIdx_QReg = 1,
VecListIdx_ZReg = 2,
};
template <VecListIndexType RegTy, unsigned NumRegs>
void addVectorListOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
static const unsigned FirstRegs[][5] = {
/* DReg */ { AArch64::Q0,
AArch64::D0, AArch64::D0_D1,
AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
/* QReg */ { AArch64::Q0,
AArch64::Q0, AArch64::Q0_Q1,
AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
/* ZReg */ { AArch64::Z0,
AArch64::Z0, AArch64::Z0_Z1,
AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
};
assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
"NumRegs must be <= 4 for ZRegs");
unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
FirstRegs[(unsigned)RegTy][0]));
}
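// For example, the two-register D-list {v2.8b, v3.8b} has
// getVectorListStart() == Q2, so the operand emitted is
// D0_D1 + (Q2 - Q0), i.e. D2_D3.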
void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getVectorIndex()));
}
template <unsigned ImmIs0, unsigned ImmIs1>
void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
}
void addImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// If this is a pageoff symrefexpr with an addend, adjust the addend
// to be only the page-offset portion. Otherwise, just add the expr
// as-is.
addExpr(Inst, getImm());
}
template <int Shift>
void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
if (auto ShiftedVal = getShiftedVal<Shift>()) {
Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
} else if (isShiftedImm()) {
addExpr(Inst, getShiftedImmVal());
Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
} else {
addExpr(Inst, getImm());
Inst.addOperand(MCOperand::createImm(0));
}
}
template <int Shift>
void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
if (auto ShiftedVal = getShiftedVal<Shift>()) {
Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
} else
llvm_unreachable("Not a shifted negative immediate");
}
void addCondCodeOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getCondCode()));
}
void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE)
addExpr(Inst, getImm());
else
Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
}
void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
addImmOperands(Inst, N);
}
template<int Scale>
void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE) {
Inst.addOperand(MCOperand::createExpr(getImm()));
return;
}
Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
}
void addUImm6Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(MCE->getValue()));
}
template <int Scale>
void addImmScaledOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
}
template <typename T>
void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
typename std::make_unsigned<T>::type Val = MCE->getValue();
uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
Inst.addOperand(MCOperand::createImm(encoding));
}
template <typename T>
void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
typename std::make_unsigned<T>::type Val = ~MCE->getValue();
uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
Inst.addOperand(MCOperand::createImm(encoding));
}
void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
Inst.addOperand(MCOperand::createImm(encoding));
}
void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
// Branch operands don't encode the low bits, so shift them off
// here. If it's a label, however, just put it on directly as there's
// not enough information now to do anything.
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE) {
addExpr(Inst, getImm());
return;
}
assert(MCE && "Invalid constant immediate operand!");
Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
}
void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
// Branch operands don't encode the low bits, so shift them off
// here. If it's a label, however, just put it on directly as there's
// not enough information now to do anything.
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE) {
addExpr(Inst, getImm());
return;
}
assert(MCE && "Invalid constant immediate operand!");
Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
}
void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
// Branch operands don't encode the low bits, so shift them off
// here. If it's a label, however, just put it on directly as there's
// not enough information now to do anything.
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
if (!MCE) {
addExpr(Inst, getImm());
return;
}
assert(MCE && "Invalid constant immediate operand!");
Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
}
void addFPImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(
AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
}
void addBarrierOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getBarrier()));
}
void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
}
void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
}
void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
}
void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
}
void addSysCROperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getSysCR()));
}
void addPrefetchOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getPrefetch()));
}
void addPSBHintOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getPSBHint()));
}
void addBTIHintOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getBTIHint()));
}
void addShifterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
unsigned Imm =
AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
Inst.addOperand(MCOperand::createImm(Imm));
}
void addExtendOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
Inst.addOperand(MCOperand::createImm(Imm));
}
void addExtend64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
Inst.addOperand(MCOperand::createImm(Imm));
}
void addMemExtendOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
Inst.addOperand(MCOperand::createImm(IsSigned));
Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
}
// For 8-bit load/store instructions with a register offset, both the
// "DoShift" and "NoShift" variants have a shift of 0. Because of this,
// they're disambiguated by whether the shift was explicit or implicit rather
// than its size.
void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
AArch64_AM::ShiftExtendType ET = getShiftExtendType();
bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
Inst.addOperand(MCOperand::createImm(IsSigned));
Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
}
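// For example, "ldrb w0, [x1, x2, lsl #0]" (explicit amount) selects the
// "DoShift" variant, while "ldrb w0, [x1, x2]" (implicit) selects "NoShift",
// even though both shift by zero.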
template<int Shift>
void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
uint64_t Value = CE->getValue();
Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
}
template<int Shift>
void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
uint64_t Value = CE->getValue();
Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
}
void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
}
void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
}
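// Worked examples: a rotation of 270 encodes as 270 / 90 == 3 for the even
// operand (e.g. FCMLA) and as (270 - 90) / 180 == 1 for the odd operand
// (e.g. FCADD).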
void print(raw_ostream &OS) const override;
static std::unique_ptr<AArch64Operand>
CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->Tok.IsSuffix = IsSuffix;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
unsigned ShiftAmount = 0,
unsigned HasExplicitAmount = false) {
auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
Op->Reg.RegNum = RegNum;
Op->Reg.Kind = Kind;
Op->Reg.ElementWidth = 0;
Op->Reg.EqualityTy = EqTy;
Op->Reg.ShiftExtend.Type = ExtTy;
Op->Reg.ShiftExtend.Amount = ShiftAmount;
Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
SMLoc S, SMLoc E, MCContext &Ctx,
AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
unsigned ShiftAmount = 0,
unsigned HasExplicitAmount = false) {
assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
Kind == RegKind::SVEPredicateVector) &&
"Invalid vector kind");
auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
HasExplicitAmount);
Op->Reg.ElementWidth = ElementWidth;
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.NumElements = NumElements;
Op->VectorList.ElementWidth = ElementWidth;
Op->VectorList.RegisterKind = RegisterKind;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
Op->VectorIndex.Val = Idx;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
SMLoc E, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
unsigned ShiftAmount,
SMLoc S, SMLoc E,
MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
Op->ShiftedImm.Val = Val;
Op->ShiftedImm.ShiftAmount = ShiftAmount;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
Op->CondCode.Code = Code;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
Op->FPImm.IsExact = IsExact;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
StringRef Str,
SMLoc S,
MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
Op->Barrier.Val = Val;
Op->Barrier.Data = Str.data();
Op->Barrier.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
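// e.g. "dsb sy" stores Val = 0xf (the SY barrier option) alongside the
// spelling "sy" so the operand can later be printed by name.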
static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
uint32_t MRSReg,
uint32_t MSRReg,
uint32_t PStateField,
MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
Op->SysReg.Data = Str.data();
Op->SysReg.Length = Str.size();
Op->SysReg.MRSReg = MRSReg;
Op->SysReg.MSRReg = MSRReg;
Op->SysReg.PStateField = PStateField;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
SMLoc E, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
Op->SysCRImm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
StringRef Str,
SMLoc S,
MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
Op->Prefetch.Val = Val;
Op->Prefetch.Data = Str.data();
Op->Prefetch.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
StringRef Str,
SMLoc S,
MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
Op->PSBHint.Val = Val;
Op->PSBHint.Data = Str.data();
Op->PSBHint.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
StringRef Str,
SMLoc S,
MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
Op->BTIHint.Val = Val << 1 | 32;
Op->BTIHint.Data = Str.data();
Op->BTIHint.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
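// The stored value is the HINT immediate for the BTI alias: e.g. "bti c"
// looks up encoding 1, so Val = (1 << 1) | 32 == 34, i.e. HINT #34.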
static std::unique_ptr<AArch64Operand>
CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
Op->ShiftExtend.Type = ShOp;
Op->ShiftExtend.Amount = Val;
Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
};
} // end anonymous namespace.
void AArch64Operand::print(raw_ostream &OS) const {
switch (Kind) {
case k_FPImm:
OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
if (!getFPImmIsExact())
OS << " (inexact)";
OS << ">";
break;
case k_Barrier: {
StringRef Name = getBarrierName();
if (!Name.empty())
OS << "<barrier " << Name << ">";
else
OS << "<barrier invalid #" << getBarrier() << ">";
break;
}
case k_Immediate:
OS << *getImm();
break;
case k_ShiftedImm: {
unsigned Shift = getShiftedImmShift();
OS << "<shiftedimm ";
OS << *getShiftedImmVal();
OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
break;
}
case k_CondCode:
OS << "<condcode " << getCondCode() << ">";
break;
case k_VectorList: {
OS << "<vectorlist ";
unsigned Reg = getVectorListStart();
for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
OS << Reg + i << " ";
OS << ">";
break;
}
case k_VectorIndex:
OS << "<vectorindex " << getVectorIndex() << ">";
break;
case k_SysReg:
OS << "<sysreg: " << getSysReg() << '>';
break;
case k_Token:
OS << "'" << getToken() << "'";
break;
case k_SysCR:
OS << "c" << getSysCR();
break;
case k_Prefetch: {
StringRef Name = getPrefetchName();
if (!Name.empty())
OS << "<prfop " << Name << ">";
else
OS << "<prfop invalid #" << getPrefetch() << ">";
break;
}
case k_PSBHint:
OS << getPSBHintName();
break;
case k_BTIHint:
OS << getBTIHintName();
break;
case k_Register:
OS << "<register " << getReg() << ">";
if (!getShiftExtendAmount() && !hasShiftExtendAmount())
break;
LLVM_FALLTHROUGH;
case k_ShiftExtend:
OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
<< getShiftExtendAmount();
if (!hasShiftExtendAmount())
OS << "<imp>";
OS << '>';
break;
}
}
/// @name Auto-generated Match Functions
/// {
static unsigned MatchRegisterName(StringRef Name);
/// }
static unsigned MatchNeonVectorRegName(StringRef Name) {
return StringSwitch<unsigned>(Name.lower())
.Case("v0", AArch64::Q0)
.Case("v1", AArch64::Q1)
.Case("v2", AArch64::Q2)
.Case("v3", AArch64::Q3)
.Case("v4", AArch64::Q4)
.Case("v5", AArch64::Q5)
.Case("v6", AArch64::Q6)
.Case("v7", AArch64::Q7)
.Case("v8", AArch64::Q8)
.Case("v9", AArch64::Q9)
.Case("v10", AArch64::Q10)
.Case("v11", AArch64::Q11)
.Case("v12", AArch64::Q12)
.Case("v13", AArch64::Q13)
.Case("v14", AArch64::Q14)
.Case("v15", AArch64::Q15)
.Case("v16", AArch64::Q16)
.Case("v17", AArch64::Q17)
.Case("v18", AArch64::Q18)
.Case("v19", AArch64::Q19)
.Case("v20", AArch64::Q20)
.Case("v21", AArch64::Q21)
.Case("v22", AArch64::Q22)
.Case("v23", AArch64::Q23)
.Case("v24", AArch64::Q24)
.Case("v25", AArch64::Q25)
.Case("v26", AArch64::Q26)
.Case("v27", AArch64::Q27)
.Case("v28", AArch64::Q28)
.Case("v29", AArch64::Q29)
.Case("v30", AArch64::Q30)
.Case("v31", AArch64::Q31)
.Default(0);
}
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements or the element
/// width is implicit or unknown (but the suffix is still a valid vector
/// kind), 0 is used for that field.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
RegKind VectorKind) {
std::pair<int, int> Res = {-1, -1};
switch (VectorKind) {
case RegKind::NeonVector:
Res =
StringSwitch<std::pair<int, int>>(Suffix.lower())
.Case("", {0, 0})
.Case(".1d", {1, 64})
.Case(".1q", {1, 128})
// '.2h' needed for fp16 scalar pairwise reductions
.Case(".2h", {2, 16})
.Case(".2s", {2, 32})
.Case(".2d", {2, 64})
// '.4b' is another special case for the ARMv8.2a dot product
// operand
.Case(".4b", {4, 8})
.Case(".4h", {4, 16})
.Case(".4s", {4, 32})
.Case(".8b", {8, 8})
.Case(".8h", {8, 16})
.Case(".16b", {16, 8})
// Accept the width-neutral ones, too, for verbose syntax. If those
// aren't used in the right places, the token operand won't match so
// all will work out.
.Case(".b", {0, 8})
.Case(".h", {0, 16})
.Case(".s", {0, 32})
.Case(".d", {0, 64})
.Default({-1, -1});
break;
case RegKind::SVEPredicateVector:
case RegKind::SVEDataVector:
Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
.Case("", {0, 0})
.Case(".b", {0, 8})
.Case(".h", {0, 16})
.Case(".s", {0, 32})
.Case(".d", {0, 64})
.Case(".q", {0, 128})
.Default({-1, -1});
break;
default:
llvm_unreachable("Unsupported RegKind");
}
if (Res == std::make_pair(-1, -1))
return Optional<std::pair<int, int>>();
return Optional<std::pair<int, int>>(Res);
}
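// For example, parseVectorKind(".4s", RegKind::NeonVector) returns {4, 32},
// the width-neutral ".b" returns {0, 8}, and an unrecognised suffix returns
// an empty Optional.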
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
return parseVectorKind(Suffix, VectorKind).hasValue();
}
static unsigned matchSVEDataVectorRegName(StringRef Name) {
return StringSwitch<unsigned>(Name.lower())
.Case("z0", AArch64::Z0)
.Case("z1", AArch64::Z1)
.Case("z2", AArch64::Z2)
.Case("z3", AArch64::Z3)
.Case("z4", AArch64::Z4)
.Case("z5", AArch64::Z5)
.Case("z6", AArch64::Z6)
.Case("z7", AArch64::Z7)
.Case("z8", AArch64::Z8)
.Case("z9", AArch64::Z9)
.Case("z10", AArch64::Z10)
.Case("z11", AArch64::Z11)
.Case("z12", AArch64::Z12)
.Case("z13", AArch64::Z13)
.Case("z14", AArch64::Z14)
.Case("z15", AArch64::Z15)
.Case("z16", AArch64::Z16)
.Case("z17", AArch64::Z17)
.Case("z18", AArch64::Z18)
.Case("z19", AArch64::Z19)
.Case("z20", AArch64::Z20)
.Case("z21", AArch64::Z21)
.Case("z22", AArch64::Z22)
.Case("z23", AArch64::Z23)
.Case("z24", AArch64::Z24)
.Case("z25", AArch64::Z25)
.Case("z26", AArch64::Z26)
.Case("z27", AArch64::Z27)
.Case("z28", AArch64::Z28)
.Case("z29", AArch64::Z29)
.Case("z30", AArch64::Z30)
.Case("z31", AArch64::Z31)
.Default(0);
}
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
return StringSwitch<unsigned>(Name.lower())
.Case("p0", AArch64::P0)
.Case("p1", AArch64::P1)
.Case("p2", AArch64::P2)
.Case("p3", AArch64::P3)
.Case("p4", AArch64::P4)
.Case("p5", AArch64::P5)
.Case("p6", AArch64::P6)
.Case("p7", AArch64::P7)
.Case("p8", AArch64::P8)
.Case("p9", AArch64::P9)
.Case("p10", AArch64::P10)
.Case("p11", AArch64::P11)
.Case("p12", AArch64::P12)
.Case("p13", AArch64::P13)
.Case("p14", AArch64::P14)
.Case("p15", AArch64::P15)
.Default(0);
}
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) {
StartLoc = getLoc();
auto Res = tryParseScalarRegister(RegNo);
EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
return Res != MatchOperand_Success;
}
// Matches a register name or register alias previously defined by '.req'
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
RegKind Kind) {
unsigned RegNum = 0;
if ((RegNum = matchSVEDataVectorRegName(Name)))
return Kind == RegKind::SVEDataVector ? RegNum : 0;
if ((RegNum = matchSVEPredicateVectorRegName(Name)))
return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
if ((RegNum = MatchNeonVectorRegName(Name)))
return Kind == RegKind::NeonVector ? RegNum : 0;
// The parsed register must be of RegKind Scalar
if ((RegNum = MatchRegisterName(Name)))
return Kind == RegKind::Scalar ? RegNum : 0;
if (!RegNum) {
// Handle a few common aliases of registers.
if (auto RegNum = StringSwitch<unsigned>(Name.lower())
.Case("fp", AArch64::FP)
.Case("lr", AArch64::LR)
.Case("x31", AArch64::XZR)
.Case("w31", AArch64::WZR)
.Default(0))
return Kind == RegKind::Scalar ? RegNum : 0;
// Check for aliases registered via .req. Canonicalize to lower case.
// That's more consistent since register names are case insensitive, and
// it's how the original entry was passed in from MC/MCParser/AsmParser.
auto Entry = RegisterReqs.find(Name.lower());
if (Entry == RegisterReqs.end())
return 0;
// set RegNum if the match is the right kind of register
if (Kind == Entry->getValue().first)
RegNum = Entry->getValue().second;
}
return RegNum;
}
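// For illustration: after a hypothetical "foo .req x4" directive,
// matchRegisterNameAlias("foo", RegKind::Scalar) returns AArch64::X4, while
// querying the same alias with a vector RegKind returns 0.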
/// tryParseScalarRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register number is returned via \p RegNum.
OperandMatchResultTy
AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
std::string lowerCase = Tok.getString().lower();
unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
if (Reg == 0)
return MatchOperand_NoMatch;
RegNum = Reg;
Parser.Lex(); // Eat identifier token.
return MatchOperand_Success;
}
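// For illustration: with the lookahead token "x3" this sets RegNum to
// AArch64::X3 and consumes the token; any other identifier is left in place
// and reported as NoMatch.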
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
OperandMatchResultTy
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = getLoc();
if (Parser.getTok().isNot(AsmToken::Identifier)) {
Error(S, "Expected cN operand where 0 <= N <= 15");
return MatchOperand_ParseFail;
}
StringRef Tok = Parser.getTok().getIdentifier();
if (Tok[0] != 'c' && Tok[0] != 'C') {
Error(S, "Expected cN operand where 0 <= N <= 15");
return MatchOperand_ParseFail;
}
uint32_t CRNum;
bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
if (BadNum || CRNum > 15) {
Error(S, "Expected cN operand where 0 <= N <= 15");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat identifier token.
Operands.push_back(
AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
return MatchOperand_Success;
}
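// e.g. in "sys #0, c8, c3, #0, x0" the "c8" and "c3" tokens are parsed here
// as SysCR operands with values 8 and 3.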
/// tryParsePrefetch - Try to parse a prefetch operand.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = getLoc();
const AsmToken &Tok = Parser.getTok();
auto LookupByName = [](StringRef N) {
if (IsSVEPrefetch) {
if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
return Optional<unsigned>(Res->Encoding);
} else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
return Optional<unsigned>(Res->Encoding);
return Optional<unsigned>();
};
auto LookupByEncoding = [](unsigned E) {
if (IsSVEPrefetch) {
if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
return Optional<StringRef>(Res->Name);
} else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
return Optional<StringRef>(Res->Name);
return Optional<StringRef>();
};
unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
// Either an identifier for named values or an immediate (5-bit for PRFM,
// 4-bit for the SVE prefetches).
// Eat optional hash.
if (parseOptionalToken(AsmToken::Hash) ||
Tok.is(AsmToken::Integer)) {
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
return MatchOperand_ParseFail;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
if (!MCE) {
TokError("immediate value expected for prefetch operand");
return MatchOperand_ParseFail;
}
unsigned prfop = MCE->getValue();
if (prfop > MaxVal) {
TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
"] expected");
return MatchOperand_ParseFail;
}
auto PRFM = LookupByEncoding(MCE->getValue());
Operands.push_back(AArch64Operand::CreatePrefetch(
prfop, PRFM.getValueOr(""), S, getContext()));
return MatchOperand_Success;
}
if (Tok.isNot(AsmToken::Identifier)) {
TokError("prefetch hint expected");
return MatchOperand_ParseFail;
}
auto PRFM = LookupByName(Tok.getString());
if (!PRFM) {
TokError("prefetch hint expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat identifier token.
Operands.push_back(AArch64Operand::CreatePrefetch(
*PRFM, Tok.getString(), S, getContext()));
return MatchOperand_Success;
}
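// e.g. "prfm pldl1keep, [x0]" and "prfm #0, [x0]" yield the same operand
// (PLDL1KEEP encodes as 0); the immediate form is limited to [0, 31], or
// [0, 15] for the SVE prefetches.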
/// tryParsePSBHint - Try to parse a PSB operand, mapped to a HINT instruction.
OperandMatchResultTy
AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) {
TokError("invalid operand for instruction");
return MatchOperand_ParseFail;
}
auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
if (!PSB) {
TokError("invalid operand for instruction");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat identifier token.
Operands.push_back(AArch64Operand::CreatePSBHint(
PSB->Encoding, Tok.getString(), S, getContext()));
return MatchOperand_Success;
}
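// e.g. "psb csync", the only architected PSB hint, is parsed here; it is an
// alias of HINT #17.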
/// tryParseBTIHint - Try to parse a BTI operand, mapped to a HINT instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) {
TokError("invalid operand for instruction");
return MatchOperand_ParseFail;
}
auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
if (!BTI) {
TokError("invalid operand for instruction");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat identifier token.
Operands.push_back(AArch64Operand::CreateBTIHint(
BTI->Encoding, Tok.getString(), S, getContext()));
return MatchOperand_Success;
}
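// e.g. "bti c" parses the "c" target here; see CreateBTIHint above for how
// the target maps onto the HINT immediate.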
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = getLoc();
const MCExpr *Expr = nullptr;
if (Parser.getTok().is(AsmToken::Hash)) {
Parser.Lex(); // Eat hash token.
}
if (parseSymbolicImmVal(Expr))
return MatchOperand_ParseFail;
AArch64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
int64_t Addend;
if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
ELFRefKind == AArch64MCExpr::VK_INVALID) {
// No modifier was specified at all; this is the syntax for an ELF basic
// ADRP relocation (unfortunately).
Expr =
AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
} else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
Addend != 0) {
Error(S, "gotpage label reference not allowed an addend");
return MatchOperand_ParseFail;
} else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
// The operand must be an @page or @gotpage qualified symbolref.
Error(S, "page or gotpage label reference expected");
return MatchOperand_ParseFail;
}
}
// We have either a label reference (possibly with an addend) or an immediate.
// The addend is a raw value here; the linker will adjust it to reference only
// the page.
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
return MatchOperand_Success;
}
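// e.g. "adrp x0, sym" takes the no-modifier path above and is wrapped as
// VK_ABS_PAGE, while "adrp x0, :got:sym" arrives already classified as
// VK_GOT_PAGE and is accepted unchanged.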
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
SMLoc S = getLoc();
const MCExpr *Expr = nullptr;
// Leave anything with a bracket to the default for SVE
if (getParser().getTok().is(AsmToken::LBrac))
return MatchOperand_NoMatch;
if (getParser().getTok().is(AsmToken::Hash))
getParser().Lex(); // Eat hash token.
if (parseSymbolicImmVal(Expr))
return MatchOperand_ParseFail;
AArch64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
int64_t Addend;
if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
ELFRefKind == AArch64MCExpr::VK_INVALID) {
// No modifier was specified at all; this is the syntax for an ELF basic
// ADR relocation (unfortunately).
Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
} else {
Error(S, "unexpected adr label");
return MatchOperand_ParseFail;
}
}
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
return MatchOperand_Success;
}
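// e.g. "adr x0, label" wraps the bare symbol as VK_ABS; modified references
// such as ":got:sym" are rejected since ADR has no GOT-relative form.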
/// tryParseFPImm - A floating point immediate expression operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = getLoc();
bool Hash = parseOptionalToken(AsmToken::Hash);
// Handle negation, as that still comes through as a separate token.
bool isNegative = parseOptionalToken(AsmToken::Minus);
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
if (!Hash)
return MatchOperand_NoMatch;
TokError("invalid floating point immediate");
return MatchOperand_ParseFail;
}
// Parse hexadecimal representation.
if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
if (Tok.getIntVal() > 255 || isNegative) {
TokError("encoded floating point value out of range");
return MatchOperand_ParseFail;
}
APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
Operands.push_back(
AArch64Operand::CreateFPImm(F, true, S, getContext()));
} else {
// Parse FP representation.
APFloat RealVal(APFloat::IEEEdouble());
auto StatusOrErr =