| //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "MCTargetDesc/ARM64AddressingModes.h" |
| #include "MCTargetDesc/ARM64MCExpr.h" |
| #include "Utils/ARM64BaseInfo.h" |
| #include "llvm/MC/MCParser/MCAsmLexer.h" |
| #include "llvm/MC/MCParser/MCAsmParser.h" |
| #include "llvm/MC/MCParser/MCParsedAsmOperand.h" |
| #include "llvm/MC/MCContext.h" |
| #include "llvm/MC/MCExpr.h" |
| #include "llvm/MC/MCInst.h" |
| #include "llvm/MC/MCRegisterInfo.h" |
| #include "llvm/MC/MCStreamer.h" |
| #include "llvm/MC/MCSubtargetInfo.h" |
| #include "llvm/MC/MCSymbol.h" |
| #include "llvm/MC/MCTargetAsmParser.h" |
| #include "llvm/Support/SourceMgr.h" |
| #include "llvm/Support/TargetRegistry.h" |
| #include "llvm/Support/ErrorHandling.h" |
| #include "llvm/Support/raw_ostream.h" |
| #include "llvm/ADT/SmallString.h" |
| #include "llvm/ADT/SmallVector.h" |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/StringSwitch.h" |
| #include "llvm/ADT/Twine.h" |
| #include <cstdio> |
| using namespace llvm; |
| |
| namespace { |
| |
| class ARM64Operand; |
| |
| class ARM64AsmParser : public MCTargetAsmParser { |
| public: |
| typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector; |
| |
| private: |
| StringRef Mnemonic; ///< Instruction mnemonic. |
| MCSubtargetInfo &STI; |
| MCAsmParser &Parser; |
| |
| MCAsmParser &getParser() const { return Parser; } |
| MCAsmLexer &getLexer() const { return Parser.getLexer(); } |
| |
| SMLoc getLoc() const { return Parser.getTok().getLoc(); } |
| |
| bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); |
| unsigned parseCondCodeString(StringRef Cond); |
| bool parseCondCode(OperandVector &Operands, bool invertCondCode); |
| int tryParseRegister(); |
| int tryMatchVectorRegister(StringRef &Kind, bool expected); |
| bool parseOptionalShift(OperandVector &Operands); |
| bool parseOptionalExtend(OperandVector &Operands); |
| bool parseRegister(OperandVector &Operands); |
| bool parseMemory(OperandVector &Operands); |
| bool parseSymbolicImmVal(const MCExpr *&ImmVal); |
| bool parseVectorList(OperandVector &Operands); |
| bool parseOperand(OperandVector &Operands, bool isCondCode, |
| bool invertCondCode); |
| |
| void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } |
| bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } |
| bool showMatchError(SMLoc Loc, unsigned ErrCode); |
| |
| bool parseDirectiveWord(unsigned Size, SMLoc L); |
| bool parseDirectiveTLSDescCall(SMLoc L); |
| |
| bool parseDirectiveLOH(StringRef LOH, SMLoc L); |
| |
| bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc); |
| bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
| OperandVector &Operands, MCStreamer &Out, |
| unsigned &ErrorInfo, |
| bool MatchingInlineAsm) override; |
| /// @name Auto-generated Match Functions |
| /// { |
| |
| #define GET_ASSEMBLER_HEADER |
| #include "ARM64GenAsmMatcher.inc" |
| |
| /// } |
| |
| OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands); |
| OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands); |
| OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands); |
| OperandMatchResultTy tryParseSysReg(OperandVector &Operands); |
| OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands); |
| OperandMatchResultTy tryParsePrefetch(OperandVector &Operands); |
| OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands); |
| OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands); |
| OperandMatchResultTy tryParseFPImm(OperandVector &Operands); |
| bool tryParseVectorRegister(OperandVector &Operands); |
| |
| public: |
| enum ARM64MatchResultTy { |
| Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY, |
| #define GET_OPERAND_DIAGNOSTIC_TYPES |
| #include "ARM64GenAsmMatcher.inc" |
| }; |
| ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser, |
| const MCInstrInfo &MII, |
| const MCTargetOptions &Options) |
| : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { |
| MCAsmParserExtension::Initialize(_Parser); |
| |
| // Initialize the set of available features. |
| setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); |
| } |
| |
| bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, |
| SMLoc NameLoc, OperandVector &Operands) override; |
| bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; |
| bool ParseDirective(AsmToken DirectiveID) override; |
| unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, |
| unsigned Kind) override; |
| |
| static bool classifySymbolRef(const MCExpr *Expr, |
| ARM64MCExpr::VariantKind &ELFRefKind, |
| MCSymbolRefExpr::VariantKind &DarwinRefKind, |
| int64_t &Addend); |
| }; |
| } // end anonymous namespace |
| |
| namespace { |
| |
| /// ARM64Operand - Instances of this class represent a parsed ARM64 machine |
| /// instruction. |
| class ARM64Operand : public MCParsedAsmOperand { |
| public: |
| enum MemIdxKindTy { |
    ImmediateOffset, // [reg, #imm]: immediate offset, no writeback
    RegisterOffset   // [reg, reg, ext #shift]: register offset, optional extend
| }; |
| |
| private: |
| enum KindTy { |
| k_Immediate, |
| k_Memory, |
| k_Register, |
| k_VectorList, |
| k_VectorIndex, |
| k_Token, |
| k_SysReg, |
| k_SysCR, |
| k_Prefetch, |
| k_Shifter, |
| k_Extend, |
| k_FPImm, |
| k_Barrier |
| } Kind; |
| |
| SMLoc StartLoc, EndLoc, OffsetLoc; |
| |
| struct TokOp { |
| const char *Data; |
| unsigned Length; |
| bool IsSuffix; // Is the operand actually a suffix on the mnemonic. |
| }; |
| |
| struct RegOp { |
| unsigned RegNum; |
| bool isVector; |
| }; |
| |
| struct VectorListOp { |
| unsigned RegNum; |
| unsigned Count; |
| unsigned NumElements; |
| unsigned ElementKind; |
| }; |
| |
| struct VectorIndexOp { |
| unsigned Val; |
| }; |
| |
| struct ImmOp { |
| const MCExpr *Val; |
| }; |
| |
| struct FPImmOp { |
| unsigned Val; // Encoded 8-bit representation. |
| }; |
| |
| struct BarrierOp { |
| unsigned Val; // Not the enum since not all values have names. |
| }; |
| |
| struct SysRegOp { |
| const char *Data; |
| unsigned Length; |
| }; |
| |
| struct SysCRImmOp { |
| unsigned Val; |
| }; |
| |
| struct PrefetchOp { |
| unsigned Val; |
| }; |
| |
| struct ShifterOp { |
| unsigned Val; |
| }; |
| |
| struct ExtendOp { |
| unsigned Val; |
| }; |
| |
  // This covers all forms of ARM64 address expressions.
| struct MemOp { |
| unsigned BaseRegNum, OffsetRegNum; |
| ARM64_AM::ExtendType ExtType; |
| unsigned ShiftVal; |
| bool ExplicitShift; |
| const MCExpr *OffsetImm; |
| MemIdxKindTy Mode; |
| }; |
| |
| union { |
| struct TokOp Tok; |
| struct RegOp Reg; |
| struct VectorListOp VectorList; |
| struct VectorIndexOp VectorIndex; |
| struct ImmOp Imm; |
| struct FPImmOp FPImm; |
| struct BarrierOp Barrier; |
| struct SysRegOp SysReg; |
| struct SysCRImmOp SysCRImm; |
| struct PrefetchOp Prefetch; |
| struct ShifterOp Shifter; |
| struct ExtendOp Extend; |
| struct MemOp Mem; |
| }; |
| |
  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
| MCContext &Ctx; |
| |
| ARM64Operand(KindTy K, MCContext &_Ctx) |
| : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {} |
| |
| public: |
| ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) { |
| Kind = o.Kind; |
| StartLoc = o.StartLoc; |
| EndLoc = o.EndLoc; |
| switch (Kind) { |
| case k_Token: |
| Tok = o.Tok; |
| break; |
| case k_Immediate: |
| Imm = o.Imm; |
| break; |
| case k_FPImm: |
| FPImm = o.FPImm; |
| break; |
| case k_Barrier: |
| Barrier = o.Barrier; |
| break; |
| case k_Register: |
| Reg = o.Reg; |
| break; |
| case k_VectorList: |
| VectorList = o.VectorList; |
| break; |
| case k_VectorIndex: |
| VectorIndex = o.VectorIndex; |
| break; |
| case k_SysReg: |
| SysReg = o.SysReg; |
| break; |
| case k_SysCR: |
| SysCRImm = o.SysCRImm; |
| break; |
| case k_Prefetch: |
| Prefetch = o.Prefetch; |
| break; |
| case k_Memory: |
| Mem = o.Mem; |
| break; |
| case k_Shifter: |
| Shifter = o.Shifter; |
| break; |
| case k_Extend: |
| Extend = o.Extend; |
| break; |
| } |
| } |
| |
| /// getStartLoc - Get the location of the first token of this operand. |
| SMLoc getStartLoc() const override { return StartLoc; } |
| /// getEndLoc - Get the location of the last token of this operand. |
| SMLoc getEndLoc() const override { return EndLoc; } |
| /// getOffsetLoc - Get the location of the offset of this memory operand. |
| SMLoc getOffsetLoc() const { return OffsetLoc; } |
| |
| StringRef getToken() const { |
| assert(Kind == k_Token && "Invalid access!"); |
| return StringRef(Tok.Data, Tok.Length); |
| } |
| |
| bool isTokenSuffix() const { |
| assert(Kind == k_Token && "Invalid access!"); |
| return Tok.IsSuffix; |
| } |
| |
| const MCExpr *getImm() const { |
| assert(Kind == k_Immediate && "Invalid access!"); |
| return Imm.Val; |
| } |
| |
| unsigned getFPImm() const { |
| assert(Kind == k_FPImm && "Invalid access!"); |
| return FPImm.Val; |
| } |
| |
| unsigned getBarrier() const { |
| assert(Kind == k_Barrier && "Invalid access!"); |
| return Barrier.Val; |
| } |
| |
| unsigned getReg() const override { |
| assert(Kind == k_Register && "Invalid access!"); |
| return Reg.RegNum; |
| } |
| |
| unsigned getVectorListStart() const { |
| assert(Kind == k_VectorList && "Invalid access!"); |
| return VectorList.RegNum; |
| } |
| |
| unsigned getVectorListCount() const { |
| assert(Kind == k_VectorList && "Invalid access!"); |
| return VectorList.Count; |
| } |
| |
| unsigned getVectorIndex() const { |
| assert(Kind == k_VectorIndex && "Invalid access!"); |
| return VectorIndex.Val; |
| } |
| |
| StringRef getSysReg() const { |
| assert(Kind == k_SysReg && "Invalid access!"); |
| return StringRef(SysReg.Data, SysReg.Length); |
| } |
| |
| unsigned getSysCR() const { |
| assert(Kind == k_SysCR && "Invalid access!"); |
| return SysCRImm.Val; |
| } |
| |
| unsigned getPrefetch() const { |
| assert(Kind == k_Prefetch && "Invalid access!"); |
| return Prefetch.Val; |
| } |
| |
| unsigned getShifter() const { |
| assert(Kind == k_Shifter && "Invalid access!"); |
| return Shifter.Val; |
| } |
| |
| unsigned getExtend() const { |
| assert(Kind == k_Extend && "Invalid access!"); |
| return Extend.Val; |
| } |
| |
| bool isImm() const override { return Kind == k_Immediate; } |
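  // isSImm9 matches the 9-bit signed offsets used by the unscaled (LDUR/STUR)
  // and pre/post-indexed load/store forms, e.g. "ldur x0, [x1, #-3]".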
| bool isSImm9() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= -256 && Val < 256); |
| } |
| bool isSImm7s4() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= -256 && Val <= 252 && (Val & 3) == 0); |
| } |
| bool isSImm7s8() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= -512 && Val <= 504 && (Val & 7) == 0); |
| } |
| bool isSImm7s16() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0); |
| } |
| bool isImm0_7() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 0 && Val < 8); |
| } |
| bool isImm1_8() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val > 0 && Val < 9); |
| } |
| bool isImm0_15() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 0 && Val < 16); |
| } |
| bool isImm1_16() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val > 0 && Val < 17); |
| } |
| bool isImm0_31() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 0 && Val < 32); |
| } |
| bool isImm1_31() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 1 && Val < 32); |
| } |
| bool isImm1_32() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 1 && Val < 33); |
| } |
| bool isImm0_63() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 0 && Val < 64); |
| } |
| bool isImm1_63() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 1 && Val < 64); |
| } |
| bool isImm1_64() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 1 && Val < 65); |
| } |
| bool isImm0_127() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 0 && Val < 128); |
| } |
| bool isImm0_255() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 0 && Val < 256); |
| } |
| bool isImm0_65535() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| int64_t Val = MCE->getValue(); |
| return (Val >= 0 && Val < 65536); |
| } |
| bool isLogicalImm32() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32); |
| } |
| bool isLogicalImm64() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64); |
| } |
| bool isSIMDImmType10() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return false; |
| return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue()); |
| } |
| bool isBranchTarget26() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return true; |
| int64_t Val = MCE->getValue(); |
| if (Val & 0x3) |
| return false; |
| return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2)); |
| } |
| bool isPCRelLabel19() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return true; |
| int64_t Val = MCE->getValue(); |
| if (Val & 0x3) |
| return false; |
| return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2)); |
| } |
| bool isBranchTarget14() const { |
| if (!isImm()) |
| return false; |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| return true; |
| int64_t Val = MCE->getValue(); |
| if (Val & 0x3) |
| return false; |
| return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2)); |
| } |
| |
| bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const { |
| if (!isImm()) |
| return false; |
| |
| ARM64MCExpr::VariantKind ELFRefKind; |
| MCSymbolRefExpr::VariantKind DarwinRefKind; |
| int64_t Addend; |
| if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind, |
| Addend)) { |
| return false; |
| } |
| if (DarwinRefKind != MCSymbolRefExpr::VK_None) |
| return false; |
| |
| for (unsigned i = 0; i != AllowedModifiers.size(); ++i) { |
| if (ELFRefKind == AllowedModifiers[i]) |
| return Addend == 0; |
| } |
| |
| return false; |
| } |
| |
| bool isMovZSymbolG3() const { |
| static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isMovZSymbolG2() const { |
| static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2, |
| ARM64MCExpr::VK_ABS_G2_S, |
| ARM64MCExpr::VK_TPREL_G2, |
| ARM64MCExpr::VK_DTPREL_G2 }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isMovZSymbolG1() const { |
| static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1, |
| ARM64MCExpr::VK_ABS_G1_S, |
| ARM64MCExpr::VK_GOTTPREL_G1, |
| ARM64MCExpr::VK_TPREL_G1, |
| ARM64MCExpr::VK_DTPREL_G1, }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isMovZSymbolG0() const { |
| static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0, |
| ARM64MCExpr::VK_ABS_G0_S, |
| ARM64MCExpr::VK_TPREL_G0, |
| ARM64MCExpr::VK_DTPREL_G0 }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isMovKSymbolG3() const { |
| static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isMovKSymbolG2() const { |
| static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isMovKSymbolG1() const { |
| static ARM64MCExpr::VariantKind Variants[] = { |
| ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC, |
| ARM64MCExpr::VK_DTPREL_G1_NC |
| }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isMovKSymbolG0() const { |
| static ARM64MCExpr::VariantKind Variants[] = { |
| ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC, |
| ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC |
| }; |
| return isMovWSymbol(Variants); |
| } |
| |
| bool isFPImm() const { return Kind == k_FPImm; } |
| bool isBarrier() const { return Kind == k_Barrier; } |
| bool isSysReg() const { return Kind == k_SysReg; } |
| bool isMRSSystemRegister() const { |
| if (!isSysReg()) return false; |
| |
| bool IsKnownRegister; |
| ARM64SysReg::MRSMapper().fromString(getSysReg(), IsKnownRegister); |
| |
| return IsKnownRegister; |
| } |
| bool isMSRSystemRegister() const { |
| if (!isSysReg()) return false; |
| |
| bool IsKnownRegister; |
| ARM64SysReg::MSRMapper().fromString(getSysReg(), IsKnownRegister); |
| |
| return IsKnownRegister; |
| } |
| bool isSystemPStateField() const { |
| if (!isSysReg()) return false; |
| |
| bool IsKnownRegister; |
| ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister); |
| |
| return IsKnownRegister; |
| } |
| bool isReg() const override { return Kind == k_Register && !Reg.isVector; } |
| bool isVectorReg() const { return Kind == k_Register && Reg.isVector; } |
| bool isVectorRegLo() const { |
| return Kind == k_Register && Reg.isVector && |
| ARM64MCRegisterClasses[ARM64::FPR128_loRegClassID].contains(Reg.RegNum); |
| } |
| |
| /// Is this a vector list with the type implicit (presumably attached to the |
| /// instruction itself)? |
| template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const { |
| return Kind == k_VectorList && VectorList.Count == NumRegs && |
| !VectorList.ElementKind; |
| } |
| |
| template <unsigned NumRegs, unsigned NumElements, char ElementKind> |
| bool isTypedVectorList() const { |
| if (Kind != k_VectorList) |
| return false; |
| if (VectorList.Count != NumRegs) |
| return false; |
| if (VectorList.ElementKind != ElementKind) |
| return false; |
| return VectorList.NumElements == NumElements; |
| } |
| |
| bool isVectorIndexB() const { |
| return Kind == k_VectorIndex && VectorIndex.Val < 16; |
| } |
| bool isVectorIndexH() const { |
| return Kind == k_VectorIndex && VectorIndex.Val < 8; |
| } |
| bool isVectorIndexS() const { |
| return Kind == k_VectorIndex && VectorIndex.Val < 4; |
| } |
| bool isVectorIndexD() const { |
| return Kind == k_VectorIndex && VectorIndex.Val < 2; |
| } |
| bool isToken() const override { return Kind == k_Token; } |
| bool isTokenEqual(StringRef Str) const { |
| return Kind == k_Token && getToken() == Str; |
| } |
| bool isMem() const override { return Kind == k_Memory; } |
| bool isSysCR() const { return Kind == k_SysCR; } |
| bool isPrefetch() const { return Kind == k_Prefetch; } |
| bool isShifter() const { return Kind == k_Shifter; } |
| bool isExtend() const { |
    // lsl is an alias for UXTW but will be parsed as a k_Shifter operand.
| if (isShifter()) { |
| ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val); |
| return ST == ARM64_AM::LSL; |
| } |
| return Kind == k_Extend; |
| } |
| bool isExtend64() const { |
| if (Kind != k_Extend) |
| return false; |
    // UXTX and SXTX require a 64-bit source register and are matched by the
    // ExtendLSL64 class instead, so exclude them here.
| ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val); |
| return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX; |
| } |
| bool isExtendLSL64() const { |
    // lsl is an alias for UXTX but will be parsed as a k_Shifter operand.
| if (isShifter()) { |
| ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val); |
| return ST == ARM64_AM::LSL; |
| } |
| if (Kind != k_Extend) |
| return false; |
| ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val); |
| return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX; |
| } |
| |
| bool isArithmeticShifter() const { |
| if (!isShifter()) |
| return false; |
| |
| // An arithmetic shifter is LSL, LSR, or ASR. |
| ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val); |
| return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR; |
| } |
| |
| bool isMovImm32Shifter() const { |
| if (!isShifter()) |
| return false; |
| |
    // A 32-bit MOVi shifter is LSL of 0 or 16.
| ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val); |
| if (ST != ARM64_AM::LSL) |
| return false; |
| uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val); |
| return (Val == 0 || Val == 16); |
| } |
| |
| bool isMovImm64Shifter() const { |
| if (!isShifter()) |
| return false; |
| |
    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
| ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val); |
| if (ST != ARM64_AM::LSL) |
| return false; |
| uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val); |
| return (Val == 0 || Val == 16 || Val == 32 || Val == 48); |
| } |
| |
| bool isAddSubShifter() const { |
| if (!isShifter()) |
| return false; |
| |
| // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'. |
| unsigned Val = Shifter.Val; |
| return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL && |
| (ARM64_AM::getShiftValue(Val) == 0 || |
| ARM64_AM::getShiftValue(Val) == 12); |
| } |
| |
| bool isLogicalVecShifter() const { |
| if (!isShifter()) |
| return false; |
| |
| // A logical vector shifter is a left shift by 0, 8, 16, or 24. |
| unsigned Val = Shifter.Val; |
| unsigned Shift = ARM64_AM::getShiftValue(Val); |
| return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL && |
| (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24); |
| } |
| |
| bool isLogicalVecHalfWordShifter() const { |
| if (!isLogicalVecShifter()) |
| return false; |
| |
    // A logical vector halfword shifter is a left shift by 0 or 8.
| unsigned Val = Shifter.Val; |
| unsigned Shift = ARM64_AM::getShiftValue(Val); |
| return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL && |
| (Shift == 0 || Shift == 8); |
| } |
| |
| bool isMoveVecShifter() const { |
| if (!isShifter()) |
| return false; |
| |
    // A move vector shifter is an MSL shift of 8 or 16.
| unsigned Val = Shifter.Val; |
| unsigned Shift = ARM64_AM::getShiftValue(Val); |
| return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL && |
| (Shift == 8 || Shift == 16); |
| } |
| |
| bool isMemoryRegisterOffset8() const { |
| return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0; |
| } |
| |
| bool isMemoryRegisterOffset16() const { |
| return isMem() && Mem.Mode == RegisterOffset && |
| (Mem.ShiftVal == 0 || Mem.ShiftVal == 1); |
| } |
| |
| bool isMemoryRegisterOffset32() const { |
| return isMem() && Mem.Mode == RegisterOffset && |
| (Mem.ShiftVal == 0 || Mem.ShiftVal == 2); |
| } |
| |
| bool isMemoryRegisterOffset64() const { |
| return isMem() && Mem.Mode == RegisterOffset && |
| (Mem.ShiftVal == 0 || Mem.ShiftVal == 3); |
| } |
| |
| bool isMemoryRegisterOffset128() const { |
| return isMem() && Mem.Mode == RegisterOffset && |
| (Mem.ShiftVal == 0 || Mem.ShiftVal == 4); |
| } |
| |
| bool isMemoryUnscaled() const { |
| if (!isMem()) |
| return false; |
| if (Mem.Mode != ImmediateOffset) |
| return false; |
| if (!Mem.OffsetImm) |
| return true; |
| // Make sure the immediate value is valid. |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| if (!CE) |
| return false; |
| // The offset must fit in a signed 9-bit unscaled immediate. |
| int64_t Value = CE->getValue(); |
| return (Value >= -256 && Value < 256); |
| } |
| // Fallback unscaled operands are for aliases of LDR/STR that fall back |
| // to LDUR/STUR when the offset is not legal for the former but is for |
| // the latter. As such, in addition to checking for being a legal unscaled |
| // address, also check that it is not a legal scaled address. This avoids |
| // ambiguity in the matcher. |
| bool isMemoryUnscaledFB8() const { |
| return isMemoryUnscaled() && !isMemoryIndexed8(); |
| } |
| bool isMemoryUnscaledFB16() const { |
| return isMemoryUnscaled() && !isMemoryIndexed16(); |
| } |
| bool isMemoryUnscaledFB32() const { |
| return isMemoryUnscaled() && !isMemoryIndexed32(); |
| } |
| bool isMemoryUnscaledFB64() const { |
| return isMemoryUnscaled() && !isMemoryIndexed64(); |
| } |
| bool isMemoryUnscaledFB128() const { |
| return isMemoryUnscaled() && !isMemoryIndexed128(); |
| } |
| bool isMemoryIndexed(unsigned Scale) const { |
| if (!isMem()) |
| return false; |
| if (Mem.Mode != ImmediateOffset) |
| return false; |
| if (!Mem.OffsetImm) |
| return true; |
| // Make sure the immediate value is valid. |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| |
| if (CE) { |
| // The offset must be a positive multiple of the scale and in range of |
| // encoding with a 12-bit immediate. |
| int64_t Value = CE->getValue(); |
| return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale)); |
| } |
| |
| // If it's not a constant, check for some expressions we know. |
| const MCExpr *Expr = Mem.OffsetImm; |
| ARM64MCExpr::VariantKind ELFRefKind; |
| MCSymbolRefExpr::VariantKind DarwinRefKind; |
| int64_t Addend; |
| if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, |
| Addend)) { |
| // If we don't understand the expression, assume the best and |
| // let the fixup and relocation code deal with it. |
| return true; |
| } |
| |
| if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || |
| ELFRefKind == ARM64MCExpr::VK_LO12 || |
| ELFRefKind == ARM64MCExpr::VK_GOT_LO12 || |
| ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 || |
| ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC || |
| ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 || |
| ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC || |
| ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC || |
| ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) { |
| // Note that we don't range-check the addend. It's adjusted modulo page |
| // size when converted, so there is no "out of range" condition when using |
| // @pageoff. |
| return Addend >= 0 && (Addend % Scale) == 0; |
| } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF || |
| DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) { |
| // @gotpageoff/@tlvppageoff can only be used directly, not with an addend. |
| return Addend == 0; |
| } |
| |
| return false; |
| } |
| bool isMemoryIndexed128() const { return isMemoryIndexed(16); } |
| bool isMemoryIndexed64() const { return isMemoryIndexed(8); } |
| bool isMemoryIndexed32() const { return isMemoryIndexed(4); } |
| bool isMemoryIndexed16() const { return isMemoryIndexed(2); } |
| bool isMemoryIndexed8() const { return isMemoryIndexed(1); } |
| bool isMemoryNoIndex() const { |
| if (!isMem()) |
| return false; |
| if (Mem.Mode != ImmediateOffset) |
| return false; |
| if (!Mem.OffsetImm) |
| return true; |
| |
| // Make sure the immediate value is valid. Only zero is allowed. |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| if (!CE || CE->getValue() != 0) |
| return false; |
| return true; |
| } |
| bool isMemorySIMDNoIndex() const { |
| if (!isMem()) |
| return false; |
| if (Mem.Mode != ImmediateOffset) |
| return false; |
| return Mem.OffsetImm == nullptr; |
| } |
| bool isMemoryIndexedSImm9() const { |
| if (!isMem() || Mem.Mode != ImmediateOffset) |
| return false; |
| if (!Mem.OffsetImm) |
| return true; |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| assert(CE && "Non-constant pre-indexed offset!"); |
| int64_t Value = CE->getValue(); |
| return Value >= -256 && Value <= 255; |
| } |
| bool isMemoryIndexed32SImm7() const { |
| if (!isMem() || Mem.Mode != ImmediateOffset) |
| return false; |
| if (!Mem.OffsetImm) |
| return true; |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| assert(CE && "Non-constant pre-indexed offset!"); |
| int64_t Value = CE->getValue(); |
| return ((Value % 4) == 0) && Value >= -256 && Value <= 252; |
| } |
| bool isMemoryIndexed64SImm7() const { |
| if (!isMem() || Mem.Mode != ImmediateOffset) |
| return false; |
| if (!Mem.OffsetImm) |
| return true; |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| assert(CE && "Non-constant pre-indexed offset!"); |
| int64_t Value = CE->getValue(); |
| return ((Value % 8) == 0) && Value >= -512 && Value <= 504; |
| } |
| bool isMemoryIndexed128SImm7() const { |
| if (!isMem() || Mem.Mode != ImmediateOffset) |
| return false; |
| if (!Mem.OffsetImm) |
| return true; |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| assert(CE && "Non-constant pre-indexed offset!"); |
| int64_t Value = CE->getValue(); |
| return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008; |
| } |
| |
| bool isAdrpLabel() const { |
| // Validation was handled during parsing, so we just sanity check that |
| // something didn't go haywire. |
| if (!isImm()) |
| return false; |
| |
| if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { |
| int64_t Val = CE->getValue(); |
| int64_t Min = - (4096 * (1LL << (21 - 1))); |
| int64_t Max = 4096 * ((1LL << (21 - 1)) - 1); |
| return (Val % 4096) == 0 && Val >= Min && Val <= Max; |
| } |
| |
| return true; |
| } |
| |
| bool isAdrLabel() const { |
| // Validation was handled during parsing, so we just sanity check that |
| // something didn't go haywire. |
| if (!isImm()) |
| return false; |
| |
| if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { |
| int64_t Val = CE->getValue(); |
| int64_t Min = - (1LL << (21 - 1)); |
| int64_t Max = ((1LL << (21 - 1)) - 1); |
| return Val >= Min && Val <= Max; |
| } |
| |
| return true; |
| } |
| |
| void addExpr(MCInst &Inst, const MCExpr *Expr) const { |
| // Add as immediates when possible. Null MCExpr = 0. |
| if (!Expr) |
| Inst.addOperand(MCOperand::CreateImm(0)); |
| else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) |
| Inst.addOperand(MCOperand::CreateImm(CE->getValue())); |
| else |
| Inst.addOperand(MCOperand::CreateExpr(Expr)); |
| } |
| |
| void addRegOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateReg(getReg())); |
| } |
| |
| void addVectorRegOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateReg(getReg())); |
| } |
| |
| void addVectorRegLoOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateReg(getReg())); |
| } |
| |
| template <unsigned NumRegs> |
| void addVectorList64Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1, |
| ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 }; |
| unsigned FirstReg = FirstRegs[NumRegs - 1]; |
| |
| Inst.addOperand( |
| MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0)); |
| } |
| |
| template <unsigned NumRegs> |
| void addVectorList128Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1, |
| ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 }; |
| unsigned FirstReg = FirstRegs[NumRegs - 1]; |
| |
| Inst.addOperand( |
| MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0)); |
| } |
| |
| void addVectorIndexBOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); |
| } |
| |
| void addVectorIndexHOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); |
| } |
| |
| void addVectorIndexSOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); |
| } |
| |
| void addVectorIndexDOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); |
| } |
| |
| void addImmOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
    // addExpr lowers constant expressions to plain immediates and adds
    // anything else (e.g. symbolic references) as an MCExpr operand.
| addExpr(Inst, getImm()); |
| } |
| |
| void addAdrpLabelOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) |
| addExpr(Inst, getImm()); |
| else |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12)); |
| } |
| |
| void addAdrLabelOperands(MCInst &Inst, unsigned N) const { |
| addImmOperands(Inst, N); |
| } |
| |
| void addSImm9Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addSImm7s4Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4)); |
| } |
| |
| void addSImm7s8Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8)); |
| } |
| |
| void addSImm7s16Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16)); |
| } |
| |
| void addImm0_7Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm1_8Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm0_15Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm1_16Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm0_31Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm1_31Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm1_32Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm0_63Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm1_63Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm1_64Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm0_127Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm0_255Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addImm0_65535Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue())); |
| } |
| |
| void addLogicalImm32Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid logical immediate operand!"); |
| uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32); |
| Inst.addOperand(MCOperand::CreateImm(encoding)); |
| } |
| |
| void addLogicalImm64Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid logical immediate operand!"); |
| uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64); |
| Inst.addOperand(MCOperand::CreateImm(encoding)); |
| } |
| |
| void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| assert(MCE && "Invalid immediate operand!"); |
| uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue()); |
| Inst.addOperand(MCOperand::CreateImm(encoding)); |
| } |
| |
| void addBranchTarget26Operands(MCInst &Inst, unsigned N) const { |
| // Branch operands don't encode the low bits, so shift them off |
| // here. If it's a label, however, just put it on directly as there's |
| // not enough information now to do anything. |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) { |
| addExpr(Inst, getImm()); |
| return; |
| } |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2)); |
| } |
| |
| void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const { |
| // Branch operands don't encode the low bits, so shift them off |
| // here. If it's a label, however, just put it on directly as there's |
| // not enough information now to do anything. |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) { |
| addExpr(Inst, getImm()); |
| return; |
| } |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2)); |
| } |
| |
| void addBranchTarget14Operands(MCInst &Inst, unsigned N) const { |
| // Branch operands don't encode the low bits, so shift them off |
| // here. If it's a label, however, just put it on directly as there's |
| // not enough information now to do anything. |
| assert(N == 1 && "Invalid number of operands!"); |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
| if (!MCE) { |
| addExpr(Inst, getImm()); |
| return; |
| } |
| assert(MCE && "Invalid constant immediate operand!"); |
| Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2)); |
| } |
| |
| void addFPImmOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getFPImm())); |
| } |
| |
| void addBarrierOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getBarrier())); |
| } |
| |
| void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| |
| bool Valid; |
| uint32_t Bits = ARM64SysReg::MRSMapper().fromString(getSysReg(), Valid); |
| |
| Inst.addOperand(MCOperand::CreateImm(Bits)); |
| } |
| |
| void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| |
| bool Valid; |
| uint32_t Bits = ARM64SysReg::MSRMapper().fromString(getSysReg(), Valid); |
| |
| Inst.addOperand(MCOperand::CreateImm(Bits)); |
| } |
| |
| void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| |
| bool Valid; |
| uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid); |
| |
| Inst.addOperand(MCOperand::CreateImm(Bits)); |
| } |
| |
| void addSysCROperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getSysCR())); |
| } |
| |
| void addPrefetchOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getPrefetch())); |
| } |
| |
| void addShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addAddSubShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getShifter())); |
| } |
| |
| void addExtendOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
    // lsl is an alias for UXTW but will be parsed as a k_Shifter operand.
| if (isShifter()) { |
| assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL); |
| unsigned imm = getArithExtendImm(ARM64_AM::UXTW, |
| ARM64_AM::getShiftValue(getShifter())); |
| Inst.addOperand(MCOperand::CreateImm(imm)); |
| } else |
| Inst.addOperand(MCOperand::CreateImm(getExtend())); |
| } |
| |
| void addExtend64Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
| Inst.addOperand(MCOperand::CreateImm(getExtend())); |
| } |
| |
| void addExtendLSL64Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && "Invalid number of operands!"); |
    // lsl is an alias for UXTX but will be parsed as a k_Shifter operand.
| if (isShifter()) { |
| assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL); |
| unsigned imm = getArithExtendImm(ARM64_AM::UXTX, |
| ARM64_AM::getShiftValue(getShifter())); |
| Inst.addOperand(MCOperand::CreateImm(imm)); |
| } else |
| Inst.addOperand(MCOperand::CreateImm(getExtend())); |
| } |
| |
| void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) { |
| assert(N == 3 && "Invalid number of operands!"); |
| |
| Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum)); |
| Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum))); |
| unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift); |
| Inst.addOperand(MCOperand::CreateImm(ExtendImm)); |
| } |
| |
| void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) { |
| addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift); |
| } |
| |
| void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) { |
| addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1); |
| } |
| |
| void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) { |
| addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2); |
| } |
| |
| void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) { |
| addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3); |
| } |
| |
| void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) { |
| addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4); |
| } |
| |
| void addMemoryIndexedOperands(MCInst &Inst, unsigned N, |
| unsigned Scale) const { |
| // Add the base register operand. |
| Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum)); |
| |
| if (!Mem.OffsetImm) { |
| // There isn't an offset. |
| Inst.addOperand(MCOperand::CreateImm(0)); |
| return; |
| } |
| |
| // Add the offset operand. |
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
      assert(CE->getValue() % Scale == 0 &&
             "Offset operand must be a multiple of the scale!");

      // The MCInst offset operand doesn't include the low bits (like the
      // instruction encoding). A constant offset is complete at this point.
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
      return;
    }
| |
| // If this is a pageoff symrefexpr with an addend, the linker will |
| // do the scaling of the addend. |
| // |
| // Otherwise we don't know what this is, so just add the scaling divide to |
| // the expression and let the MC fixup evaluation code deal with it. |
| const MCExpr *Expr = Mem.OffsetImm; |
| ARM64MCExpr::VariantKind ELFRefKind; |
| MCSymbolRefExpr::VariantKind DarwinRefKind; |
| int64_t Addend; |
| if (Scale > 1 && |
| (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, |
| Addend) || |
| (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) { |
| Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx), |
| Ctx); |
| } |
| |
| Inst.addOperand(MCOperand::CreateExpr(Expr)); |
| } |
| |
| void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!"); |
| // Add the base register operand. |
| Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum)); |
| |
| // Add the offset operand. |
| if (!Mem.OffsetImm) |
| Inst.addOperand(MCOperand::CreateImm(0)); |
| else { |
| // Only constant offsets supported. |
| const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm); |
| Inst.addOperand(MCOperand::CreateImm(CE->getValue())); |
| } |
| } |
| |
| void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!"); |
| addMemoryIndexedOperands(Inst, N, 16); |
| } |
| |
| void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!"); |
| addMemoryIndexedOperands(Inst, N, 8); |
| } |
| |
| void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!"); |
| addMemoryIndexedOperands(Inst, N, 4); |
| } |
| |
| void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!"); |
| addMemoryIndexedOperands(Inst, N, 2); |
| } |
| |
| void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const { |
| assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!"); |
| addMemoryIndexedOperands(Inst, N, 1); |
| } |
| |
| void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!"); |
| // Add the base register operand (the offset is always zero, so ignore it). |
| Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum)); |
| } |
| |
| void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const { |
| assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!"); |
| // Add the base register operand (the offset is always zero, so ignore it). |
| Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum)); |
| } |
| |
| void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N, |
| unsigned Scale) const { |
| assert(N == 2 && "Invalid number of operands!"); |
| |
| // Add the base register operand. |
| Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum)); |
| |
| // Add the offset operand. |
| int64_t Offset = 0; |
| if (Mem.OffsetImm) { |
| const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm); |
| assert(CE && "Non-constant indexed offset operand!"); |
| Offset = CE->getValue(); |
| } |
| |
| if (Scale != 1) { |
| assert(Offset % Scale == 0 && |
| "Offset operand must be a multiple of the scale!"); |
| Offset /= Scale; |
| } |
| |
| Inst.addOperand(MCOperand::CreateImm(Offset)); |
| } |
| |
| void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const { |
| addMemoryWritebackIndexedOperands(Inst, N, 1); |
| } |
| |
| void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const { |
| addMemoryWritebackIndexedOperands(Inst, N, 4); |
| } |
| |
| void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const { |
| addMemoryWritebackIndexedOperands(Inst, N, 8); |
| } |
| |
| void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const { |
| addMemoryWritebackIndexedOperands(Inst, N, 16); |
| } |
| |
| void print(raw_ostream &OS) const override; |
| |
| static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S, |
| MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Token, Ctx); |
| Op->Tok.Data = Str.data(); |
| Op->Tok.Length = Str.size(); |
| Op->Tok.IsSuffix = IsSuffix; |
| Op->StartLoc = S; |
| Op->EndLoc = S; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S, |
| SMLoc E, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Register, Ctx); |
| Op->Reg.RegNum = RegNum; |
| Op->Reg.isVector = isVector; |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count, |
| unsigned NumElements, char ElementKind, |
| SMLoc S, SMLoc E, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx); |
| Op->VectorList.RegNum = RegNum; |
| Op->VectorList.Count = Count; |
| Op->VectorList.NumElements = NumElements; |
| Op->VectorList.ElementKind = ElementKind; |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, |
| MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx); |
| Op->VectorIndex.Val = Idx; |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E, |
| MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx); |
| Op->Imm.Val = Val; |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx); |
| Op->FPImm.Val = Val; |
| Op->StartLoc = S; |
| Op->EndLoc = S; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx); |
| Op->Barrier.Val = Val; |
| Op->StartLoc = S; |
| Op->EndLoc = S; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx); |
| Op->SysReg.Data = Str.data(); |
| Op->SysReg.Length = Str.size(); |
| Op->StartLoc = S; |
| Op->EndLoc = S; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off, |
| SMLoc S, SMLoc E, SMLoc OffsetLoc, |
| MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx); |
| Op->Mem.BaseRegNum = BaseRegNum; |
| Op->Mem.OffsetRegNum = 0; |
| Op->Mem.OffsetImm = Off; |
| Op->Mem.ExtType = ARM64_AM::UXTX; |
| Op->Mem.ShiftVal = 0; |
| Op->Mem.ExplicitShift = false; |
| Op->Mem.Mode = ImmediateOffset; |
| Op->OffsetLoc = OffsetLoc; |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg, |
| ARM64_AM::ExtendType ExtType, |
| unsigned ShiftVal, bool ExplicitShift, |
| SMLoc S, SMLoc E, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx); |
| Op->Mem.BaseRegNum = BaseReg; |
| Op->Mem.OffsetRegNum = OffsetReg; |
| Op->Mem.OffsetImm = nullptr; |
| Op->Mem.ExtType = ExtType; |
| Op->Mem.ShiftVal = ShiftVal; |
| Op->Mem.ExplicitShift = ExplicitShift; |
| Op->Mem.Mode = RegisterOffset; |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E, |
| MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx); |
| Op->SysCRImm.Val = Val; |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx); |
| Op->Prefetch.Val = Val; |
| Op->StartLoc = S; |
| Op->EndLoc = S; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val, |
| SMLoc S, SMLoc E, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx); |
| Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val); |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| |
| static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val, |
| SMLoc S, SMLoc E, MCContext &Ctx) { |
| ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx); |
| Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val); |
| Op->StartLoc = S; |
| Op->EndLoc = E; |
| return Op; |
| } |
| }; |
| |
| } // end anonymous namespace. |
| |
| void ARM64Operand::print(raw_ostream &OS) const { |
| switch (Kind) { |
| case k_FPImm: |
| OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm()) |
| << ") >"; |
| break; |
| case k_Barrier: { |
| bool Valid; |
| StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid); |
| if (Valid) |
| OS << "<barrier " << Name << ">"; |
| else |
| OS << "<barrier invalid #" << getBarrier() << ">"; |
| break; |
| } |
| case k_Immediate: |
| getImm()->print(OS); |
| break; |
| case k_Memory: |
| OS << "<memory>"; |
| break; |
| case k_Register: |
| OS << "<register " << getReg() << ">"; |
| break; |
| case k_VectorList: { |
| OS << "<vectorlist "; |
| unsigned Reg = getVectorListStart(); |
| for (unsigned i = 0, e = getVectorListCount(); i != e; ++i) |
| OS << Reg + i << " "; |
| OS << ">"; |
| break; |
| } |
| case k_VectorIndex: |
| OS << "<vectorindex " << getVectorIndex() << ">"; |
| break; |
| case k_SysReg: |
| OS << "<sysreg: " << getSysReg() << '>'; |
| break; |
| case k_Token: |
| OS << "'" << getToken() << "'"; |
| break; |
| case k_SysCR: |
| OS << "c" << getSysCR(); |
| break; |
| case k_Prefetch: { |
| bool Valid; |
| StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid); |
| if (Valid) |
| OS << "<prfop " << Name << ">"; |
| else |
| OS << "<prfop invalid #" << getPrefetch() << ">"; |
| break; |
| } |
| case k_Shifter: { |
| unsigned Val = getShifter(); |
| OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #" |
| << ARM64_AM::getShiftValue(Val) << ">"; |
| break; |
| } |
| case k_Extend: { |
| unsigned Val = getExtend(); |
| OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val)) |
| << " #" << ARM64_AM::getArithShiftValue(Val) << ">"; |
| break; |
| } |
| } |
| } |
| |
| /// @name Auto-generated Match Functions |
| /// { |
| |
| static unsigned MatchRegisterName(StringRef Name); |
| |
| /// } |
| |
| static unsigned matchVectorRegName(StringRef Name) { |
| return StringSwitch<unsigned>(Name) |
| .Case("v0", ARM64::Q0) |
| .Case("v1", ARM64::Q1) |
| .Case("v2", ARM64::Q2) |
| .Case("v3", ARM64::Q3) |
| .Case("v4", ARM64::Q4) |
| .Case("v5", ARM64::Q5) |
| .Case("v6", ARM64::Q6) |
| .Case("v7", ARM64::Q7) |
| .Case("v8", ARM64::Q8) |
| .Case("v9", ARM64::Q9) |
| .Case("v10", ARM64::Q10) |
| .Case("v11", ARM64::Q11) |
| .Case("v12", ARM64::Q12) |
| .Case("v13", ARM64::Q13) |
| .Case("v14", ARM64::Q14) |
| .Case("v15", ARM64::Q15) |
| .Case("v16", ARM64::Q16) |
| .Case("v17", ARM64::Q17) |
| .Case("v18", ARM64::Q18) |
| .Case("v19", ARM64::Q19) |
| .Case("v20", ARM64::Q20) |
| .Case("v21", ARM64::Q21) |
| .Case("v22", ARM64::Q22) |
| .Case("v23", ARM64::Q23) |
| .Case("v24", ARM64::Q24) |
| .Case("v25", ARM64::Q25) |
| .Case("v26", ARM64::Q26) |
| .Case("v27", ARM64::Q27) |
| .Case("v28", ARM64::Q28) |
| .Case("v29", ARM64::Q29) |
| .Case("v30", ARM64::Q30) |
| .Case("v31", ARM64::Q31) |
| .Default(0); |
| } |
| |
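| /// isValidVectorKind - Check whether a suffix names a legal vector |
| /// arrangement, e.g. ".8b", ".4s", or the width-neutral ".s". |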
| static bool isValidVectorKind(StringRef Name) { |
| return StringSwitch<bool>(Name.lower()) |
| .Case(".8b", true) |
| .Case(".16b", true) |
| .Case(".4h", true) |
| .Case(".8h", true) |
| .Case(".2s", true) |
| .Case(".4s", true) |
| .Case(".1d", true) |
| .Case(".2d", true) |
| .Case(".1q", true) |
| // Accept the width-neutral variants, too, for the verbose syntax. If one |
| // is used in the wrong place, the token operand simply won't match, so |
| // everything still works out. |
| .Case(".b", true) |
| .Case(".h", true) |
| .Case(".s", true) |
| .Case(".d", true) |
| .Default(false); |
| } |
| |
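| /// parseValidVectorKind - Decompose a vector kind suffix into a lane count |
| /// and an element kind, e.g. ".4s" -> (4, 's'). Width-neutral suffixes such |
| /// as ".s" yield a lane count of 0. |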
| static void parseValidVectorKind(StringRef Name, unsigned &NumElements, |
| char &ElementKind) { |
| assert(isValidVectorKind(Name)); |
| |
| ElementKind = Name.lower()[Name.size() - 1]; |
| NumElements = 0; |
| |
| if (Name.size() == 2) |
| return; |
| |
| // Parse the lane count |
| Name = Name.drop_front(); |
| while (isdigit(Name.front())) { |
| NumElements = 10 * NumElements + (Name.front() - '0'); |
| Name = Name.drop_front(); |
| } |
| } |
| |
| bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, |
| SMLoc &EndLoc) { |
| StartLoc = getLoc(); |
| RegNo = tryParseRegister(); |
| EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
| return (RegNo == (unsigned)-1); |
| } |
| |
| /// tryParseRegister - Try to parse a register name. The token must be an |
| /// Identifier when called; if it is a register name, the token is eaten and |
| /// the register number is returned. Otherwise, -1 is returned. |
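| /// e.g. given "x3, x4", the "x3" token is consumed and ARM64::X3 returned; |
| /// the aliases "fp", "lr", "x31" and "w31" are also recognized. |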
| int ARM64AsmParser::tryParseRegister() { |
| const AsmToken &Tok = Parser.getTok(); |
| assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); |
| |
| std::string lowerCase = Tok.getString().lower(); |
| unsigned RegNum = MatchRegisterName(lowerCase); |
| // Also handle a few aliases of registers. |
| if (RegNum == 0) |
| RegNum = StringSwitch<unsigned>(lowerCase) |
| .Case("fp", ARM64::FP) |
| .Case("lr", ARM64::LR) |
| .Case("x31", ARM64::XZR) |
| .Case("w31", ARM64::WZR) |
| .Default(0); |
| |
| if (RegNum == 0) |
| return -1; |
| |
| Parser.Lex(); // Eat identifier token. |
| return RegNum; |
| } |
| |
| /// tryMatchVectorRegister - Try to parse a vector register name with an |
| /// optional kind specifier. On a match, eat the token and return the |
| /// register number; otherwise, return -1. |
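| /// e.g. "v2.4s" returns the register number for Q2 and sets Kind to ".4s". |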
| int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) { |
| if (Parser.getTok().isNot(AsmToken::Identifier)) { |
| TokError("vector register expected"); |
| return -1; |
| } |
| |
| StringRef Name = Parser.getTok().getString(); |
| // If there is a kind specifier, it's separated from the register name by |
| // a '.'. |
| size_t Start = 0, Next = Name.find('.'); |
| StringRef Head = Name.slice(Start, Next); |
| unsigned RegNum = matchVectorRegName(Head); |
| if (RegNum) { |
| if (Next != StringRef::npos) { |
| Kind = Name.slice(Next, StringRef::npos); |
| if (!isValidVectorKind(Kind)) { |
| TokError("invalid vector kind qualifier"); |
| return -1; |
| } |
| } |
| Parser.Lex(); // Eat the register token. |
| return RegNum; |
| } |
| |
| if (expected) |
| TokError("vector register expected"); |
| return -1; |
| } |
| |
| static int MatchSysCRName(StringRef Name) { |
| // Use the same layout as the tablegen'erated register name matcher. Ugly, |
| // but efficient. |
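| // e.g. "c7" and "C7" both map to 7; "c15" maps to 15. |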
| switch (Name.size()) { |
| default: |
| break; |
| case 2: |
| if (Name[0] != 'c' && Name[0] != 'C') |
| return -1; |
| switch (Name[1]) { |
| default: |
| return -1; |
| case '0': |
| return 0; |
| case '1': |
| return 1; |
| case '2': |
| return 2; |
| case '3': |
| return 3; |
| case '4': |
| return 4; |
| case '5': |
| return 5; |
| case '6': |
| return 6; |
| case '7': |
| return 7; |
| case '8': |
| return 8; |
| case '9': |
| return 9; |
| } |
| break; |
| case 3: |
| if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1') |
| return -1; |
| switch (Name[2]) { |
| default: |
| return -1; |
| case '0': |
| return 10; |
| case '1': |
| return 11; |
| case '2': |
| return 12; |
| case '3': |
| return 13; |
| case '4': |
| return 14; |
| case '5': |
| return 15; |
| } |
| break; |
| } |
| |
| // Anything else is not a SysCR operand name. Arbitrary identifiers can |
| // reach this point, so fail the match rather than assert. |
| return -1; |
| } |
| |
| /// tryParseSysCROperand - Try to parse a system instruction CR operand name. |
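| /// e.g. the "c7" in "sys #0, c7, c5, #0". |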
| ARM64AsmParser::OperandMatchResultTy |
| ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) { |
| SMLoc S = getLoc(); |
| const AsmToken &Tok = Parser.getTok(); |
| if (Tok.isNot(AsmToken::Identifier)) |
| return MatchOperand_NoMatch; |
| |
| int Num = MatchSysCRName(Tok.getString()); |
| if (Num == -1) |
| return MatchOperand_NoMatch; |
| |
| Parser.Lex(); // Eat identifier token. |
| Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext())); |
| return MatchOperand_Success; |
| } |
| |
| /// tryParsePrefetch - Try to parse a prefetch operand. |
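| /// Accepts either a named hint or a raw immediate in [0, 31], e.g. |
| /// "prfm pldl1keep, [x0]" or "prfm #5, [x0]". |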
| ARM64AsmParser::OperandMatchResultTy |
| ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) { |
| SMLoc S = getLoc(); |
| const AsmToken &Tok = Parser.getTok(); |
| // Either an identifier for named values or a 5-bit immediate. |
| bool Hash = Tok.is(AsmToken::Hash); |
| if (Hash || Tok.is(AsmToken::Integer)) { |
| if (Hash) |
| Parser.Lex(); // Eat hash token. |
| const MCExpr *ImmVal; |
| if (getParser().parseExpression(ImmVal)) |
| return MatchOperand_ParseFail; |
| |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
| if (!MCE) { |
| TokError("immediate value expected for prefetch operand"); |
| return MatchOperand_ParseFail; |
| } |
| unsigned prfop = MCE->getValue(); |
| if (prfop > 31) { |
| TokError("prefetch operand out of range, [0,31] expected"); |
| return MatchOperand_ParseFail; |
| } |
| |
| Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext())); |
| return MatchOperand_Success; |
| } |
| |
| if (Tok.isNot(AsmToken::Identifier)) { |
| TokError("pre-fetch hint expected"); |
| return MatchOperand_ParseFail; |
| } |
| |
| bool Valid; |
| unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid); |
| if (!Valid) { |
| TokError("pre-fetch hint expected"); |
| return MatchOperand_ParseFail; |
| } |
| |
| Parser.Lex(); // Eat identifier token. |
| Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext())); |
| return MatchOperand_Success; |
| } |
| |
| /// tryParseAdrpLabel - Parse and validate a source label for the ADRP |
| /// instruction. |
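| /// e.g. "adrp x0, symbol" (a plain ELF reference), "adrp x0, _var@PAGE" |
| /// (Darwin), or "adrp x0, :got:var" (an ELF GOT page reference). |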
| ARM64AsmParser::OperandMatchResultTy |
| ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) { |
| SMLoc S = getLoc(); |
| const MCExpr *Expr; |
| |
| if (Parser.getTok().is(AsmToken::Hash)) { |
| Parser.Lex(); // Eat hash token. |
| } |
| |
| if (parseSymbolicImmVal(Expr)) |
| return MatchOperand_ParseFail; |
| |
| ARM64MCExpr::VariantKind ELFRefKind; |
| MCSymbolRefExpr::VariantKind DarwinRefKind; |
| int64_t Addend; |
| if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { |
| if (DarwinRefKind == MCSymbolRefExpr::VK_None && |
| ELFRefKind == ARM64MCExpr::VK_INVALID) { |
| // No modifier was specified at all; this is the syntax for an ELF basic |
| // ADRP relocation (unfortunately). |
| Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext()); |
| } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE || |
| DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) && |
| Addend != 0) { |
| Error(S, "gotpage label reference not allowed an addend"); |
| return MatchOperand_ParseFail; |
| } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE && |
| DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE && |
| DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE && |
| ELFRefKind != ARM64MCExpr::VK_GOT_PAGE && |
| ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE && |
| ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) { |
| // The operand must be an @page or @gotpage qualified symbolref. |
| Error(S, "page or gotpage label reference expected"); |
| return MatchOperand_ParseFail; |
| } |
| } |
| |
| // We have either a label reference, possibly with an addend, or an |
| // immediate. The addend is a raw value here; the linker will adjust it to |
| // reference only the page. |
| SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
| Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext())); |
| |
| return MatchOperand_Success; |
| } |
| |
| /// tryParseAdrLabel - Parse and validate a source label for the ADR |
| /// instruction. |
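| /// e.g. "adr x1, some_label". |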
| ARM64AsmParser::OperandMatchResultTy |
| ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) { |
| SMLoc S = getLoc(); |
| const MCExpr *Expr; |
| |
| if (Parser.getTok().is(AsmToken::Hash)) { |
| Parser.Lex(); // Eat hash token. |
| } |
| |
| if (getParser().parseExpression(Expr)) |
| return MatchOperand_ParseFail; |
| |
| SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
| Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext())); |
| |
| return MatchOperand_Success; |
| } |
| |
| /// tryParseFPImm - Try to parse a floating-point immediate operand. |
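| /// Accepts a literal such as "#0.5" or "#-1.25", or a pre-encoded 8-bit |
| /// value given in hex, e.g. "#0x70". |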
| ARM64AsmParser::OperandMatchResultTy |
| ARM64AsmParser::tryParseFPImm(OperandVector &Operands) { |
| SMLoc S = getLoc(); |
| |
| bool Hash = false; |
| if (Parser.getTok().is(AsmToken::Hash)) { |
| Parser.Lex(); // Eat '#' |
| Hash = true; |
| } |
| |
| // Handle negation, as that still comes through as a separate token. |
| bool isNegative = false; |
| if (Parser.getTok().is(AsmToken::Minus)) { |
| isNegative = true; |
| Parser.Lex(); |
| } |
| const AsmToken &Tok = Parser.getTok(); |
| if (Tok.is(AsmToken::Real)) { |
| APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); |
| uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); |
| // If we had a '-' in front, toggle the sign bit. |
| IntVal ^= (uint64_t)isNegative << 63; |
| int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal)); |
| Parser.Lex(); // Eat the token. |
| // Check for out-of-range values. As an exception, we let zero through, |
| // since we handle that special case in post-processing before matching in |
| // order to use the zero register for it. |
| if (Val == -1 && !RealVal.isZero()) { |
| TokError("floating point value out of range"); |
| return MatchOperand_ParseFail; |
| } |
| Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext())); |
| return MatchOperand_Success; |
| } |
| if (Tok.is(AsmToken::Integer)) { |
| int64_t Val; |
| if (!isNegative && Tok.getString().startswith("0x")) { |
| Val = Tok.getIntVal(); |
| if (Val > 255 || Val < 0) { |
| TokError("encoded floating point value out of range"); |
| return MatchOperand_ParseFail; |
| } |
| } else { |
| APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); |
| uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); |
| // If we had a '-' in front, toggle the sign bit. |
| IntVal ^= (uint64_t)isNegative << 63; |
| Val = ARM64_AM::getFP64Imm(APInt(64, IntVal)); |
| } |
| Parser.Lex(); // Eat the token. |
| Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext())); |
| return MatchOperand_Success; |
| } |
| |
| if (!Hash) |
| return MatchOperand_NoMatch; |
| |
| TokError("invalid floating point immediate"); |
| return MatchOperand_ParseFail; |
| } |
| |
| /// parseCondCodeString - Parse a Condition Code string. |
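| /// e.g. "eq" -> ARM64CC::EQ; "cs"/"hs" and "cc"/"lo" are synonym pairs. |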
| unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) { |
| unsigned CC = StringSwitch<unsigned>(Cond.lower()) |
| .Case("eq", ARM64CC::EQ) |
| .Case("ne", ARM64CC::NE) |
| .Case("cs", ARM64CC::HS) |
| .Case("hs", ARM64CC::HS) |
| .Case("cc", ARM64CC::LO) |
| .Case("lo", ARM64CC::LO) |
| .Case("mi", ARM64CC::MI) |
| .Case("pl", ARM64CC::PL) |
| .Case("vs", ARM64CC::VS) |
| .Case("vc", ARM64CC::VC) |
| .Case("hi", ARM64CC::HI) |
| .Case("ls", ARM64CC::LS) |
| .Case("ge", ARM64CC::GE) |
| .Case("lt", ARM64CC::LT) |
| .Case("gt", ARM64CC::GT) |
| .Case("le", ARM64CC::LE) |
| .Case("al", ARM64CC::AL) |
| .Case("nv", ARM64CC::NV) |
| .Default(ARM64CC::Invalid); |
| return CC; |
| } |
| |
| /// parseCondCode - Parse a Condition Code operand. |
| bool ARM64AsmParser::parseCondCode(OperandVector &Operands, |
| bool invertCondCode) { |
| SMLoc S = getLoc(); |
| const AsmToken &Tok = Parser.getTok(); |
| assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); |
| |
| StringRef Cond = Tok.getString(); |
| unsigned CC = parseCondCodeString(Cond); |
| if (CC == ARM64CC::Invalid) |
| return TokError("invalid condition code"); |
| Parser.Lex(); // Eat identifier token. |
| |
| if (invertCondCode) |
| CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC)); |
| |
| const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext()); |
| Operands.push_back( |
| ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext())); |
| return false; |
| } |
| |
| /// parseOptionalShift - Some operands take an optional shift argument. Parse |
| /// them if present. |
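| /// e.g. the "lsl #12" in "add x0, x1, #1, lsl #12". |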
| bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) { |
| const AsmToken &Tok = Parser.getTok(); |
| ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString()) |
| .Case("lsl", ARM64_AM::LSL) |
| .Case("lsr", ARM64_AM::LSR) |
| .Case("asr", ARM64_AM::ASR) |
| .Case("ror", ARM64_AM::ROR) |
| .Case("msl", ARM64_AM::MSL) |
| .Case("LSL", ARM64_AM::LSL) |
| .Case("LSR", ARM64_AM::LSR) |
| .Case("ASR", ARM64_AM::ASR) |
| .Case("ROR", ARM64_AM::ROR) |
| .Case("MSL", ARM64_AM::MSL) |
| .Default(ARM64_AM::InvalidShift); |
| if (ShOp == ARM64_AM::InvalidShift) |
| return true; |
| |
| SMLoc S = Tok.getLoc(); |
| Parser.Lex(); |
| |
| // We expect a number here. |
| bool Hash = getLexer().is(AsmToken::Hash); |
| if (!Hash && getLexer().isNot(AsmToken::Integer)) |
| return TokError("immediate value expected for shifter operand"); |
| |
| if (Hash) |
| Parser.Lex(); // Eat the '#'. |
| |
| SMLoc ExprLoc = getLoc(); |
| const MCExpr *ImmVal; |
| if (getParser().parseExpression(ImmVal)) |
| return true; |
| |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
| if (!MCE) |
| return TokError("immediate value expected for shifter operand"); |
| |
| if ((MCE->getValue() & 0x3f) != MCE->getValue()) |
| return Error(ExprLoc, "immediate value too large for shifter operand"); |
| |
| SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
| Operands.push_back( |
| ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext())); |
| return false; |
| } |
| |
| /// parseOptionalExtend - Some operands take an optional extend argument. Parse |
| /// them if present. |
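| /// e.g. the "uxtw #2" in "add x0, x1, w2, uxtw #2". A bare extend with no |
| /// amount, e.g. "sxtw", is accepted as an implicit "#0". |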
| bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) { |
| const AsmToken &Tok = Parser.getTok(); |
| ARM64_AM::ExtendType ExtOp = |
| StringSwitch<ARM64_AM::ExtendType>(Tok.getString()) |
| .Case("uxtb", ARM64_AM::UXTB) |
| .Case("uxth", ARM64_AM::UXTH) |
| .Case("uxtw", ARM64_AM::UXTW) |
| .Case("uxtx", ARM64_AM::UXTX) |
| .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX |
| .Case("sxtb", ARM64_AM::SXTB) |
| .Case("sxth", ARM64_AM::SXTH) |
| .Case("sxtw", ARM64_AM::SXTW) |
| .Case("sxtx", ARM64_AM::SXTX) |
| .Case("UXTB", ARM64_AM::UXTB) |
| .Case("UXTH", ARM64_AM::UXTH) |
| .Case("UXTW", ARM64_AM::UXTW) |
| .Case("UXTX", ARM64_AM::UXTX) |
| .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX |
| .Case("SXTB", ARM64_AM::SXTB) |
| .Case("SXTH", ARM64_AM::SXTH) |
| .Case("SXTW", ARM64_AM::SXTW) |
| .Case("SXTX", ARM64_AM::SXTX) |
| .Default(ARM64_AM::InvalidExtend); |
| if (ExtOp == ARM64_AM::InvalidExtend) |
| return true; |
| |
| SMLoc S = Tok.getLoc(); |
| Parser.Lex(); |
| |
| if (getLexer().is(AsmToken::EndOfStatement) || |
| getLexer().is(AsmToken::Comma)) { |
| SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
| Operands.push_back( |
| ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext())); |
| return false; |
| } |
| |
| bool Hash = getLexer().is(AsmToken::Hash); |
| if (!Hash && getLexer().isNot(AsmToken::Integer)) { |
| SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
| Operands.push_back( |
| ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext())); |
| return false; |
| } |
| |
| if (Hash) |
| Parser.Lex(); // Eat the '#'. |
| |
| const MCExpr *ImmVal; |
| if (getParser().parseExpression(ImmVal)) |
| return true; |
| |
| const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
| if (!MCE) |
| return TokError("immediate value expected for extend operand"); |
| |
| SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
| Operands.push_back( |
| ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext())); |
| return false; |
| } |
| |
| /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for |
| /// the SYS instruction. Parse them specially so that we create a SYS MCInst. |
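| /// e.g. "ic ialluis" is assembled as if it were written "sys #0, c7, c1, #0". |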
| bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc, |
| OperandVector &Operands) { |
| if (Name.find('.') != StringRef::npos) |
| return TokError("invalid operand"); |
| |
| Mnemonic = Name; |
| Operands.push_back( |
| ARM64Operand::CreateToken("sys", false, NameLoc, getContext())); |
| |
| const AsmToken &Tok = Parser.getTok(); |
| StringRef Op = Tok.getString(); |
| SMLoc S = Tok.getLoc(); |
| |
| const MCExpr *Expr = nullptr; |
| |
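| // SYS_ALIAS pushes the op1, Cn, Cm and op2 operands of the equivalent SYS |
| // instruction onto the operand list. |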
| #define SYS_ALIAS(op1, Cn, Cm, op2) \ |
| do { \ |
| Expr = MCConstantExpr::Create(op1, getContext()); \ |
| Operands.push_back( \ |
| ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \ |
| Operands.push_back( \ |
| ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \ |
| Operands.push_back( \ |
| ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \ |
| Expr = MCConstantExpr::Create(op2, getContext()); \ |
| Operands.push_back( \ |
| ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \ |
| } while (0) |
| |
| if (Mnemonic == "ic") { |
| if (!Op.compare_lower("ialluis")) { |
| // SYS #0, C7, C1, #0 |
| SYS_ALIAS(0, 7, 1, 0); |
| } else if (!Op.compare_lower("iallu")) { |
| // SYS #0, C7, C5, #0 |
| SYS_ALIAS(0, 7, 5, 0); |
| } else if (!Op.compare_lower("ivau")) { |
| // SYS #3, C7, C5, #1 |
| SYS_ALIAS(3, 7, 5, 1); |
| } else { |
| return TokError("invalid operand for IC instruction"); |
| } |
| } else if (Mnemonic == "dc") { |
| if (!Op.compare_lower("zva")) { |
| // SYS #3, C7, C4, #1 |
| SYS_ALIAS(3, 7, 4, 1); |
| } else if (!Op.compare_lower("ivac")) { |
| // SYS #0, C7, C6, #1 |
| SYS_ALIAS(0, 7, 6, 1); |
| } else if (!Op.compare_lower("isw")) { |
| // SYS #0, C7, C6, #2 |
| SYS_ALIAS(0, 7, 6, 2); |
| } else if (!Op.compare_lower("cvac")) { |
| // SYS #3, C7, C10, #1 |
| SYS_ALIAS(3, 7, 10, 1); |
| } else if (!Op.compare_lower("csw")) { |
| // SYS #0, C7, C10, #2 |
| SYS_ALIAS(0, 7, 10, 2); |
| } else if (!Op.compare_lower("cvau")) { |
| // SYS #3, C7, C11, #1 |
| SYS_ALIAS(3, 7, 11, 1); |
| } else if (!Op.compare_lower("civac")) { |
| // SYS #3, C7, C14, #1 |
| SYS_ALIAS(3, 7, 14, 1); |
| } else if (!Op.compare_lower("cisw")) { |
| // SYS #0, C7, C14, #2 |
| SYS_ALIAS(0, 7, 14, 2); |
| } else { |
| return TokError("invalid operand for DC instruction"); |
| } |
| } else if (Mnemonic == "at") { |
| if (!Op.compare_lower("s1e1r")) { |
| // SYS #0, C7, C8, #0 |
| SYS_ALIAS(0, 7, 8, 0); |
| } else if (!Op.compare_lower("s1e2r")) { |
| // SYS #4, C7, C8, #0 |
| SYS_ALIAS(4, 7, 8, 0); |
| } else if (!Op.compare_lower("s1e3r")) { |
| // SYS #6, C7, C8, #0 |
| SYS_ALIAS(6, 7, 8, 0); |
| } else if (!Op.compare_lower("s1e1w")) { |
| // SYS #0, C7, C8, #1 |
| SYS_ALIAS(0, 7, 8, 1); |
| } else if (!Op.compare_lower("s1e2w")) { |
| // SYS #4, C7, C8, #1 |
| SYS_ALIAS(4, 7, 8, 1); |
| } else if (!Op.compare_lower("s1e3w")) { |
| // SYS #6, C7, C8, #1 |
| SYS_ALIAS(6, 7, 8, 1); |
| } else if (!Op.compare_lower("s1e0r")) { |
| // SYS #0, C7, C8, #2 |
| SYS_ALIAS(0, 7, 8, 2); |
| } else if (!Op.compare_lower("s1e0w")) { |
| // SYS #0, C7, C8, #3 |
| SYS_ALIAS(0, 7, 8, 3); |
| } else if (!Op.compare_lower("s12e1r")) { |
| // SYS #4, C7, C8, #4 |
| SYS_ALIAS(4, 7, 8, 4); |
| } else if (!Op.compare_lower("s12e1w")) { |
| // SYS #4, C7, C8, #5 |
| SYS_ALIAS(4, 7, 8, 5); |
| } else if (!Op.compare_lower("s12e0r")) { |
| // SYS #4, C7, C8, #6 |
| SYS_ALIAS(4, 7, 8, 6); |
| } else if (!Op.compare_lower("s12e0w")) { |
| // SYS #4, C7, C8, #7 |
| SYS_ALIAS(4, 7, 8, 7); |
| } else { |
| return TokError("invalid operand for AT instruction"); |
| } |
| } else if (Mnemonic == "tlbi") { |
| if (!Op.compare_lower("vmalle1is")) { |
| // SYS #0, C8, C3, #0 |
| SYS_ALIAS(0, 8, 3, 0); |
| } else if (!Op.compare_lower("alle2is")) { |
| // SYS #4, C8, C3, #0 |
| SYS_ALIAS(4, 8, 3, 0); |
| } else if (!Op.compare_lower("alle3is")) { |
| // SYS #6, C8, C3, #0 |
| SYS_ALIAS(6, 8, 3, 0); |
| } else if (!Op.compare_lower("vae1is")) { |
| // SYS #0, C8, C3, #1 |
| SYS_ALIAS(0, 8, 3, 1); |
| } else if (!Op.compare_lower("vae2is")) { |
| // SYS #4, C8, C3, #1 |
| SYS_ALIAS(4, 8, 3, 1); |
| } else if (!Op.compare_lower("vae3is")) { |
| // SYS #6, C8, C3, #1 |
| SYS_ALIAS(6, 8, 3, 1); |
| } else if (!Op.compare_lower("aside1is")) { |
| // SYS #0, C8, C3, #2 |
| SYS_ALIAS(0, 8, 3, 2); |
| } else if (!Op.compare_lower("vaae1is")) { |
| // SYS #0, C8, C3, #3 |
| SYS_ALIAS(0, 8, 3, 3); |
| } else if (!Op.compare_lower("alle1is")) { |
| // SYS #4, C8, C3, #4 |
| SYS_ALIAS(4, 8, 3, 4); |
| } else if (!Op.compare_lower("vale1is")) { |
| // SYS #0, C8, C3, #5 |
| SYS_ALIAS(0, 8, 3, 5); |
| } else if (!Op.compare_lower("vaale1is")) { |
| // SYS #0, C8, C3, #7 |
| SYS_ALIAS(0, 8, 3, 7); |
| } else if (!Op.compare_lower("vmalle1")) { |
| // SYS #0, C8, C7, #0 |
| SYS_ALIAS(0, 8, 7, 0); |
| } else if (!Op.compare_lower("alle2")) { |
| // SYS #4, C8, C7, #0 |
| SYS_ALIAS(4, 8, 7, 0); |
| } else if (!Op.compare_lower("vale2is")) { |
| // SYS #4, C8, C3, #5 |
| SYS_ALIAS(4, 8, 3, 5); |
| } else if (!Op.compare_lower("vale3is")) { |
| // SYS #6, C8, C3, #5 |
| SYS_ALIAS(6, 8, 3, 5); |
| } else if (!Op.compare_lower("alle3")) { |
| // SYS #6, C8, C7, #0 |
| SYS_ALIAS(6, 8, 7, 0); |
| } else if (!Op.compare_lower("vae1")) { |
| // SYS #0, C8, C7, #1 |
| SYS_ALIAS(0, 8, 7, 1); |
| } else if (!Op.compare_lower("vae2")) { |
| // SYS #4, C8, C7, #1 |
| SYS_ALIAS(4, 8, 7, 1); |
| } else |