| //===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| /// \file |
/// This file implements the targeting of the MachineLegalizer class for
| /// AArch64. |
| /// \todo This should be generated by TableGen. |
| //===----------------------------------------------------------------------===// |
| |
| #include "AArch64LegalizerInfo.h" |
| #include "AArch64RegisterBankInfo.h" |
| #include "AArch64Subtarget.h" |
| #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" |
| #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" |
| #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" |
| #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" |
| #include "llvm/CodeGen/GlobalISel/Utils.h" |
| #include "llvm/CodeGen/MachineInstr.h" |
| #include "llvm/CodeGen/MachineRegisterInfo.h" |
| #include "llvm/CodeGen/TargetOpcodes.h" |
| #include "llvm/CodeGen/ValueTypes.h" |
| #include "llvm/IR/DerivedTypes.h" |
| #include "llvm/IR/Intrinsics.h" |
| #include "llvm/IR/IntrinsicsAArch64.h" |
| #include "llvm/IR/Type.h" |
| #include "llvm/Support/MathExtras.h" |
| #include <initializer_list> |
| |
| #define DEBUG_TYPE "aarch64-legalinfo" |
| |
| using namespace llvm; |
| using namespace LegalizeActions; |
| using namespace LegalizeMutations; |
| using namespace LegalityPredicates; |
| using namespace MIPatternMatch; |
| |
| AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) |
| : ST(&ST) { |
| using namespace TargetOpcode; |
| const LLT p0 = LLT::pointer(0, 64); |
| const LLT s8 = LLT::scalar(8); |
| const LLT s16 = LLT::scalar(16); |
| const LLT s32 = LLT::scalar(32); |
| const LLT s64 = LLT::scalar(64); |
| const LLT s128 = LLT::scalar(128); |
| const LLT v16s8 = LLT::fixed_vector(16, 8); |
| const LLT v8s8 = LLT::fixed_vector(8, 8); |
| const LLT v4s8 = LLT::fixed_vector(4, 8); |
| const LLT v8s16 = LLT::fixed_vector(8, 16); |
| const LLT v4s16 = LLT::fixed_vector(4, 16); |
| const LLT v2s16 = LLT::fixed_vector(2, 16); |
| const LLT v2s32 = LLT::fixed_vector(2, 32); |
| const LLT v4s32 = LLT::fixed_vector(4, 32); |
| const LLT v2s64 = LLT::fixed_vector(2, 64); |
| const LLT v2p0 = LLT::fixed_vector(2, p0); |
| |
| std::initializer_list<LLT> PackedVectorAllTypeList = {/* Begin 128bit types */ |
| v16s8, v8s16, v4s32, |
| v2s64, v2p0, |
| /* End 128bit types */ |
| /* Begin 64bit types */ |
| v8s8, v4s16, v2s32}; |
| |
| const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine(); |
| |
| // FIXME: support subtargets which have neon/fp-armv8 disabled. |
| if (!ST.hasNEON() || !ST.hasFPARMv8()) { |
| getLegacyLegalizerInfo().computeTables(); |
| return; |
| } |
| |
| // Some instructions only support s16 if the subtarget has full 16-bit FP |
| // support. |
| const bool HasFP16 = ST.hasFullFP16(); |
| const LLT &MinFPScalar = HasFP16 ? s16 : s32; |
| |
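  // FEAT_CSSC (Common Short Sequence Compression) adds scalar ABS, CNT
  // (popcount), CTZ, and signed/unsigned MIN/MAX instructions, which several
  // of the rules below rely on.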
| const bool HasCSSC = ST.hasCSSC(); |
| |
| getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE}) |
| .legalFor({p0, s8, s16, s32, s64}) |
| .legalFor(PackedVectorAllTypeList) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s8, s64) |
| .fewerElementsIf( |
| [=](const LegalityQuery &Query) { |
| return Query.Types[0].isVector() && |
| (Query.Types[0].getElementType() != s64 || |
| Query.Types[0].getNumElements() != 2); |
| }, |
| [=](const LegalityQuery &Query) { |
| LLT EltTy = Query.Types[0].getElementType(); |
| if (EltTy == s64) |
| return std::make_pair(0, LLT::fixed_vector(2, 64)); |
| return std::make_pair(0, EltTy); |
| }); |
| |
| getActionDefinitionsBuilder(G_PHI) |
| .legalFor({p0, s16, s32, s64}) |
| .legalFor(PackedVectorAllTypeList) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s16, s64) |
| // Maximum: sN * k = 128 |
| .clampMaxNumElements(0, s8, 16) |
| .clampMaxNumElements(0, s16, 8) |
| .clampMaxNumElements(0, s32, 4) |
| .clampMaxNumElements(0, s64, 2) |
| .clampMaxNumElements(0, p0, 2); |
| |
| getActionDefinitionsBuilder(G_BSWAP) |
| .legalFor({s32, s64, v4s32, v2s32, v2s64}) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s32, s64); |
| |
| getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR}) |
| .legalFor({s32, s64, v2s32, v4s32, v4s16, v8s16, v16s8, v8s8}) |
| .scalarizeIf( |
| [=](const LegalityQuery &Query) { |
| return Query.Opcode == G_MUL && Query.Types[0] == v2s64; |
| }, |
| 0) |
| .legalFor({v2s64}) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s32, s64) |
| .clampNumElements(0, v2s32, v4s32) |
| .clampNumElements(0, v2s64, v2s64) |
| .moreElementsToNextPow2(0); |
| |
| getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR}) |
| .customIf([=](const LegalityQuery &Query) { |
| const auto &SrcTy = Query.Types[0]; |
| const auto &AmtTy = Query.Types[1]; |
| return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 && |
| AmtTy.getSizeInBits() == 32; |
| }) |
| .legalFor({ |
| {s32, s32}, |
| {s32, s64}, |
| {s64, s64}, |
| {v8s8, v8s8}, |
| {v16s8, v16s8}, |
| {v4s16, v4s16}, |
| {v8s16, v8s16}, |
| {v2s32, v2s32}, |
| {v4s32, v4s32}, |
| {v2s64, v2s64}, |
| }) |
| .widenScalarToNextPow2(0) |
| .clampScalar(1, s32, s64) |
| .clampScalar(0, s32, s64) |
| .clampNumElements(0, v2s32, v4s32) |
| .clampNumElements(0, v2s64, v2s64) |
| .moreElementsToNextPow2(0) |
| .minScalarSameAs(1, 0); |
| |
| getActionDefinitionsBuilder(G_PTR_ADD) |
| .legalFor({{p0, s64}, {v2p0, v2s64}}) |
| .clampScalar(1, s64, s64); |
| |
| getActionDefinitionsBuilder(G_PTRMASK).legalFor({{p0, s64}}); |
| |
| getActionDefinitionsBuilder({G_SDIV, G_UDIV}) |
| .legalFor({s32, s64}) |
| .libcallFor({s128}) |
| .clampScalar(0, s32, s64) |
| .widenScalarToNextPow2(0) |
| .scalarize(0); |
| |
| getActionDefinitionsBuilder({G_SREM, G_UREM, G_SDIVREM, G_UDIVREM}) |
| .lowerFor({s8, s16, s32, s64, v2s64, v4s32, v2s32}) |
| .widenScalarOrEltToNextPow2(0) |
| .clampScalarOrElt(0, s32, s64) |
| .clampNumElements(0, v2s32, v4s32) |
| .clampNumElements(0, v2s64, v2s64) |
      .moreElementsToNextPow2(0);

| getActionDefinitionsBuilder({G_SMULO, G_UMULO}) |
| .widenScalarToNextPow2(0, /*Min = */ 32) |
| .clampScalar(0, s32, s64) |
| .lower(); |
| |
| getActionDefinitionsBuilder({G_SMULH, G_UMULH}) |
| .legalFor({s64, v8s16, v16s8, v4s32}) |
| .lower(); |
| |
| auto &MinMaxActions = getActionDefinitionsBuilder( |
| {G_SMIN, G_SMAX, G_UMIN, G_UMAX}); |
| if (HasCSSC) |
| MinMaxActions |
| .legalFor({s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32}) |
        // Clamping is conditional on the CSSC extension: without legal scalar
        // types we lower to CMP, which can fold one of the two sxtb's we would
        // otherwise need for a type smaller than 32 bits.
| .minScalar(0, s32); |
| else |
| MinMaxActions |
| .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32}); |
| MinMaxActions |
| .clampNumElements(0, v8s8, v16s8) |
| .clampNumElements(0, v4s16, v8s16) |
| .clampNumElements(0, v2s32, v4s32) |
      // FIXME: This shouldn't be needed as v2s64 types are going to
      // be expanded anyway, but G_ICMP doesn't support splitting vectors yet
| .clampNumElements(0, v2s64, v2s64) |
| .lower(); |
| |
| getActionDefinitionsBuilder( |
| {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO}) |
| .legalFor({{s32, s32}, {s64, s32}}) |
| .clampScalar(0, s32, s64) |
| .clampScalar(1, s32, s64) |
| .widenScalarToNextPow2(0); |
| |
| getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG}) |
| .legalFor({MinFPScalar, s32, s64, v2s64, v4s32, v2s32}) |
| .clampScalar(0, MinFPScalar, s64) |
| .clampNumElements(0, v2s32, v4s32) |
| .clampNumElements(0, v2s64, v2s64); |
| |
| getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64}); |
| |
| getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT, |
| G_FMA, G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND, |
| G_FNEARBYINT, G_INTRINSIC_LRINT}) |
| // If we don't have full FP16 support, then scalarize the elements of |
| // vectors containing fp16 types. |
| .fewerElementsIf( |
| [=, &ST](const LegalityQuery &Query) { |
| const auto &Ty = Query.Types[0]; |
| return Ty.isVector() && Ty.getElementType() == s16 && |
| !ST.hasFullFP16(); |
| }, |
| [=](const LegalityQuery &Query) { return std::make_pair(0, s16); }) |
| // If we don't have full FP16 support, then widen s16 to s32 if we |
| // encounter it. |
| .widenScalarIf( |
| [=, &ST](const LegalityQuery &Query) { |
| return Query.Types[0] == s16 && !ST.hasFullFP16(); |
| }, |
| [=](const LegalityQuery &Query) { return std::make_pair(0, s32); }) |
| .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16}); |
| |
| getActionDefinitionsBuilder( |
| {G_FCOS, G_FSIN, G_FLOG10, G_FLOG, G_FLOG2, G_FEXP, G_FEXP2, G_FPOW}) |
| // We need a call for these, so we always need to scalarize. |
| .scalarize(0) |
| // Regardless of FP16 support, widen 16-bit elements to 32-bits. |
| .minScalar(0, s32) |
| .libcallFor({s32, s64, v2s32, v4s32, v2s64}); |
| |
| getActionDefinitionsBuilder(G_INSERT) |
| .legalIf(all(typeInSet(0, {s32, s64, p0}), |
| typeInSet(1, {s8, s16, s32}), smallerThan(1, 0))) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s32, s64) |
| .widenScalarToNextPow2(1) |
| .minScalar(1, s8) |
| .maxScalarIf(typeInSet(0, {s32}), 1, s16) |
| .maxScalarIf(typeInSet(0, {s64, p0}), 1, s32); |
| |
| getActionDefinitionsBuilder(G_EXTRACT) |
| .legalIf(all(typeInSet(0, {s16, s32, s64, p0}), |
| typeInSet(1, {s32, s64, s128, p0}), smallerThan(0, 1))) |
| .widenScalarToNextPow2(1) |
| .clampScalar(1, s32, s128) |
| .widenScalarToNextPow2(0) |
| .minScalar(0, s16) |
| .maxScalarIf(typeInSet(1, {s32}), 0, s16) |
| .maxScalarIf(typeInSet(1, {s64, p0}), 0, s32) |
      .maxScalarIf(typeInSet(1, {s128}), 0, s64);

| for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) { |
| auto &Actions = getActionDefinitionsBuilder(Op); |
| |
| if (Op == G_SEXTLOAD) |
      Actions.lowerIf(atomicOrderingAtLeastOrStrongerThan(
          0, AtomicOrdering::Unordered));
| |
| // Atomics have zero extending behavior. |
| Actions |
| .legalForTypesWithMemDesc({{s32, p0, s8, 8}, |
| {s32, p0, s16, 8}, |
| {s32, p0, s32, 8}, |
| {s64, p0, s8, 2}, |
| {s64, p0, s16, 2}, |
| {s64, p0, s32, 4}, |
| {s64, p0, s64, 8}, |
| {p0, p0, s64, 8}, |
| {v2s32, p0, s64, 8}}) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s32, s64) |
| // TODO: We could support sum-of-pow2's but the lowering code doesn't know |
| // how to do that yet. |
| .unsupportedIfMemSizeNotPow2() |
| // Lower anything left over into G_*EXT and G_LOAD |
| .lower(); |
| } |
| |
| auto IsPtrVecPred = [=](const LegalityQuery &Query) { |
| const LLT &ValTy = Query.Types[0]; |
| if (!ValTy.isVector()) |
| return false; |
| const LLT EltTy = ValTy.getElementType(); |
| return EltTy.isPointer() && EltTy.getAddressSpace() == 0; |
| }; |
| |
| getActionDefinitionsBuilder(G_LOAD) |
| .customIf([=](const LegalityQuery &Query) { |
| return Query.Types[0] == s128 && |
| Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic; |
| }) |
| .legalForTypesWithMemDesc({{s8, p0, s8, 8}, |
| {s16, p0, s16, 8}, |
| {s32, p0, s32, 8}, |
| {s64, p0, s64, 8}, |
| {p0, p0, s64, 8}, |
| {s128, p0, s128, 8}, |
| {v8s8, p0, s64, 8}, |
| {v16s8, p0, s128, 8}, |
| {v4s16, p0, s64, 8}, |
| {v8s16, p0, s128, 8}, |
| {v2s32, p0, s64, 8}, |
| {v4s32, p0, s128, 8}, |
| {v2s64, p0, s128, 8}}) |
| // These extends are also legal |
| .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 8}}) |
| .widenScalarToNextPow2(0, /* MinSize = */8) |
| .lowerIfMemSizeNotByteSizePow2() |
| .clampScalar(0, s8, s64) |
| .narrowScalarIf([=](const LegalityQuery &Query) { |
| // Clamp extending load results to 32-bits. |
| return Query.Types[0].isScalar() && |
| Query.Types[0] != Query.MMODescrs[0].MemoryTy && |
| Query.Types[0].getSizeInBits() > 32; |
| }, |
| changeTo(0, s32)) |
| .clampMaxNumElements(0, s8, 16) |
| .clampMaxNumElements(0, s16, 8) |
| .clampMaxNumElements(0, s32, 4) |
| .clampMaxNumElements(0, s64, 2) |
| .clampMaxNumElements(0, p0, 2) |
| .customIf(IsPtrVecPred) |
| .scalarizeIf(typeIs(0, v2s16), 0); |
| |
| getActionDefinitionsBuilder(G_STORE) |
| .customIf([=](const LegalityQuery &Query) { |
| return Query.Types[0] == s128 && |
| Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic; |
| }) |
| .legalForTypesWithMemDesc({{s8, p0, s8, 8}, |
| {s16, p0, s8, 8}, // truncstorei8 from s16 |
| {s32, p0, s8, 8}, // truncstorei8 from s32 |
| {s64, p0, s8, 8}, // truncstorei8 from s64 |
| {s16, p0, s16, 8}, |
| {s32, p0, s16, 8}, // truncstorei16 from s32 |
| {s64, p0, s16, 8}, // truncstorei16 from s64 |
| {s32, p0, s32, 8}, |
| {s64, p0, s64, 8}, |
| {s64, p0, s32, 8}, // truncstorei32 from s64 |
| {p0, p0, s64, 8}, |
| {s128, p0, s128, 8}, |
| {v16s8, p0, s128, 8}, |
| {v8s8, p0, s64, 8}, |
| {v4s16, p0, s64, 8}, |
| {v8s16, p0, s128, 8}, |
| {v2s32, p0, s64, 8}, |
| {v4s32, p0, s128, 8}, |
| {v2s64, p0, s128, 8}}) |
| .clampScalar(0, s8, s64) |
| .lowerIf([=](const LegalityQuery &Query) { |
| return Query.Types[0].isScalar() && |
| Query.Types[0] != Query.MMODescrs[0].MemoryTy; |
| }) |
| // Maximum: sN * k = 128 |
| .clampMaxNumElements(0, s8, 16) |
| .clampMaxNumElements(0, s16, 8) |
| .clampMaxNumElements(0, s32, 4) |
| .clampMaxNumElements(0, s64, 2) |
| .clampMaxNumElements(0, p0, 2) |
| .lowerIfMemSizeNotPow2() |
| .customIf(IsPtrVecPred) |
| .scalarizeIf(typeIs(0, v2s16), 0); |
| |
| // Constants |
| getActionDefinitionsBuilder(G_CONSTANT) |
| .legalFor({p0, s8, s16, s32, s64}) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s8, s64); |
| getActionDefinitionsBuilder(G_FCONSTANT) |
| .legalIf([=](const LegalityQuery &Query) { |
| const auto &Ty = Query.Types[0]; |
| if (HasFP16 && Ty == s16) |
| return true; |
| return Ty == s32 || Ty == s64 || Ty == s128; |
| }) |
| .clampScalar(0, MinFPScalar, s128); |
| |
| getActionDefinitionsBuilder({G_ICMP, G_FCMP}) |
| .legalFor({{s32, s32}, |
| {s32, s64}, |
| {s32, p0}, |
| {v4s32, v4s32}, |
| {v2s32, v2s32}, |
| {v2s64, v2s64}, |
| {v2s64, v2p0}, |
| {v4s16, v4s16}, |
| {v8s16, v8s16}, |
| {v8s8, v8s8}, |
| {v16s8, v16s8}}) |
| .widenScalarOrEltToNextPow2(1) |
| .clampScalar(1, s32, s64) |
| .clampScalar(0, s32, s32) |
| .minScalarEltSameAsIf( |
| [=](const LegalityQuery &Query) { |
| const LLT &Ty = Query.Types[0]; |
| const LLT &SrcTy = Query.Types[1]; |
| return Ty.isVector() && !SrcTy.getElementType().isPointer() && |
| Ty.getElementType() != SrcTy.getElementType(); |
| }, |
| 0, 1) |
| .minScalarOrEltIf( |
| [=](const LegalityQuery &Query) { return Query.Types[1] == v2s16; }, |
| 1, s32) |
| .minScalarOrEltIf( |
| [=](const LegalityQuery &Query) { return Query.Types[1] == v2p0; }, 0, |
| s64) |
| .clampNumElements(0, v2s32, v4s32); |
| |
| // Extensions |
| auto ExtLegalFunc = [=](const LegalityQuery &Query) { |
| unsigned DstSize = Query.Types[0].getSizeInBits(); |
| |
| if (DstSize == 128 && !Query.Types[0].isVector()) |
| return false; // Extending to a scalar s128 needs narrowing. |
| |
| // Make sure that we have something that will fit in a register, and |
| // make sure it's a power of 2. |
| if (DstSize < 8 || DstSize > 128 || !isPowerOf2_32(DstSize)) |
| return false; |
| |
| const LLT &SrcTy = Query.Types[1]; |
| |
    // Make sure the source also fits in a register. Don't bother checking
    // that the source type is below 128 bits; we shouldn't be allowing
    // anything through which is wider than the destination in the first place.
| unsigned SrcSize = SrcTy.getSizeInBits(); |
| if (SrcSize < 8 || !isPowerOf2_32(SrcSize)) |
| return false; |
| |
| return true; |
| }; |
| getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}) |
| .legalIf(ExtLegalFunc) |
| .clampScalar(0, s64, s64); // Just for s128, others are handled above. |
| |
| getActionDefinitionsBuilder(G_TRUNC) |
| .minScalarOrEltIf( |
| [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); }, |
| 0, s8) |
| .customIf([=](const LegalityQuery &Query) { |
| LLT DstTy = Query.Types[0]; |
| LLT SrcTy = Query.Types[1]; |
| return DstTy == v8s8 && SrcTy.getSizeInBits() > 128; |
| }) |
| .alwaysLegal(); |
| |
| getActionDefinitionsBuilder(G_SEXT_INREG) |
| .legalFor({s32, s64}) |
| .legalFor(PackedVectorAllTypeList) |
| .lower(); |
| |
| // FP conversions |
| getActionDefinitionsBuilder(G_FPTRUNC) |
| .legalFor( |
| {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}}) |
| .clampMaxNumElements(0, s32, 2); |
| getActionDefinitionsBuilder(G_FPEXT) |
| .legalFor( |
| {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}}) |
| .clampMaxNumElements(0, s64, 2); |
| |
| // Conversions |
| getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI}) |
| .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32}) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s32, s64) |
| .widenScalarToNextPow2(1) |
| .clampScalar(1, s32, s64); |
| |
| getActionDefinitionsBuilder({G_SITOFP, G_UITOFP}) |
| .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32}) |
| .clampScalar(1, s32, s64) |
| .minScalarSameAs(1, 0) |
| .clampScalar(0, s32, s64) |
| .widenScalarToNextPow2(0); |
| |
| // Control-flow |
| getActionDefinitionsBuilder(G_BRCOND) |
| .legalFor({s32}) |
| .clampScalar(0, s32, s32); |
| getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0}); |
| |
| getActionDefinitionsBuilder(G_SELECT) |
| .legalFor({{s32, s32}, {s64, s32}, {p0, s32}}) |
| .widenScalarToNextPow2(0) |
| .clampScalar(0, s32, s64) |
| .clampScalar(1, s32, s32) |
| .minScalarEltSameAsIf(all(isVector(0), isVector(1)), 1, 0) |
| .lowerIf(isVector(0)); |
| |
| // Pointer-handling |
| getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0}); |
| |
| if (TM.getCodeModel() == CodeModel::Small) |
| getActionDefinitionsBuilder(G_GLOBAL_VALUE).custom(); |
| else |
| getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0}); |
| |
| getActionDefinitionsBuilder(G_PTRTOINT) |
| .legalFor({{s64, p0}, {v2s64, v2p0}}) |
| .widenScalarToNextPow2(0, 64) |
| .clampScalar(0, s64, s64); |
| |
| getActionDefinitionsBuilder(G_INTTOPTR) |
| .unsupportedIf([&](const LegalityQuery &Query) { |
| return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits(); |
| }) |
| .legalFor({{p0, s64}, {v2p0, v2s64}}); |
| |
  // Casts for 32 and 64-bit width types are just copies.
| // Same for 128-bit width type, except they are on the FPR bank. |
| getActionDefinitionsBuilder(G_BITCAST) |
| // FIXME: This is wrong since G_BITCAST is not allowed to change the |
| // number of bits but it's what the previous code described and fixing |
| // it breaks tests. |
| .legalForCartesianProduct({s8, s16, s32, s64, s128, v16s8, v8s8, v4s8, |
| v8s16, v4s16, v2s16, v4s32, v2s32, v2s64, |
| v2p0}); |
| |
| getActionDefinitionsBuilder(G_VASTART).legalFor({p0}); |
| |
| // va_list must be a pointer, but most sized types are pretty easy to handle |
| // as the destination. |
| getActionDefinitionsBuilder(G_VAARG) |
| .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0}) |
| .clampScalar(0, s8, s64) |
| .widenScalarToNextPow2(0, /*Min*/ 8); |
| |
| getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS) |
| .lowerIf( |
| all(typeInSet(0, {s8, s16, s32, s64, s128}), typeIs(2, p0))); |
| |
| getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG) |
| .customIf([](const LegalityQuery &Query) { |
| return Query.Types[0].getSizeInBits() == 128; |
| }) |
| .clampScalar(0, s32, s64) |
| .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0))); |
| |
| getActionDefinitionsBuilder( |
| {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND, |
| G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX, |
| G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX}) |
| .clampScalar(0, s32, s64) |
| .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0))); |
| |
| getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0}); |
| |
| // Merge/Unmerge |
| for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) { |
| unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1; |
| unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0; |
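    // For G_MERGE_VALUES the wide type is the destination (type index 0);
    // for G_UNMERGE_VALUES it is the source (type index 1).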
| getActionDefinitionsBuilder(Op) |
| .widenScalarToNextPow2(LitTyIdx, 8) |
| .widenScalarToNextPow2(BigTyIdx, 32) |
| .clampScalar(LitTyIdx, s8, s64) |
| .clampScalar(BigTyIdx, s32, s128) |
| .legalIf([=](const LegalityQuery &Q) { |
| switch (Q.Types[BigTyIdx].getSizeInBits()) { |
| case 32: |
| case 64: |
| case 128: |
| break; |
| default: |
| return false; |
| } |
| switch (Q.Types[LitTyIdx].getSizeInBits()) { |
| case 8: |
| case 16: |
| case 32: |
| case 64: |
| return true; |
| default: |
| return false; |
| } |
| }); |
| } |
| |
| getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT) |
| .unsupportedIf([=](const LegalityQuery &Query) { |
| const LLT &EltTy = Query.Types[1].getElementType(); |
| return Query.Types[0] != EltTy; |
| }) |
| .minScalar(2, s64) |
| .legalIf([=](const LegalityQuery &Query) { |
| const LLT &VecTy = Query.Types[1]; |
| return VecTy == v2s16 || VecTy == v4s16 || VecTy == v8s16 || |
| VecTy == v4s32 || VecTy == v2s64 || VecTy == v2s32 || |
| VecTy == v8s8 || VecTy == v16s8 || VecTy == v2s32 || |
| VecTy == v2p0; |
| }) |
| .minScalarOrEltIf( |
| [=](const LegalityQuery &Query) { |
            // We want to promote <M x s1> to <M x s64> if that wouldn't
| // cause the total vec size to be > 128b. |
| return Query.Types[1].getNumElements() <= 2; |
| }, |
| 0, s64) |
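      // The rules below extend the same promotion down the ladder for 4-, 8-,
      // and 16-element vectors, keeping the total size within 128 bits.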
| .minScalarOrEltIf( |
| [=](const LegalityQuery &Query) { |
| return Query.Types[1].getNumElements() <= 4; |
| }, |
| 0, s32) |
| .minScalarOrEltIf( |
| [=](const LegalityQuery &Query) { |
| return Query.Types[1].getNumElements() <= 8; |
| }, |
| 0, s16) |
| .minScalarOrEltIf( |
| [=](const LegalityQuery &Query) { |
| return Query.Types[1].getNumElements() <= 16; |
| }, |
| 0, s8) |
| .minScalarOrElt(0, s8) // Worst case, we need at least s8. |
| .clampMaxNumElements(1, s64, 2) |
| .clampMaxNumElements(1, s32, 4) |
| .clampMaxNumElements(1, s16, 8) |
| .clampMaxNumElements(1, p0, 2); |
| |
| getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT) |
| .legalIf(typeInSet(0, {v8s16, v2s32, v4s32, v2s64})); |
| |
| getActionDefinitionsBuilder(G_BUILD_VECTOR) |
| .legalFor({{v8s8, s8}, |
| {v16s8, s8}, |
| {v2s16, s16}, |
| {v4s16, s16}, |
| {v8s16, s16}, |
| {v2s32, s32}, |
| {v4s32, s32}, |
| {v2p0, p0}, |
| {v2s64, s64}}) |
| .clampNumElements(0, v4s32, v4s32) |
| .clampNumElements(0, v2s64, v2s64) |
| .minScalarOrElt(0, s8) |
| .minScalarSameAs(1, 0); |
| |
| getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC).lower(); |
| |
| getActionDefinitionsBuilder(G_CTLZ) |
| .legalForCartesianProduct( |
| {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32}) |
| .scalarize(1); |
| getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF).lower(); |
| |
| // TODO: Custom lowering for v2s32, v4s32, v2s64. |
| getActionDefinitionsBuilder(G_BITREVERSE) |
| .legalFor({s32, s64, v8s8, v16s8}) |
| .widenScalarToNextPow2(0, /*Min = */ 32) |
| .clampScalar(0, s32, s64); |
| |
| getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF).lower(); |
| |
| getActionDefinitionsBuilder(G_CTTZ) |
| .lowerIf(isVector(0)) |
| .clampScalar(0, s32, s64) |
| .scalarSameSizeAs(1, 0) |
| .legalIf([=](const LegalityQuery &Query) { |
| return (HasCSSC && typeInSet(0, {s32, s64})(Query)); |
| }) |
| .customIf([=](const LegalityQuery &Query) { |
| return (!HasCSSC && typeInSet(0, {s32, s64})(Query)); |
| }); |
| |
| getActionDefinitionsBuilder(G_SHUFFLE_VECTOR) |
| .legalIf([=](const LegalityQuery &Query) { |
| const LLT &DstTy = Query.Types[0]; |
| const LLT &SrcTy = Query.Types[1]; |
| // For now just support the TBL2 variant which needs the source vectors |
| // to be the same size as the dest. |
| if (DstTy != SrcTy) |
| return false; |
| return llvm::is_contained({v2s32, v4s32, v2s64, v2p0, v16s8, v8s16}, |
| DstTy); |
| }) |
      // G_SHUFFLE_VECTOR can have scalar sources (from 1 x s vectors); we
      // just want those lowered into G_BUILD_VECTOR.
| .lowerIf([=](const LegalityQuery &Query) { |
| return !Query.Types[1].isVector(); |
| }) |
| .moreElementsIf( |
| [](const LegalityQuery &Query) { |
| return Query.Types[0].isVector() && Query.Types[1].isVector() && |
| Query.Types[0].getNumElements() > |
| Query.Types[1].getNumElements(); |
| }, |
| changeTo(1, 0)) |
| .moreElementsToNextPow2(0) |
| .clampNumElements(0, v4s32, v4s32) |
| .clampNumElements(0, v2s64, v2s64); |
| |
| getActionDefinitionsBuilder(G_CONCAT_VECTORS) |
| .legalFor({{v4s32, v2s32}, {v8s16, v4s16}, {v16s8, v8s8}}); |
| |
| getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({{p0}, {s64}}); |
| |
| getActionDefinitionsBuilder(G_BRJT).legalIf([=](const LegalityQuery &Query) { |
| return Query.Types[0] == p0 && Query.Types[1] == s64; |
| }); |
| |
| getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower(); |
| |
| if (ST.hasMOPS()) { |
| // G_BZERO is not supported. Currently it is only emitted by |
| // PreLegalizerCombiner for G_MEMSET with zero constant. |
| getActionDefinitionsBuilder(G_BZERO).unsupported(); |
| |
| getActionDefinitionsBuilder(G_MEMSET) |
| .legalForCartesianProduct({p0}, {s64}, {s64}) |
| .customForCartesianProduct({p0}, {s8}, {s64}) |
| .immIdx(0); // Inform verifier imm idx 0 is handled. |
| |
| getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE}) |
| .legalForCartesianProduct({p0}, {p0}, {s64}) |
| .immIdx(0); // Inform verifier imm idx 0 is handled. |
| |
| // G_MEMCPY_INLINE does not have a tailcall immediate |
| getActionDefinitionsBuilder(G_MEMCPY_INLINE) |
        .legalForCartesianProduct({p0}, {p0}, {s64});
  } else {
| getActionDefinitionsBuilder({G_BZERO, G_MEMCPY, G_MEMMOVE, G_MEMSET}) |
| .libcall(); |
| } |
| |
| // FIXME: Legal vector types are only legal with NEON. |
| auto &ABSActions = getActionDefinitionsBuilder(G_ABS); |
| if (HasCSSC) |
| ABSActions |
| .legalFor({s32, s64}); |
| ABSActions |
| .legalFor(PackedVectorAllTypeList) |
| .lowerIf(isScalar(0)); |
| |
| getActionDefinitionsBuilder(G_VECREDUCE_FADD) |
| // We only have FADDP to do reduction-like operations. Lower the rest. |
| .legalFor({{s32, v2s32}, {s64, v2s64}}) |
| .clampMaxNumElements(1, s64, 2) |
| .clampMaxNumElements(1, s32, 2) |
| .lower(); |
| |
| getActionDefinitionsBuilder(G_VECREDUCE_ADD) |
| .legalFor( |
| {{s8, v16s8}, {s16, v8s16}, {s32, v4s32}, {s32, v2s32}, {s64, v2s64}}) |
| .clampMaxNumElements(1, s64, 2) |
| .clampMaxNumElements(1, s32, 4) |
| .lower(); |
| |
| getActionDefinitionsBuilder( |
| {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR}) |
| // Try to break down into smaller vectors as long as they're at least 64 |
| // bits. This lets us use vector operations for some parts of the |
| // reduction. |
| .fewerElementsIf( |
| [=](const LegalityQuery &Q) { |
| LLT SrcTy = Q.Types[1]; |
| if (SrcTy.isScalar()) |
| return false; |
| if (!isPowerOf2_32(SrcTy.getNumElements())) |
| return false; |
| // We can usually perform 64b vector operations. |
| return SrcTy.getSizeInBits() > 64; |
| }, |
| [=](const LegalityQuery &Q) { |
| LLT SrcTy = Q.Types[1]; |
| return std::make_pair(1, SrcTy.divide(2)); |
| }) |
| .scalarize(1) |
| .lower(); |
| |
| getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT}) |
| .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); }); |
| |
| getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower(); |
| |
| getActionDefinitionsBuilder(G_ROTR) |
| .legalFor({{s32, s64}, {s64, s64}}) |
| .customIf([=](const LegalityQuery &Q) { |
| return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64; |
| }) |
| .lower(); |
| getActionDefinitionsBuilder(G_ROTL).lower(); |
| |
| getActionDefinitionsBuilder({G_SBFX, G_UBFX}) |
| .customFor({{s32, s32}, {s64, s64}}); |
| |
| auto always = [=](const LegalityQuery &Q) { return true; }; |
| auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP); |
| if (HasCSSC) |
| CTPOPActions |
| .legalFor({{s32, s32}, |
| {s64, s64}, |
| {v8s8, v8s8}, |
| {v16s8, v16s8}}) |
| .customFor({{s128, s128}, |
| {v2s64, v2s64}, |
| {v2s32, v2s32}, |
| {v4s32, v4s32}, |
| {v4s16, v4s16}, |
| {v8s16, v8s16}}); |
| else |
| CTPOPActions |
| .legalFor({{v8s8, v8s8}, |
| {v16s8, v16s8}}) |
| .customFor({{s32, s32}, |
| {s64, s64}, |
| {s128, s128}, |
| {v2s64, v2s64}, |
| {v2s32, v2s32}, |
| {v4s32, v4s32}, |
| {v4s16, v4s16}, |
| {v8s16, v8s16}}); |
| CTPOPActions |
| .clampScalar(0, s32, s128) |
| .widenScalarToNextPow2(0) |
| .minScalarEltSameAsIf(always, 1, 0) |
| .maxScalarEltSameAsIf(always, 1, 0); |
| |
| // TODO: Vector types. |
| getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT}).lowerIf(isScalar(0)); |
| |
| // TODO: Vector types. |
| getActionDefinitionsBuilder({G_FMAXNUM, G_FMINNUM}) |
| .legalFor({MinFPScalar, s32, s64}) |
| .libcallFor({s128}) |
| .minScalar(0, MinFPScalar); |
| |
| getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM}) |
| .legalFor({MinFPScalar, s32, s64, v2s32, v4s32, v2s64}) |
| .legalIf([=](const LegalityQuery &Query) { |
| const auto &Ty = Query.Types[0]; |
| return (Ty == v8s16 || Ty == v4s16) && HasFP16; |
| }) |
| .minScalar(0, MinFPScalar) |
| .clampNumElements(0, v4s16, v8s16) |
| .clampNumElements(0, v2s32, v4s32) |
| .clampNumElements(0, v2s64, v2s64); |
| |
| // TODO: Libcall support for s128. |
| // TODO: s16 should be legal with full FP16 support. |
| getActionDefinitionsBuilder({G_LROUND, G_LLROUND}) |
| .legalFor({{s64, s32}, {s64, s64}}); |
| |
| // TODO: Custom legalization for vector types. |
| // TODO: Custom legalization for mismatched types. |
| // TODO: s16 support. |
| getActionDefinitionsBuilder(G_FCOPYSIGN).customFor({{s32, s32}, {s64, s64}}); |
| |
| getActionDefinitionsBuilder(G_FMAD).lower(); |
| |
| getLegacyLegalizerInfo().computeTables(); |
| verify(*ST.getInstrInfo()); |
| } |
| |
| bool AArch64LegalizerInfo::legalizeCustom(LegalizerHelper &Helper, |
| MachineInstr &MI) const { |
| MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; |
| MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); |
| GISelChangeObserver &Observer = Helper.Observer; |
| switch (MI.getOpcode()) { |
| default: |
| // No idea what to do. |
| return false; |
| case TargetOpcode::G_VAARG: |
| return legalizeVaArg(MI, MRI, MIRBuilder); |
| case TargetOpcode::G_LOAD: |
| case TargetOpcode::G_STORE: |
| return legalizeLoadStore(MI, MRI, MIRBuilder, Observer); |
| case TargetOpcode::G_SHL: |
| case TargetOpcode::G_ASHR: |
| case TargetOpcode::G_LSHR: |
| return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer); |
| case TargetOpcode::G_GLOBAL_VALUE: |
| return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer); |
| case TargetOpcode::G_TRUNC: |
| return legalizeVectorTrunc(MI, Helper); |
| case TargetOpcode::G_SBFX: |
| case TargetOpcode::G_UBFX: |
| return legalizeBitfieldExtract(MI, MRI, Helper); |
| case TargetOpcode::G_ROTR: |
| return legalizeRotate(MI, MRI, Helper); |
| case TargetOpcode::G_CTPOP: |
| return legalizeCTPOP(MI, MRI, Helper); |
| case TargetOpcode::G_ATOMIC_CMPXCHG: |
| return legalizeAtomicCmpxchg128(MI, MRI, Helper); |
| case TargetOpcode::G_CTTZ: |
| return legalizeCTTZ(MI, Helper); |
| case TargetOpcode::G_BZERO: |
| case TargetOpcode::G_MEMCPY: |
| case TargetOpcode::G_MEMMOVE: |
| case TargetOpcode::G_MEMSET: |
| return legalizeMemOps(MI, Helper); |
| case TargetOpcode::G_FCOPYSIGN: |
| return legalizeFCopySign(MI, Helper); |
| } |
| |
| llvm_unreachable("expected switch to return"); |
| } |
| |
| bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI, |
| MachineRegisterInfo &MRI, |
| LegalizerHelper &Helper) const { |
| // To allow for imported patterns to match, we ensure that the rotate amount |
| // is 64b with an extension. |
| Register AmtReg = MI.getOperand(2).getReg(); |
| LLT AmtTy = MRI.getType(AmtReg); |
| (void)AmtTy; |
| assert(AmtTy.isScalar() && "Expected a scalar rotate"); |
| assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal"); |
| auto NewAmt = Helper.MIRBuilder.buildZExt(LLT::scalar(64), AmtReg); |
| Helper.Observer.changingInstr(MI); |
| MI.getOperand(2).setReg(NewAmt.getReg(0)); |
| Helper.Observer.changedInstr(MI); |
| return true; |
| } |
| |
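// Split Reg into NumParts pieces of type Ty with a single G_UNMERGE_VALUES,
// appending the newly created registers to VRegs.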
| static void extractParts(Register Reg, MachineRegisterInfo &MRI, |
| MachineIRBuilder &MIRBuilder, LLT Ty, int NumParts, |
| SmallVectorImpl<Register> &VRegs) { |
| for (int I = 0; I < NumParts; ++I) |
| VRegs.push_back(MRI.createGenericVirtualRegister(Ty)); |
| MIRBuilder.buildUnmerge(VRegs, Reg); |
| } |
| |
| bool AArch64LegalizerInfo::legalizeVectorTrunc( |
| MachineInstr &MI, LegalizerHelper &Helper) const { |
| MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; |
| MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); |
  // Similar to how operand splitting is done in SelectionDAG, we can handle
| // %res(v8s8) = G_TRUNC %in(v8s32) by generating: |
| // %inlo(<4x s32>), %inhi(<4 x s32>) = G_UNMERGE %in(<8 x s32>) |
| // %lo16(<4 x s16>) = G_TRUNC %inlo |
| // %hi16(<4 x s16>) = G_TRUNC %inhi |
| // %in16(<8 x s16>) = G_CONCAT_VECTORS %lo16, %hi16 |
| // %res(<8 x s8>) = G_TRUNC %in16 |
| |
| Register DstReg = MI.getOperand(0).getReg(); |
| Register SrcReg = MI.getOperand(1).getReg(); |
| LLT DstTy = MRI.getType(DstReg); |
| LLT SrcTy = MRI.getType(SrcReg); |
| assert(isPowerOf2_32(DstTy.getSizeInBits()) && |
| isPowerOf2_32(SrcTy.getSizeInBits())); |
| |
| // Split input type. |
| LLT SplitSrcTy = |
| SrcTy.changeElementCount(SrcTy.getElementCount().divideCoefficientBy(2)); |
| // First, split the source into two smaller vectors. |
| SmallVector<Register, 2> SplitSrcs; |
| extractParts(SrcReg, MRI, MIRBuilder, SplitSrcTy, 2, SplitSrcs); |
| |
| // Truncate the splits into intermediate narrower elements. |
| LLT InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2); |
| for (unsigned I = 0; I < SplitSrcs.size(); ++I) |
| SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0); |
| |
| auto Concat = MIRBuilder.buildConcatVectors( |
| DstTy.changeElementSize(DstTy.getScalarSizeInBits() * 2), SplitSrcs); |
| |
| Helper.Observer.changingInstr(MI); |
| MI.getOperand(1).setReg(Concat.getReg(0)); |
| Helper.Observer.changedInstr(MI); |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue( |
| MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, |
| GISelChangeObserver &Observer) const { |
| assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE); |
| // We do this custom legalization to convert G_GLOBAL_VALUE into target ADRP + |
| // G_ADD_LOW instructions. |
  // By splitting this here, we can optimize accesses in the small code model
  // by folding the G_ADD_LOW into the load/store offset.
| auto &GlobalOp = MI.getOperand(1); |
  const auto *GV = GlobalOp.getGlobal();
| if (GV->isThreadLocal()) |
| return true; // Don't want to modify TLS vars. |
| |
| auto &TM = ST->getTargetLowering()->getTargetMachine(); |
| unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM); |
| |
| if (OpFlags & AArch64II::MO_GOT) |
| return true; |
| |
| auto Offset = GlobalOp.getOffset(); |
| Register DstReg = MI.getOperand(0).getReg(); |
| auto ADRP = MIRBuilder.buildInstr(AArch64::ADRP, {LLT::pointer(0, 64)}, {}) |
| .addGlobalAddress(GV, Offset, OpFlags | AArch64II::MO_PAGE); |
| // Set the regclass on the dest reg too. |
| MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass); |
| |
| // MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so |
| // by creating a MOVK that sets bits 48-63 of the register to (global address |
| // + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to |
  // prevent an incorrect tag being generated during relocation when the
| // global appears before the code section. Without the offset, a global at |
| // `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced |
| // by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 = |
| // 0x0eff'ffff'ffff'f000`, meaning the tag would be incorrectly set to `0xe` |
| // instead of `0xf`. |
| // This assumes that we're in the small code model so we can assume a binary |
| // size of <= 4GB, which makes the untagged PC relative offset positive. The |
| // binary must also be loaded into address range [0, 2^48). Both of these |
| // properties need to be ensured at runtime when using tagged addresses. |
| if (OpFlags & AArch64II::MO_TAGGED) { |
| assert(!Offset && |
| "Should not have folded in an offset for a tagged global!"); |
| ADRP = MIRBuilder.buildInstr(AArch64::MOVKXi, {LLT::pointer(0, 64)}, {ADRP}) |
| .addGlobalAddress(GV, 0x100000000, |
| AArch64II::MO_PREL | AArch64II::MO_G3) |
| .addImm(48); |
| MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass); |
| } |
| |
| MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP}) |
| .addGlobalAddress(GV, Offset, |
| OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper, |
| MachineInstr &MI) const { |
| switch (MI.getIntrinsicID()) { |
| case Intrinsic::vacopy: { |
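    // On Darwin and Windows va_list is a single pointer; the AAPCS va_list is
    // a 32-byte struct (20 bytes under ILP32), so copy the whole aggregate.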
| unsigned PtrSize = ST->isTargetILP32() ? 4 : 8; |
| unsigned VaListSize = |
| (ST->isTargetDarwin() || ST->isTargetWindows()) |
| ? PtrSize |
| : ST->isTargetILP32() ? 20 : 32; |
| |
| MachineFunction &MF = *MI.getMF(); |
| auto Val = MF.getRegInfo().createGenericVirtualRegister( |
| LLT::scalar(VaListSize * 8)); |
| MachineIRBuilder MIB(MI); |
| MIB.buildLoad(Val, MI.getOperand(2), |
| *MF.getMachineMemOperand(MachinePointerInfo(), |
| MachineMemOperand::MOLoad, |
| VaListSize, Align(PtrSize))); |
| MIB.buildStore(Val, MI.getOperand(1), |
| *MF.getMachineMemOperand(MachinePointerInfo(), |
| MachineMemOperand::MOStore, |
| VaListSize, Align(PtrSize))); |
| MI.eraseFromParent(); |
| return true; |
| } |
| case Intrinsic::get_dynamic_area_offset: { |
| MachineIRBuilder &MIB = Helper.MIRBuilder; |
| MIB.buildConstant(MI.getOperand(0).getReg(), 0); |
| MI.eraseFromParent(); |
| return true; |
| } |
| case Intrinsic::aarch64_mops_memset_tag: { |
| assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS); |
    // Widen the value to 64 bits with an anyext; only the low byte is used.
    MachineIRBuilder MIB(MI);
    auto &Value = MI.getOperand(3);
    Register ExtValueReg = MIB.buildAnyExt(LLT::scalar(64), Value).getReg(0);
    Value.setReg(ExtValueReg);
| return true; |
| } |
| case Intrinsic::prefetch: { |
| MachineIRBuilder MIB(MI); |
| auto &AddrVal = MI.getOperand(1); |
| |
| int64_t IsWrite = MI.getOperand(2).getImm(); |
| int64_t Locality = MI.getOperand(3).getImm(); |
| int64_t IsData = MI.getOperand(4).getImm(); |
| |
| bool IsStream = Locality == 0; |
| if (Locality != 0) { |
| assert(Locality <= 3 && "Prefetch locality out-of-range"); |
      // The intrinsic's locality is the inverse of PRFM's target cache level:
      // locality 3 (keep resident) maps to L1, whose encoding is 0, so flip
      // the number around.
| Locality = 3 - Locality; |
| } |
| |
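    // PRFM's immediate encodes: bit 4 = prefetch-for-store, bit 3 =
    // instruction (not data) cache, bits 2-1 = target cache level,
    // bit 0 = streaming (non-temporal).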
| unsigned PrfOp = |
| (IsWrite << 4) | (!IsData << 3) | (Locality << 1) | IsStream; |
| |
| MIB.buildInstr(AArch64::G_PREFETCH).addImm(PrfOp).add(AddrVal); |
| MI.eraseFromParent(); |
| return true; |
| } |
| case Intrinsic::aarch64_prefetch: { |
| MachineIRBuilder MIB(MI); |
| auto &AddrVal = MI.getOperand(1); |
| |
| int64_t IsWrite = MI.getOperand(2).getImm(); |
| int64_t Target = MI.getOperand(3).getImm(); |
| int64_t IsStream = MI.getOperand(4).getImm(); |
| int64_t IsData = MI.getOperand(5).getImm(); |
| |
| unsigned PrfOp = (IsWrite << 4) | // Load/Store bit |
| (!IsData << 3) | // IsDataCache bit |
| (Target << 1) | // Cache level bits |
| (unsigned)IsStream; // Stream bit |
| |
| MIB.buildInstr(AArch64::G_PREFETCH).addImm(PrfOp).add(AddrVal); |
| MI.eraseFromParent(); |
| return true; |
| } |
| } |
| |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeShlAshrLshr( |
| MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, |
| GISelChangeObserver &Observer) const { |
| assert(MI.getOpcode() == TargetOpcode::G_ASHR || |
| MI.getOpcode() == TargetOpcode::G_LSHR || |
| MI.getOpcode() == TargetOpcode::G_SHL); |
| // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the |
| // imported patterns can select it later. Either way, it will be legal. |
| Register AmtReg = MI.getOperand(2).getReg(); |
| auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI); |
| if (!VRegAndVal) |
| return true; |
| // Check the shift amount is in range for an immediate form. |
| int64_t Amount = VRegAndVal->Value.getSExtValue(); |
| if (Amount > 31) |
| return true; // This will have to remain a register variant. |
| auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount); |
| Observer.changingInstr(MI); |
| MI.getOperand(2).setReg(ExtCst.getReg(0)); |
| Observer.changedInstr(MI); |
| return true; |
| } |
| |
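// If Root is a G_PTR_ADD with a constant offset that fits LDP/STP's signed
// 7-bit immediate scaled by 8 (a multiple of 8 in [-512, 504]), fold it into
// Base/Offset; otherwise use Root itself with a zero offset.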
| static void matchLDPSTPAddrMode(Register Root, Register &Base, int &Offset, |
| MachineRegisterInfo &MRI) { |
| Base = Root; |
| Offset = 0; |
| |
| Register NewBase; |
| int64_t NewOffset; |
| if (mi_match(Root, MRI, m_GPtrAdd(m_Reg(NewBase), m_ICst(NewOffset))) && |
| isShiftedInt<7, 3>(NewOffset)) { |
| Base = NewBase; |
| Offset = NewOffset; |
| } |
| } |
| |
| // FIXME: This should be removed and replaced with the generic bitcast legalize |
| // action. |
| bool AArch64LegalizerInfo::legalizeLoadStore( |
| MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, |
| GISelChangeObserver &Observer) const { |
| assert(MI.getOpcode() == TargetOpcode::G_STORE || |
| MI.getOpcode() == TargetOpcode::G_LOAD); |
| // Here we just try to handle vector loads/stores where our value type might |
| // have pointer elements, which the SelectionDAG importer can't handle. To |
| // allow the existing patterns for s64 to fire for p0, we just try to bitcast |
| // the value to use s64 types. |
| |
  // Custom legalization requires that the instruction, if not deleted, is
  // fully legal afterwards. To allow further legalization of the result, we
  // create a new instruction and erase the existing one.
| |
| Register ValReg = MI.getOperand(0).getReg(); |
| const LLT ValTy = MRI.getType(ValReg); |
| |
| if (ValTy == LLT::scalar(128)) { |
| assert((*MI.memoperands_begin())->getSuccessOrdering() == |
| AtomicOrdering::Monotonic || |
| (*MI.memoperands_begin())->getSuccessOrdering() == |
| AtomicOrdering::Unordered); |
| assert(ST->hasLSE2() && "ldp/stp not single copy atomic without +lse2"); |
| LLT s64 = LLT::scalar(64); |
| MachineInstrBuilder NewI; |
| if (MI.getOpcode() == TargetOpcode::G_LOAD) { |
| NewI = MIRBuilder.buildInstr(AArch64::LDPXi, {s64, s64}, {}); |
| MIRBuilder.buildMergeLikeInstr( |
| ValReg, {NewI->getOperand(0), NewI->getOperand(1)}); |
| } else { |
| auto Split = MIRBuilder.buildUnmerge(s64, MI.getOperand(0)); |
| NewI = MIRBuilder.buildInstr( |
| AArch64::STPXi, {}, {Split->getOperand(0), Split->getOperand(1)}); |
| } |
| Register Base; |
| int Offset; |
| matchLDPSTPAddrMode(MI.getOperand(1).getReg(), Base, Offset, MRI); |
| NewI.addUse(Base); |
| NewI.addImm(Offset / 8); |
| |
| NewI.cloneMemRefs(MI); |
| constrainSelectedInstRegOperands(*NewI, *ST->getInstrInfo(), |
| *MRI.getTargetRegisterInfo(), |
| *ST->getRegBankInfo()); |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| if (!ValTy.isVector() || !ValTy.getElementType().isPointer() || |
| ValTy.getElementType().getAddressSpace() != 0) { |
| LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store"); |
| return false; |
| } |
| |
| unsigned PtrSize = ValTy.getElementType().getSizeInBits(); |
| const LLT NewTy = LLT::vector(ValTy.getElementCount(), PtrSize); |
| auto &MMO = **MI.memoperands_begin(); |
| MMO.setType(NewTy); |
| |
| if (MI.getOpcode() == TargetOpcode::G_STORE) { |
| auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg); |
| MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1), MMO); |
| } else { |
| auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO); |
| MIRBuilder.buildBitcast(ValReg, NewLoad); |
| } |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI, |
| MachineRegisterInfo &MRI, |
| MachineIRBuilder &MIRBuilder) const { |
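  // Lower G_VAARG for a pointer-style va_list: load the current list pointer,
  // realign it if the type is over-aligned, load the value, then store back
  // the pointer advanced by the pointer-aligned value size.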
| MachineFunction &MF = MIRBuilder.getMF(); |
| Align Alignment(MI.getOperand(2).getImm()); |
| Register Dst = MI.getOperand(0).getReg(); |
| Register ListPtr = MI.getOperand(1).getReg(); |
| |
| LLT PtrTy = MRI.getType(ListPtr); |
| LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits()); |
| |
| const unsigned PtrSize = PtrTy.getSizeInBits() / 8; |
| const Align PtrAlign = Align(PtrSize); |
| auto List = MIRBuilder.buildLoad( |
| PtrTy, ListPtr, |
| *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad, |
| PtrTy, PtrAlign)); |
| |
| MachineInstrBuilder DstPtr; |
| if (Alignment > PtrAlign) { |
| // Realign the list to the actual required alignment. |
| auto AlignMinus1 = |
| MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1); |
| auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0)); |
| DstPtr = MIRBuilder.buildMaskLowPtrBits(PtrTy, ListTmp, Log2(Alignment)); |
| } else |
| DstPtr = List; |
| |
| LLT ValTy = MRI.getType(Dst); |
| uint64_t ValSize = ValTy.getSizeInBits() / 8; |
| MIRBuilder.buildLoad( |
| Dst, DstPtr, |
| *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad, |
| ValTy, std::max(Alignment, PtrAlign))); |
| |
| auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrAlign)); |
| |
| auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0)); |
| |
| MIRBuilder.buildStore(NewList, ListPtr, |
| *MF.getMachineMemOperand(MachinePointerInfo(), |
| MachineMemOperand::MOStore, |
| PtrTy, PtrAlign)); |
| |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeBitfieldExtract( |
| MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const { |
| // Only legal if we can select immediate forms. |
| // TODO: Lower this otherwise. |
| return getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) && |
| getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI); |
| } |
| |
| bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI, |
| MachineRegisterInfo &MRI, |
| LegalizerHelper &Helper) const { |
| // When there is no integer popcount instruction (FEAT_CSSC isn't available), |
| // it can be more efficiently lowered to the following sequence that uses |
| // AdvSIMD registers/instructions as long as the copies to/from the AdvSIMD |
| // registers are cheap. |
| // FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd |
| // CNT V0.8B, V0.8B // 8xbyte pop-counts |
| // ADDV B0, V0.8B // sum 8xbyte pop-counts |
| // UMOV X0, V0.B[0] // copy byte result back to integer reg |
| // |
| // For 128 bit vector popcounts, we lower to the following sequence: |
| // cnt.16b v0, v0 // v8s16, v4s32, v2s64 |
| // uaddlp.8h v0, v0 // v8s16, v4s32, v2s64 |
| // uaddlp.4s v0, v0 // v4s32, v2s64 |
| // uaddlp.2d v0, v0 // v2s64 |
| // |
| // For 64 bit vector popcounts, we lower to the following sequence: |
| // cnt.8b v0, v0 // v4s16, v2s32 |
| // uaddlp.4h v0, v0 // v4s16, v2s32 |
| // uaddlp.2s v0, v0 // v2s32 |
| |
| MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; |
| Register Dst = MI.getOperand(0).getReg(); |
| Register Val = MI.getOperand(1).getReg(); |
| LLT Ty = MRI.getType(Val); |
| unsigned Size = Ty.getSizeInBits(); |
| |
| assert(Ty == MRI.getType(Dst) && |
| "Expected src and dst to have the same type!"); |
| |
| if (ST->hasCSSC() && Ty.isScalar() && Size == 128) { |
| LLT s64 = LLT::scalar(64); |
| |
| auto Split = MIRBuilder.buildUnmerge(s64, Val); |
| auto CTPOP1 = MIRBuilder.buildCTPOP(s64, Split->getOperand(0)); |
| auto CTPOP2 = MIRBuilder.buildCTPOP(s64, Split->getOperand(1)); |
| auto Add = MIRBuilder.buildAdd(s64, CTPOP1, CTPOP2); |
| |
| MIRBuilder.buildZExt(Dst, Add); |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| if (!ST->hasNEON() || |
| MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) { |
| // Use generic lowering when custom lowering is not possible. |
| return Ty.isScalar() && (Size == 32 || Size == 64) && |
| Helper.lowerBitCount(MI) == |
| LegalizerHelper::LegalizeResult::Legalized; |
| } |
| |
| // Pre-conditioning: widen Val up to the nearest vector type. |
  // s32,s64,v4s16,v2s32 -> v8s8
  // v8s16,v4s32,v2s64 -> v16s8
| LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8); |
| if (Ty.isScalar()) { |
    assert((Size == 32 || Size == 64 || Size == 128) &&
           "Expected only 32, 64, or 128 bit scalars!");
| if (Size == 32) { |
| Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0); |
| } |
| } |
| Val = MIRBuilder.buildBitcast(VTy, Val).getReg(0); |
| |
| // Count bits in each byte-sized lane. |
| auto CTPOP = MIRBuilder.buildCTPOP(VTy, Val); |
| |
| // Sum across lanes. |
| Register HSum = CTPOP.getReg(0); |
| unsigned Opc; |
| SmallVector<LLT> HAddTys; |
| if (Ty.isScalar()) { |
| Opc = Intrinsic::aarch64_neon_uaddlv; |
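    // UADDLV sums every byte lane into a single scalar; the intrinsic returns
    // it as an s32.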
| HAddTys.push_back(LLT::scalar(32)); |
| } else if (Ty == LLT::fixed_vector(8, 16)) { |
| Opc = Intrinsic::aarch64_neon_uaddlp; |
| HAddTys.push_back(LLT::fixed_vector(8, 16)); |
| } else if (Ty == LLT::fixed_vector(4, 32)) { |
| Opc = Intrinsic::aarch64_neon_uaddlp; |
| HAddTys.push_back(LLT::fixed_vector(8, 16)); |
| HAddTys.push_back(LLT::fixed_vector(4, 32)); |
| } else if (Ty == LLT::fixed_vector(2, 64)) { |
| Opc = Intrinsic::aarch64_neon_uaddlp; |
| HAddTys.push_back(LLT::fixed_vector(8, 16)); |
| HAddTys.push_back(LLT::fixed_vector(4, 32)); |
| HAddTys.push_back(LLT::fixed_vector(2, 64)); |
| } else if (Ty == LLT::fixed_vector(4, 16)) { |
| Opc = Intrinsic::aarch64_neon_uaddlp; |
| HAddTys.push_back(LLT::fixed_vector(4, 16)); |
| } else if (Ty == LLT::fixed_vector(2, 32)) { |
| Opc = Intrinsic::aarch64_neon_uaddlp; |
| HAddTys.push_back(LLT::fixed_vector(4, 16)); |
| HAddTys.push_back(LLT::fixed_vector(2, 32)); |
| } else |
| llvm_unreachable("unexpected vector shape"); |
| MachineInstrBuilder UADD; |
| for (LLT HTy : HAddTys) { |
| UADD = MIRBuilder.buildIntrinsic(Opc, {HTy}, /*HasSideEffects =*/false) |
| .addUse(HSum); |
| HSum = UADD.getReg(0); |
| } |
| |
| // Post-conditioning. |
| if (Ty.isScalar() && (Size == 64 || Size == 128)) |
| MIRBuilder.buildZExt(Dst, UADD); |
| else |
| UADD->getOperand(0).setReg(Dst); |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128( |
| MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const { |
| MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; |
| LLT s64 = LLT::scalar(64); |
| auto Addr = MI.getOperand(1).getReg(); |
| auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2)); |
| auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3)); |
| auto DstLo = MRI.createGenericVirtualRegister(s64); |
| auto DstHi = MRI.createGenericVirtualRegister(s64); |
| |
| MachineInstrBuilder CAS; |
| if (ST->hasLSE()) { |
| // We have 128-bit CASP instructions taking XSeqPair registers, which are |
| // s128. We need the merge/unmerge to bracket the expansion and pair up with |
| // the rest of the MIR so we must reassemble the extracted registers into a |
| // 128-bit known-regclass one with code like this: |
| // |
| // %in1 = REG_SEQUENCE Lo, Hi ; One for each input |
| // %out = CASP %in1, ... |
| // %OldLo = G_EXTRACT %out, 0 |
| // %OldHi = G_EXTRACT %out, 64 |
| auto Ordering = (*MI.memoperands_begin())->getMergedOrdering(); |
| unsigned Opcode; |
| switch (Ordering) { |
| case AtomicOrdering::Acquire: |
| Opcode = AArch64::CASPAX; |
| break; |
| case AtomicOrdering::Release: |
| Opcode = AArch64::CASPLX; |
| break; |
| case AtomicOrdering::AcquireRelease: |
| case AtomicOrdering::SequentiallyConsistent: |
| Opcode = AArch64::CASPALX; |
| break; |
| default: |
| Opcode = AArch64::CASPX; |
| break; |
| } |
| |
| LLT s128 = LLT::scalar(128); |
| auto CASDst = MRI.createGenericVirtualRegister(s128); |
| auto CASDesired = MRI.createGenericVirtualRegister(s128); |
| auto CASNew = MRI.createGenericVirtualRegister(s128); |
| MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {}) |
| .addUse(DesiredI->getOperand(0).getReg()) |
| .addImm(AArch64::sube64) |
| .addUse(DesiredI->getOperand(1).getReg()) |
| .addImm(AArch64::subo64); |
| MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {}) |
| .addUse(NewI->getOperand(0).getReg()) |
| .addImm(AArch64::sube64) |
| .addUse(NewI->getOperand(1).getReg()) |
| .addImm(AArch64::subo64); |
| |
| CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr}); |
| |
| MIRBuilder.buildExtract({DstLo}, {CASDst}, 0); |
| MIRBuilder.buildExtract({DstHi}, {CASDst}, 64); |
| } else { |
    // The -O0 CMP_SWAP_128 pseudo is friendlier to generate code for because
    // LDXP/STXP can take arbitrary registers, so it just has the normal GPR64
    // operands the rest of AArch64 is expecting.
| auto Ordering = (*MI.memoperands_begin())->getMergedOrdering(); |
| unsigned Opcode; |
| switch (Ordering) { |
| case AtomicOrdering::Acquire: |
| Opcode = AArch64::CMP_SWAP_128_ACQUIRE; |
| break; |
| case AtomicOrdering::Release: |
| Opcode = AArch64::CMP_SWAP_128_RELEASE; |
| break; |
| case AtomicOrdering::AcquireRelease: |
| case AtomicOrdering::SequentiallyConsistent: |
| Opcode = AArch64::CMP_SWAP_128; |
| break; |
| default: |
| Opcode = AArch64::CMP_SWAP_128_MONOTONIC; |
| break; |
| } |
| |
| auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass); |
| CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch}, |
| {Addr, DesiredI->getOperand(0), |
| DesiredI->getOperand(1), NewI->getOperand(0), |
| NewI->getOperand(1)}); |
| } |
| |
| CAS.cloneMemRefs(MI); |
| constrainSelectedInstRegOperands(*CAS, *ST->getInstrInfo(), |
| *MRI.getTargetRegisterInfo(), |
| *ST->getRegBankInfo()); |
| |
| MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {DstLo, DstHi}); |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeCTTZ(MachineInstr &MI, |
| LegalizerHelper &Helper) const { |
| MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; |
| MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); |
| LLT Ty = MRI.getType(MI.getOperand(1).getReg()); |
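  // cttz(x) == ctlz(bitreverse(x)); AArch64 has RBIT and CLZ but no scalar
  // CTZ without CSSC.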
| auto BitReverse = MIRBuilder.buildBitReverse(Ty, MI.getOperand(1)); |
| MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse); |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeMemOps(MachineInstr &MI, |
| LegalizerHelper &Helper) const { |
| MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; |
| |
| // Tagged version MOPSMemorySetTagged is legalised in legalizeIntrinsic |
| if (MI.getOpcode() == TargetOpcode::G_MEMSET) { |
    // Widen the value operand to 64 bits with an anyext; only the low byte is
    // used.
    auto &Value = MI.getOperand(1);
    Register ExtValueReg =
        MIRBuilder.buildAnyExt(LLT::scalar(64), Value).getReg(0);
    Value.setReg(ExtValueReg);
| return true; |
| } |
| |
| return false; |
| } |
| |
| bool AArch64LegalizerInfo::legalizeFCopySign(MachineInstr &MI, |
| LegalizerHelper &Helper) const { |
| MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; |
| MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); |
| Register Dst = MI.getOperand(0).getReg(); |
| LLT DstTy = MRI.getType(Dst); |
| assert(DstTy.isScalar() && "Only expected scalars right now!"); |
| const unsigned DstSize = DstTy.getSizeInBits(); |
| assert((DstSize == 32 || DstSize == 64) && "Unexpected dst type!"); |
| assert(MRI.getType(MI.getOperand(2).getReg()) == DstTy && |
| "Expected homogeneous types!"); |
| |
| // We want to materialize a mask with the high bit set. |
| uint64_t EltMask; |
| LLT VecTy; |
| |
| // TODO: s16 support. |
| switch (DstSize) { |
| default: |
| llvm_unreachable("Unexpected type for G_FCOPYSIGN!"); |
| case 64: { |
    // AdvSIMD immediate moves cannot materialize our mask in a single
| // instruction for 64-bit elements. Instead, materialize zero and then |
| // negate it. |
| EltMask = 0; |
| VecTy = LLT::fixed_vector(2, DstTy); |
| break; |
| } |
| case 32: |
| EltMask = 0x80000000ULL; |
| VecTy = LLT::fixed_vector(4, DstTy); |
| break; |
| } |
| |
| // Widen In1 and In2 to 128 bits. We want these to eventually become |
| // INSERT_SUBREGs. |
| auto Undef = MIRBuilder.buildUndef(VecTy); |
| auto Zero = MIRBuilder.buildConstant(DstTy, 0); |
| auto Ins1 = MIRBuilder.buildInsertVectorElement( |
| VecTy, Undef, MI.getOperand(1).getReg(), Zero); |
| auto Ins2 = MIRBuilder.buildInsertVectorElement( |
| VecTy, Undef, MI.getOperand(2).getReg(), Zero); |
| |
| // Construct the mask. |
| auto Mask = MIRBuilder.buildConstant(VecTy, EltMask); |
| if (DstSize == 64) |
| Mask = MIRBuilder.buildFNeg(VecTy, Mask); |
| |
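  // BIT takes bits from Ins2 where the mask is set and from Ins1 elsewhere,
  // giving the sign bit of operand 2 with the magnitude of operand 1.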
| auto Sel = MIRBuilder.buildInstr(AArch64::G_BIT, {VecTy}, {Ins1, Ins2, Mask}); |
| |
| // Build an unmerge whose 0th elt is the original G_FCOPYSIGN destination. We |
| // want this to eventually become an EXTRACT_SUBREG. |
| SmallVector<Register, 2> DstRegs(1, Dst); |
| for (unsigned I = 1, E = VecTy.getNumElements(); I < E; ++I) |
| DstRegs.push_back(MRI.createGenericVirtualRegister(DstTy)); |
| MIRBuilder.buildUnmerge(DstRegs, Sel); |
| MI.eraseFromParent(); |
| return true; |
| } |