Subzero. Enables (most) crosstests for ARM32.
This patch enables many crosstests for ARM32. Very limited vector
support is implemented (essentially, whatever it takes to compile the
.ll files containing vector operations.)
Atomics as well as vector crosstests are still disabled.
BUG= https://code.google.com/p/nativeclient/issues/detail?id=4076
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/1359193003 .
diff --git a/src/IceInstARM32.cpp b/src/IceInstARM32.cpp
index aff4ef3..99584dc 100644
--- a/src/IceInstARM32.cpp
+++ b/src/IceInstARM32.cpp
@@ -33,7 +33,7 @@
int8_t SExtAddrOffsetBits;
int8_t ZExtAddrOffsetBits;
} TypeARM32Attributes[] = {
-#define X(tag, elementty, int_width, vec_width, sbits, ubits) \
+#define X(tag, elementty, int_width, vec_width, sbits, ubits, rraddr) \
{ int_width, vec_width, sbits, ubits } \
,
ICETYPEARM32_TABLE
@@ -211,8 +211,6 @@
return Offset == 0;
// Note that encodings for offsets are sign-magnitude for ARM, so we check
// with IsAbsoluteUint().
- if (isScalarFloatingType(Ty))
- return Utils::IsAligned(Offset, 4) && Utils::IsAbsoluteUint(Bits, Offset);
return Utils::IsAbsoluteUint(Bits, Offset);
}
@@ -392,6 +390,11 @@
InstARM32Vmrs::InstARM32Vmrs(Cfg *Func, CondARM32::Cond Predicate)
: InstARM32Pred(Func, InstARM32::Vmrs, 0, nullptr, Predicate) {}
+InstARM32Vabs::InstARM32Vabs(Cfg *Func, Variable *Dest, Variable *Src,
+ CondARM32::Cond Predicate)
+ : InstARM32Pred(Func, InstARM32::Vabs, 1, Dest, Predicate) {
+ addSource(Src);
+}
// ======================== Dump routines ======================== //
// Two-addr ops
@@ -408,9 +411,6 @@
template <> const char *InstARM32Vsqrt::Opcode = "vsqrt";
// Mov-like ops
template <> const char *InstARM32Ldr::Opcode = "ldr";
-template <> const char *InstARM32Mov::Opcode = "mov";
-// FP
-template <> const char *InstARM32Vldr::Opcode = "vldr";
// Three-addr ops
template <> const char *InstARM32Adc::Opcode = "adc";
template <> const char *InstARM32Add::Opcode = "add";
@@ -447,113 +447,56 @@
Inst::dump(Func);
}
-template <> void InstARM32Mov::emit(const Cfg *Func) const {
+void InstARM32Mov::emitMultiDestSingleSource(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
Ostream &Str = Func->getContext()->getStrEmit();
- assert(getSrcSize() == 1);
+ auto *Dest = llvm::cast<Variable64On32>(getDest());
+ Operand *Src = getSrc(0);
+
+ assert(Dest->getType() == IceType_i64);
+ assert(Dest->getHi()->hasReg());
+ assert(Dest->getLo()->hasReg());
+ assert(!llvm::isa<OperandARM32Mem>(Src));
+
+ Str << "\t"
+ << "vmov" << getPredicate() << "\t";
+ Dest->getLo()->emit(Func);
+ Str << ", ";
+ Dest->getHi()->emit(Func);
+ Str << ", ";
+ Src->emit(Func);
+}
+
+void InstARM32Mov::emitSingleDestMultiSource(const Cfg *Func) const {
+ if (!BuildDefs::dump())
+ return;
+ Ostream &Str = Func->getContext()->getStrEmit();
Variable *Dest = getDest();
- if (Dest->hasReg()) {
- IceString ActualOpcode = Opcode;
- Operand *Src0 = getSrc(0);
- if (const auto *Src0V = llvm::dyn_cast<Variable>(Src0)) {
- if (!Src0V->hasReg()) {
- // Always use the whole stack slot. A 32-bit load has a larger range of
- // offsets than 16-bit, etc.
- ActualOpcode = IceString("ldr");
- }
- } else {
- if (llvm::isa<OperandARM32Mem>(Src0))
- ActualOpcode = IceString("ldr") + getWidthString(Dest->getType());
- }
- Str << "\t" << ActualOpcode << getPredicate() << "\t";
- getDest()->emit(Func);
- Str << ", ";
- getSrc(0)->emit(Func);
- } else {
- Variable *Src0 = llvm::cast<Variable>(getSrc(0));
- assert(Src0->hasReg());
- Str << "\t"
- << "str" << getPredicate() << "\t";
- Src0->emit(Func);
- Str << ", ";
- Dest->emit(Func);
- }
-}
+ auto *Src = llvm::cast<Variable64On32>(getSrc(0));
-template <> void InstARM32Mov::emitIAS(const Cfg *Func) const {
- assert(getSrcSize() == 1);
- (void)Func;
- llvm_unreachable("Not yet implemented");
-}
-
-template <> void InstARM32Vldr::emit(const Cfg *Func) const {
- if (!BuildDefs::dump())
- return;
- Ostream &Str = Func->getContext()->getStrEmit();
- assert(getSrcSize() == 1);
- assert(getDest()->hasReg());
- Str << "\t" << Opcode << getPredicate() << "\t";
- getDest()->emit(Func);
- Str << ", ";
- getSrc(0)->emit(Func);
-}
-
-template <> void InstARM32Vldr::emitIAS(const Cfg *Func) const {
- assert(getSrcSize() == 1);
- (void)Func;
- llvm_unreachable("Not yet implemented");
-}
-
-void InstARM32Vmov::emitMultiDestSingleSource(const Cfg *Func) const {
- if (!BuildDefs::dump())
- return;
- Ostream &Str = Func->getContext()->getStrEmit();
- Variable *Dest0 = getDest();
- Operand *Src0 = getSrc(0);
-
- assert(Dest0->hasReg());
- assert(Dest1->hasReg());
- assert(!llvm::isa<OperandARM32Mem>(Src0));
+ assert(Src->getType() == IceType_i64);
+ assert(Src->getHi()->hasReg());
+ assert(Src->getLo()->hasReg());
+ assert(Dest->hasReg());
Str << "\t"
<< "vmov" << getPredicate() << "\t";
- Dest0->emit(Func);
+ Dest->emit(Func);
Str << ", ";
- Dest1->emit(Func);
+ Src->getLo()->emit(Func);
Str << ", ";
- Src0->emit(Func);
-}
-
-void InstARM32Vmov::emitSingleDestMultiSource(const Cfg *Func) const {
- if (!BuildDefs::dump())
- return;
- Ostream &Str = Func->getContext()->getStrEmit();
- Variable *Dest0 = getDest();
- Operand *Src0 = getSrc(0);
- Operand *Src1 = getSrc(1);
-
- assert(Dest0->hasReg());
- assert(!llvm::isa<OperandARM32Mem>(Src0));
- assert(!llvm::isa<OperandARM32Mem>(Src1));
-
- Str << "\t"
- << "vmov" << getPredicate() << "\t";
- Dest0->emit(Func);
- Str << ", ";
- Src0->emit(Func);
- Str << ", ";
- Src1->emit(Func);
+ Src->getHi()->emit(Func);
}
namespace {
+
bool isVariableWithoutRegister(const Operand *Op) {
if (const auto *OpV = llvm::dyn_cast<const Variable>(Op)) {
return !OpV->hasReg();
}
return false;
}
-
bool isMemoryAccess(Operand *Op) {
return isVariableWithoutRegister(Op) || llvm::isa<OperandARM32Mem>(Op);
}
@@ -561,27 +504,38 @@
bool isMoveBetweenCoreAndVFPRegisters(Variable *Dest, Operand *Src) {
const Type DestTy = Dest->getType();
const Type SrcTy = Src->getType();
- assert(!(isScalarIntegerType(DestTy) && isScalarIntegerType(SrcTy)) &&
- "At most one of vmov's operands can be a core register.");
- return isScalarIntegerType(DestTy) || isScalarIntegerType(SrcTy);
+ return !isVectorType(DestTy) && !isVectorType(SrcTy) &&
+ (isScalarIntegerType(DestTy) == isScalarFloatingType(SrcTy));
}
+
} // end of anonymous namespace
-void InstARM32Vmov::emitSingleDestSingleSource(const Cfg *Func) const {
+void InstARM32Mov::emitSingleDestSingleSource(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
Ostream &Str = Func->getContext()->getStrEmit();
Variable *Dest = getDest();
+
if (Dest->hasReg()) {
+ Type DestTy = Dest->getType();
Operand *Src0 = getSrc(0);
- const char *ActualOpcode = isMemoryAccess(Src0) ? "vldr" : "vmov";
+ const bool DestIsVector = isVectorType(DestTy);
+ const bool DestIsScalarFP = isScalarFloatingType(Dest->getType());
+ const bool CoreVFPMove = isMoveBetweenCoreAndVFPRegisters(Dest, Src0);
+ const char *LoadOpcode =
+ DestIsVector ? "vld1" : (DestIsScalarFP ? "vldr" : "ldr");
+ const char *RegMovOpcode =
+ (DestIsVector || DestIsScalarFP || CoreVFPMove) ? "vmov" : "mov";
+ const char *ActualOpcode = isMemoryAccess(Src0) ? LoadOpcode : RegMovOpcode;
// When vmov{c}'ing, we need to emit a width string. Otherwise, the
// assembler might be tempted to assume we want a vector vmov{c}, and that
// is disallowed because ARM.
+ const char *NoWidthString = "";
const char *WidthString =
- (isMemoryAccess(Src0) || isMoveBetweenCoreAndVFPRegisters(Dest, Src0))
- ? ""
- : getVecWidthString(Src0->getType());
+ isMemoryAccess(Src0)
+ ? (DestIsVector ? ".64" : NoWidthString)
+ : (!CoreVFPMove ? getVecWidthString(DestTy) : NoWidthString);
+
Str << "\t" << ActualOpcode << getPredicate() << WidthString << "\t";
Dest->emit(Func);
Str << ", ";
@@ -589,18 +543,24 @@
} else {
Variable *Src0 = llvm::cast<Variable>(getSrc(0));
assert(Src0->hasReg());
- Str << "\t"
- "vstr" << getPredicate() << "\t";
+ const char *ActualOpcode =
+ isVectorType(Src0->getType())
+ ? "vst1"
+ : (isScalarFloatingType(Src0->getType()) ? "vstr" : "str");
+ const char *NoWidthString = "";
+ const char *WidthString =
+ isVectorType(Src0->getType()) ? ".64" : NoWidthString;
+ Str << "\t" << ActualOpcode << getPredicate() << WidthString << "\t";
Src0->emit(Func);
Str << ", ";
Dest->emit(Func);
}
}
-void InstARM32Vmov::emit(const Cfg *Func) const {
+void InstARM32Mov::emit(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
- assert(isMultiDest() + isMultiSource() <= 1 && "Invalid vmov type.");
+ assert(!(isMultiDest() && isMultiSource()) && "Invalid vmov type.");
if (isMultiDest()) {
emitMultiDestSingleSource(Func);
return;
@@ -614,21 +574,37 @@
emitSingleDestSingleSource(Func);
}
-void InstARM32Vmov::emitIAS(const Cfg *Func) const {
+void InstARM32Mov::emitIAS(const Cfg *Func) const {
assert(getSrcSize() == 1);
(void)Func;
llvm_unreachable("Not yet implemented");
}
-void InstARM32Vmov::dump(const Cfg *Func) const {
+void InstARM32Mov::dump(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
+ assert(getSrcSize() == 1);
Ostream &Str = Func->getContext()->getStrDump();
- dumpOpcodePred(Str, "vmov", getDest()->getType());
+ Variable *Dest = getDest();
+ if (auto *Dest64 = llvm::dyn_cast<Variable64On32>(Dest)) {
+ Dest64->getLo()->dump(Func);
+ Str << ", ";
+ Dest64->getHi()->dump(Func);
+ } else {
+ Dest->dump(Func);
+ }
+
+ dumpOpcodePred(Str, " = mov", getDest()->getType());
Str << " ";
- dumpDest(Func);
- Str << ", ";
- dumpSources(Func);
+
+ Operand *Src = getSrc(0);
+ if (auto *Src64 = llvm::dyn_cast<Variable64On32>(Src)) {
+ Src64->getLo()->dump(Func);
+ Str << ", ";
+ Src64->getHi()->dump(Func);
+ } else {
+ Src->dump(Func);
+ }
}
void InstARM32Br::emit(const Cfg *Func) const {
@@ -748,8 +724,16 @@
Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 1);
assert(getDest()->hasReg());
- Type Ty = getSrc(0)->getType();
- Str << "\t" << Opcode << getWidthString(Ty) << getPredicate() << "\t";
+ Variable *Dest = getDest();
+ Type DestTy = Dest->getType();
+ const bool DestIsVector = isVectorType(DestTy);
+ const bool DestIsScalarFloat = isScalarFloatingType(DestTy);
+ const char *ActualOpcode =
+ DestIsVector ? "vld1" : (DestIsScalarFloat ? "vldr" : "ldr");
+ const char *VectorMarker = DestIsVector ? ".64" : "";
+ const char *WidthString = DestIsVector ? "" : getWidthString(DestTy);
+ Str << "\t" << ActualOpcode << WidthString << getPredicate() << VectorMarker
+ << "\t";
getDest()->emit(Func);
Str << ", ";
getSrc(0)->emit(Func);
@@ -799,15 +783,28 @@
void InstARM32Pop::emit(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
- assert(Dests.size() > 0);
+ SizeT IntegerCount = 0;
+ for (const Operand *Op : Dests) {
+ if (isScalarIntegerType(Op->getType())) {
+ ++IntegerCount;
+ }
+ }
Ostream &Str = Func->getContext()->getStrEmit();
+ if (IntegerCount == 0) {
+ Str << "\t@ empty pop";
+ return;
+ }
Str << "\t"
<< "pop"
<< "\t{";
- for (SizeT I = 0; I < Dests.size(); ++I) {
- if (I > 0)
- Str << ", ";
- Dests[I]->emit(Func);
+ bool PrintComma = false;
+ for (const Operand *Op : Dests) {
+ if (isScalarIntegerType(Op->getType())) {
+ if (PrintComma)
+ Str << ", ";
+ Op->emit(Func);
+ PrintComma = true;
+ }
}
Str << "}";
}
@@ -866,12 +863,31 @@
void InstARM32Push::emit(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
- assert(getSrcSize() > 0);
+ SizeT IntegerCount = 0;
+ for (SizeT i = 0; i < getSrcSize(); ++i) {
+ if (isScalarIntegerType(getSrc(i)->getType())) {
+ ++IntegerCount;
+ }
+ }
Ostream &Str = Func->getContext()->getStrEmit();
+ if (IntegerCount == 0) {
+ Str << "\t"
+ << "@empty push";
+ return;
+ }
Str << "\t"
<< "push"
<< "\t{";
- emitSources(Func);
+ bool PrintComma = false;
+ for (SizeT i = 0; i < getSrcSize(); ++i) {
+ Operand *Op = getSrc(i);
+ if (isScalarIntegerType(Op->getType())) {
+ if (PrintComma)
+ Str << ", ";
+ Op->emit(Func);
+ PrintComma = true;
+ }
+ }
Str << "}";
}
@@ -923,8 +939,12 @@
Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 2);
Type Ty = getSrc(0)->getType();
- const char *Opcode = isScalarFloatingType(Ty) ? "vstr" : "str";
- Str << "\t" << Opcode << getWidthString(Ty) << getPredicate() << "\t";
+ const bool IsVectorStore = isVectorType(Ty);
+ const char *Opcode =
+ IsVectorStore ? "vst1" : (isScalarFloatingType(Ty) ? "vstr" : "str");
+ const char *VecEltWidthString = IsVectorStore ? ".64" : "";
+ Str << "\t" << Opcode << getWidthString(Ty) << getPredicate()
+ << VecEltWidthString << "\t";
getSrc(0)->emit(Func);
Str << ", ";
getSrc(1)->emit(Func);
@@ -1119,6 +1139,33 @@
"FPSCR{n,z,c,v}";
}
+void InstARM32Vabs::emit(const Cfg *Func) const {
+ if (!BuildDefs::dump())
+ return;
+ Ostream &Str = Func->getContext()->getStrEmit();
+ assert(getSrcSize() == 1);
+ Str << "\t"
+ "vabs" << getPredicate() << getVecWidthString(getSrc(0)->getType())
+ << "\t";
+ getDest()->emit(Func);
+ Str << ", ";
+ getSrc(0)->emit(Func);
+}
+
+void InstARM32Vabs::emitIAS(const Cfg *Func) const {
+ assert(getSrcSize() == 1);
+ (void)Func;
+ llvm_unreachable("Not yet implemented");
+}
+
+void InstARM32Vabs::dump(const Cfg *Func) const {
+ if (!BuildDefs::dump())
+ return;
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = vabs" << getPredicate() << getVecWidthString(getSrc(0)->getType());
+}
+
void OperandARM32Mem::emit(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
@@ -1128,13 +1175,13 @@
switch (getAddrMode()) {
case PostIndex:
case NegPostIndex:
- Str << "], ";
+ Str << "]";
break;
default:
- Str << ", ";
break;
}
if (isRegReg()) {
+ Str << ", ";
if (isNegAddrMode()) {
Str << "-";
}
@@ -1144,7 +1191,11 @@
<< getShiftAmt();
}
} else {
- getOffset()->emit(Func);
+ ConstantInteger32 *Offset = getOffset();
+ if (Offset && Offset->getValue() != 0) {
+ Str << ", ";
+ Offset->emit(Func);
+ }
}
switch (getAddrMode()) {
case Offset:
diff --git a/src/IceInstARM32.def b/src/IceInstARM32.def
index 4e34cbf..3fb6898 100644
--- a/src/IceInstARM32.def
+++ b/src/IceInstARM32.def
@@ -350,23 +350,24 @@
// the # of offset bits allowed as part of an addressing mode (for sign or zero
// extending load/stores).
#define ICETYPEARM32_TABLE \
- /* tag, element type, int_width, vec_width, addr bits sext, zext */ \
- X(IceType_void, IceType_void, "" , "" , 0 , 0) \
- X(IceType_i1, IceType_void, "b", "" , 8 , 12) \
- X(IceType_i8, IceType_void, "b", "" , 8 , 12) \
- X(IceType_i16, IceType_void, "h", "" , 8 , 8) \
- X(IceType_i32, IceType_void, "" , "" , 12, 12) \
- X(IceType_i64, IceType_void, "d", "" , 8 , 8) \
- X(IceType_f32, IceType_void, "" , ".f32", 10, 10) \
- X(IceType_f64, IceType_void, "" , ".f64", 10, 10) \
- X(IceType_v4i1, IceType_i32 , "" , ".i32", 0 , 0) \
- X(IceType_v8i1, IceType_i16 , "" , ".i16", 0 , 0) \
- X(IceType_v16i1, IceType_i8 , "" , ".i8" , 0 , 0) \
- X(IceType_v16i8, IceType_i8 , "" , ".i8" , 0 , 0) \
- X(IceType_v8i16, IceType_i16 , "" , ".i16", 0 , 0) \
- X(IceType_v4i32, IceType_i32 , "" , ".i32", 0 , 0) \
- X(IceType_v4f32, IceType_f32 , "" , ".f32", 0 , 0)
-//#define X(tag, elementty, int_width, vec_width, sbits, ubits)
+ /* tag, element type, int_width, vec_width, addr bits sext, zext, \
+ reg-reg addr allowed */ \
+ X(IceType_void, IceType_void, "" , "" , 0 , 0 , 0) \
+ X(IceType_i1, IceType_void, "b", "" , 8 , 12, 1) \
+ X(IceType_i8, IceType_void, "b", "" , 8 , 12, 1) \
+ X(IceType_i16, IceType_void, "h", "" , 8 , 8 , 1) \
+ X(IceType_i32, IceType_void, "" , "" , 12, 12, 1) \
+ X(IceType_i64, IceType_void, "d", "" , 8 , 8 , 1) \
+ X(IceType_f32, IceType_void, "" , ".f32", 8, 8 , 0) \
+ X(IceType_f64, IceType_void, "" , ".f64", 8, 8 , 0) \
+ X(IceType_v4i1, IceType_i32 , "" , ".i32", 0 , 0 , 1) \
+ X(IceType_v8i1, IceType_i16 , "" , ".i16", 0 , 0 , 1) \
+ X(IceType_v16i1, IceType_i8 , "" , ".i8" , 0 , 0 , 1) \
+ X(IceType_v16i8, IceType_i8 , "" , ".i8" , 0 , 0 , 1) \
+ X(IceType_v8i16, IceType_i16 , "" , ".i16", 0 , 0 , 1) \
+ X(IceType_v4i32, IceType_i32 , "" , ".i32", 0 , 0 , 1) \
+ X(IceType_v4f32, IceType_f32 , "" , ".f32", 0 , 0 , 1)
+//#define X(tag, elementty, int_width, vec_width, sbits, ubits, rraddr)
// Shifter types for Data-processing operands as defined in section A5.1.2.
#define ICEINSTARM32SHIFT_TABLE \
diff --git a/src/IceInstARM32.h b/src/IceInstARM32.h
index c196d72..c8be52b 100644
--- a/src/IceInstARM32.h
+++ b/src/IceInstARM32.h
@@ -320,12 +320,11 @@
Udiv,
Umull,
Uxt,
+ Vabs,
Vadd,
Vcmp,
Vcvt,
Vdiv,
- Vldr,
- Vmov,
Vmrs,
Vmul,
Vsqrt,
@@ -780,13 +779,6 @@
using InstARM32Vmul = InstARM32ThreeAddrFP<InstARM32::Vmul>;
using InstARM32Vsub = InstARM32ThreeAddrFP<InstARM32::Vsub>;
using InstARM32Ldr = InstARM32Movlike<InstARM32::Ldr>;
-/// Move instruction (variable <- flex). This is more of a pseudo-inst. If var
-/// is a register, then we use "mov". If var is stack, then we use "str" to
-/// store to the stack.
-using InstARM32Mov = InstARM32Movlike<InstARM32::Mov>;
-/// Represents various vector mov instruction forms (simple single source,
-/// single dest forms only, not the 2 GPR <-> 1 D reg forms, etc.).
-using InstARM32Vldr = InstARM32Movlike<InstARM32::Vldr>;
/// MovT leaves the bottom bits alone so dest is also a source. This helps
/// indicate that a previous MovW setting dest is not dead code.
using InstARM32Movt = InstARM32TwoAddrGPR<InstARM32::Movt>;
@@ -1120,90 +1112,47 @@
};
/// Handles (some of) vmov's various formats.
-class InstARM32Vmov final : public InstARM32Pred {
- InstARM32Vmov() = delete;
- InstARM32Vmov(const InstARM32Vmov &) = delete;
- InstARM32Vmov &operator=(const InstARM32Vmov &) = delete;
+class InstARM32Mov final : public InstARM32Pred {
+ InstARM32Mov() = delete;
+ InstARM32Mov(const InstARM32Mov &) = delete;
+ InstARM32Mov &operator=(const InstARM32Mov &) = delete;
public:
- /// RegisterPair is used to group registers in
- ///
- /// vmov D, (R, R)
- ///
- /// and
- ///
- /// vmov (R, R), D
- struct RegisterPair {
- explicit RegisterPair(Variable *V0, Variable *V1) : _0(V0), _1(V1) {
- assert(V0->getType() == IceType_i32);
- assert(V1->getType() == IceType_i32);
- }
- Variable *_0;
- Variable *_1;
- };
-
- static InstARM32Vmov *create(Cfg *Func, Variable *Dest, Operand *Src,
- CondARM32::Cond Predicate) {
- return new (Func->allocate<InstARM32Vmov>())
- InstARM32Vmov(Func, Dest, Src, Predicate);
- }
- static InstARM32Vmov *create(Cfg *Func, const RegisterPair &Dests,
- Variable *Src, CondARM32::Cond Predicate) {
- return new (Func->allocate<InstARM32Vmov>())
- InstARM32Vmov(Func, Dests, Src, Predicate);
- }
- static InstARM32Vmov *create(Cfg *Func, Variable *Dest,
- const RegisterPair &Srcs,
- CondARM32::Cond Predicate) {
- return new (Func->allocate<InstARM32Vmov>())
- InstARM32Vmov(Func, Dest, Srcs, Predicate);
+ static InstARM32Mov *create(Cfg *Func, Variable *Dest, Operand *Src,
+ CondARM32::Cond Predicate) {
+ return new (Func->allocate<InstARM32Mov>())
+ InstARM32Mov(Func, Dest, Src, Predicate);
}
bool isRedundantAssign() const override {
- return Dest1 == nullptr && getSrcSize() == 1 &&
+ return !isMultiDest() && !isMultiSource() &&
checkForRedundantAssign(getDest(), getSrc(0));
}
bool isSimpleAssign() const override { return true; }
void emit(const Cfg *Func) const override;
void emitIAS(const Cfg *Func) const override;
void dump(const Cfg *Func) const override;
- static bool classof(const Inst *Inst) { return isClassof(Inst, Vmov); }
-
-private:
- InstARM32Vmov(Cfg *Func, Variable *Dest, Operand *Src,
- CondARM32::Cond Predicate)
- : InstARM32Pred(Func, InstARM32::Vmov, 1, Dest, Predicate) {
- addSource(Src);
- }
-
- InstARM32Vmov(Cfg *Func, const RegisterPair &Dests, Variable *Src,
- CondARM32::Cond Predicate)
- : InstARM32Pred(Func, InstARM32::Vmov, 1, Dests._0, Predicate),
- Dest1(Dests._1) {
- addSource(Src);
- }
-
- InstARM32Vmov(Cfg *Func, Variable *Dest, const RegisterPair &Srcs,
- CondARM32::Cond Predicate)
- : InstARM32Pred(Func, InstARM32::Vmov, 2, Dest, Predicate) {
- addSource(Srcs._0);
- addSource(Srcs._1);
- }
+ static bool classof(const Inst *Inst) { return isClassof(Inst, Mov); }
bool isMultiDest() const {
assert(getDest() != nullptr);
- return Dest1 != nullptr;
+ return llvm::isa<Variable64On32>(getDest());
}
bool isMultiSource() const {
- assert(getSrcSize() >= 1);
- return getSrcSize() > 1;
+ assert(getSrcSize() == 1);
+ return llvm::isa<Variable64On32>(getSrc(0));
+ }
+
+private:
+ InstARM32Mov(Cfg *Func, Variable *Dest, Operand *Src,
+ CondARM32::Cond Predicate)
+ : InstARM32Pred(Func, InstARM32::Mov, 1, Dest, Predicate) {
+ addSource(Src);
}
void emitMultiDestSingleSource(const Cfg *Func) const;
void emitSingleDestMultiSource(const Cfg *Func) const;
void emitSingleDestSingleSource(const Cfg *Func) const;
-
- Variable *Dest1 = nullptr;
};
class InstARM32Vcmp final : public InstARM32Pred {
@@ -1246,15 +1195,33 @@
InstARM32Vmrs(Cfg *Func, CondARM32::Cond Predicate);
};
+class InstARM32Vabs final : public InstARM32Pred {
+ InstARM32Vabs() = delete;
+ InstARM32Vabs(const InstARM32Vabs &) = delete;
+ InstARM32Vabs &operator=(const InstARM32Vabs &) = delete;
+
+public:
+ static InstARM32Vabs *create(Cfg *Func, Variable *Dest, Variable *Src,
+ CondARM32::Cond Predicate) {
+ return new (Func->allocate<InstARM32Vabs>())
+ InstARM32Vabs(Func, Dest, Src, Predicate);
+ }
+ void emit(const Cfg *Func) const override;
+ void emitIAS(const Cfg *Func) const override;
+ void dump(const Cfg *Func) const override;
+ static bool classof(const Inst *Inst) { return isClassof(Inst, Vabs); }
+
+private:
+ InstARM32Vabs(Cfg *Func, Variable *Dest, Variable *Src,
+ CondARM32::Cond Predicate);
+};
// Declare partial template specializations of emit() methods that already have
// default implementations. Without this, there is the possibility of ODR
// violations and link errors.
template <> void InstARM32Ldr::emit(const Cfg *Func) const;
-template <> void InstARM32Mov::emit(const Cfg *Func) const;
template <> void InstARM32Movw::emit(const Cfg *Func) const;
template <> void InstARM32Movt::emit(const Cfg *Func) const;
-template <> void InstARM32Vldr::emit(const Cfg *Func) const;
} // end of namespace Ice
diff --git a/src/IceTargetLoweringARM32.cpp b/src/IceTargetLoweringARM32.cpp
index d61d93c..f92160b 100644
--- a/src/IceTargetLoweringARM32.cpp
+++ b/src/IceTargetLoweringARM32.cpp
@@ -20,6 +20,7 @@
#include "IceDefs.h"
#include "IceELFObjectWriter.h"
#include "IceGlobalInits.h"
+#include "IceInstARM32.def"
#include "IceInstARM32.h"
#include "IceLiveness.h"
#include "IceOperand.h"
@@ -30,6 +31,7 @@
#include "llvm/Support/MathExtras.h"
#include <algorithm>
+#include <utility>
namespace Ice {
@@ -380,8 +382,21 @@
}
Variable *TargetARM32::getPhysicalRegister(SizeT RegNum, Type Ty) {
- if (Ty == IceType_void)
- Ty = IceType_i32;
+ static const Type DefaultType[] = {
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP32, isFP64, isVec128, alias_init) \
+ (isFP32) \
+ ? IceType_f32 \
+ : ((isFP64) ? IceType_f64 : ((isVec128 ? IceType_v4i32 : IceType_i32))),
+ REGARM32_TABLE
+#undef X
+ };
+
+ assert(RegNum < RegARM32::Reg_NUM);
+ if (Ty == IceType_void) {
+ assert(RegNum < llvm::array_lengthof(DefaultType));
+ Ty = DefaultType[RegNum];
+ }
if (PhysicalRegisters[Ty].empty())
PhysicalRegisters[Ty].resize(RegARM32::Reg_NUM);
assert(RegNum < PhysicalRegisters[Ty].size());
@@ -425,11 +440,17 @@
if (!hasFramePointer())
Offset += getStackAdjustment();
}
- if (!isLegalVariableStackOffset(Offset)) {
+ const Type VarTy = Var->getType();
+ // In general, no Variable64On32 should be emitted in textual asm output. It
+ turns out that some lowering sequences Fake-Def/Fake-Use such variables.
+ // If they end up being assigned an illegal offset we get a runtime error. We
+ // liberally allow Variable64On32 to have illegal offsets because offsets
+ // don't matter in FakeDefs/FakeUses.
+ if (!llvm::isa<Variable64On32>(Var) &&
+ !isLegalVariableStackOffset(VarTy, Offset)) {
llvm::report_fatal_error("Illegal stack offset");
}
- const Type FrameSPTy = stackSlotType();
- Str << "[" << getRegName(BaseRegNum, FrameSPTy);
+ Str << "[" << getRegName(BaseRegNum, VarTy);
if (Offset != 0) {
Str << ", " << getConstantPrefix() << Offset;
}
@@ -592,17 +613,14 @@
// value from the stack slot.
if (Arg->hasReg()) {
assert(Ty != IceType_i64);
- OperandARM32Mem *Mem = OperandARM32Mem::create(
+ // This should be simple, just load the parameter off the stack using a nice
+ // sp + imm addressing mode. Because ARM, we can't do that (e.g., VLDR, for
+ // fp types, cannot have an index register), so we legalize the memory
+ // operand instead.
+ auto *Mem = OperandARM32Mem::create(
Func, Ty, FramePtr, llvm::cast<ConstantInteger32>(
Ctx->getConstantInt32(Arg->getStackOffset())));
- if (isVectorType(Arg->getType())) {
- // Use vld1.$elem or something?
- UnimplementedError(Func->getContext()->getFlags());
- } else if (isFloatingType(Arg->getType())) {
- _vldr(Arg, Mem);
- } else {
- _ldr(Arg, Mem);
- }
+ legalizeToReg(Mem, Arg->getRegNum());
// This argument-copying instruction uses an explicit OperandARM32Mem
// operand instead of a Variable, so its fill-from-stack operation has to
// be tracked separately for statistics.
@@ -894,16 +912,15 @@
RI->setDeleted();
}
-bool TargetARM32::isLegalVariableStackOffset(int32_t Offset) const {
+bool TargetARM32::isLegalVariableStackOffset(Type Ty, int32_t Offset) const {
constexpr bool SignExt = false;
- // TODO(jvoung): vldr of FP stack slots has a different limit from the plain
- // stackSlotType().
- return OperandARM32Mem::canHoldOffset(stackSlotType(), SignExt, Offset);
+ return OperandARM32Mem::canHoldOffset(Ty, SignExt, Offset);
}
StackVariable *TargetARM32::legalizeVariableSlot(Variable *Var,
+ int32_t StackAdjust,
Variable *OrigBaseReg) {
- int32_t Offset = Var->getStackOffset();
+ int32_t Offset = Var->getStackOffset() + StackAdjust;
// Legalize will likely need a movw/movt combination, but if the top bits are
// all 0 from negating the offset and subtracting, we could use that instead.
bool ShouldSub = (-Offset & 0xFFFF0000) == 0;
@@ -937,7 +954,9 @@
Func->dump("Before legalizeStackSlots");
assert(hasComputedFrame());
// Early exit, if SpillAreaSizeBytes is really small.
- if (isLegalVariableStackOffset(SpillAreaSizeBytes))
+ // TODO(jpp): this is not safe -- loads and stores of q registers can't have
+ // offsets.
+ if (isLegalVariableStackOffset(IceType_v4i32, SpillAreaSizeBytes))
return;
Variable *OrigBaseReg = getPhysicalRegister(getFrameOrStackReg());
int32_t StackAdjust = 0;
@@ -978,64 +997,77 @@
continue;
}
}
+
// For now, only Mov instructions can have stack variables. We need to
// know the type of instruction because we currently create a fresh one
// to replace Dest/Source, rather than mutate in place.
- auto *MovInst = llvm::dyn_cast<InstARM32Mov>(CurInstr);
- if (!MovInst) {
+ bool MayNeedOffsetRewrite = false;
+ if (auto *MovInstr = llvm::dyn_cast<InstARM32Mov>(CurInstr)) {
+ MayNeedOffsetRewrite =
+ !MovInstr->isMultiDest() && !MovInstr->isMultiSource();
+ }
+
+ if (!MayNeedOffsetRewrite) {
continue;
}
+
+ assert(Dest != nullptr);
+ Type DestTy = Dest->getType();
+ assert(DestTy != IceType_i64);
if (!Dest->hasReg()) {
int32_t Offset = Dest->getStackOffset();
Offset += StackAdjust;
- if (!isLegalVariableStackOffset(Offset)) {
+ if (!isLegalVariableStackOffset(DestTy, Offset)) {
if (NewBaseReg) {
int32_t OffsetDiff = Offset - NewBaseOffset;
- if (isLegalVariableStackOffset(OffsetDiff)) {
+ if (isLegalVariableStackOffset(DestTy, OffsetDiff)) {
StackVariable *NewDest =
Func->makeVariable<StackVariable>(stackSlotType());
NewDest->setMustNotHaveReg();
NewDest->setBaseRegNum(NewBaseReg->getBaseRegNum());
NewDest->setStackOffset(OffsetDiff);
Variable *NewDestVar = NewDest;
- _mov(NewDestVar, MovInst->getSrc(0));
- MovInst->setDeleted();
+ _mov(NewDestVar, CurInstr->getSrc(0));
+ CurInstr->setDeleted();
continue;
}
}
- StackVariable *LegalDest = legalizeVariableSlot(Dest, OrigBaseReg);
+ StackVariable *LegalDest =
+ legalizeVariableSlot(Dest, StackAdjust, OrigBaseReg);
assert(LegalDest != Dest);
Variable *LegalDestVar = LegalDest;
- _mov(LegalDestVar, MovInst->getSrc(0));
- MovInst->setDeleted();
+ _mov(LegalDestVar, CurInstr->getSrc(0));
+ CurInstr->setDeleted();
NewBaseReg = LegalDest;
NewBaseOffset = Offset;
continue;
}
}
- assert(MovInst->getSrcSize() == 1);
- Variable *Var = llvm::dyn_cast<Variable>(MovInst->getSrc(0));
+ assert(CurInstr->getSrcSize() == 1);
+ Variable *Var = llvm::dyn_cast<Variable>(CurInstr->getSrc(0));
if (Var && !Var->hasReg()) {
+ Type VarTy = Var->getType();
int32_t Offset = Var->getStackOffset();
Offset += StackAdjust;
- if (!isLegalVariableStackOffset(Offset)) {
+ if (!isLegalVariableStackOffset(VarTy, Offset)) {
if (NewBaseReg) {
int32_t OffsetDiff = Offset - NewBaseOffset;
- if (isLegalVariableStackOffset(OffsetDiff)) {
+ if (isLegalVariableStackOffset(VarTy, OffsetDiff)) {
StackVariable *NewVar =
Func->makeVariable<StackVariable>(stackSlotType());
NewVar->setMustNotHaveReg();
NewVar->setBaseRegNum(NewBaseReg->getBaseRegNum());
NewVar->setStackOffset(OffsetDiff);
_mov(Dest, NewVar);
- MovInst->setDeleted();
+ CurInstr->setDeleted();
continue;
}
}
- StackVariable *LegalVar = legalizeVariableSlot(Var, OrigBaseReg);
+ StackVariable *LegalVar =
+ legalizeVariableSlot(Var, StackAdjust, OrigBaseReg);
assert(LegalVar != Var);
_mov(Dest, LegalVar);
- MovInst->setDeleted();
+ CurInstr->setDeleted();
NewBaseReg = LegalVar;
NewBaseOffset = Offset;
continue;
@@ -1427,6 +1459,20 @@
}
case InstArithmetic::Shl: {
// a=b<<c ==>
+ // pnacl-llc does:
+ // mov t_b.lo, b.lo
+ // mov t_b.hi, b.hi
+ // mov t_c.lo, c.lo
+ // rsb T0, t_c.lo, #32
+ // lsr T1, t_b.lo, T0
+ // orr t_a.hi, T1, t_b.hi, lsl t_c.lo
+ // sub T2, t_c.lo, #32
+ // cmp T2, #0
+ // lslge t_a.hi, t_b.lo, T2
+ // lsl t_a.lo, t_b.lo, t_c.lo
+ // mov a.lo, t_a.lo
+ // mov a.hi, t_a.hi
+ //
// GCC 4.8 does:
// sub t_c1, c.lo, #32
// lsl t_hi, b.hi, c.lo
@@ -1436,78 +1482,88 @@
// lsl t_lo, b.lo, c.lo
// a.lo = t_lo
// a.hi = t_hi
+ //
+ // These are incompatible, therefore we mimic pnacl-llc.
// Can be strength-reduced for constant-shifts, but we don't do that for
// now.
// Given the sub/rsb T_C, C.lo, #32, one of the T_C will be negative. On
// ARM, shifts only take the lower 8 bits of the shift register, and
// saturate to the range 0-32, so the negative value will saturate to 32.
- Variable *T_Hi = makeReg(IceType_i32);
+ Constant *_32 = Ctx->getConstantInt32(32);
+ Constant *_0 = Ctx->getConstantZero(IceType_i32);
Variable *Src1RLo = legalizeToReg(Src1Lo);
- Constant *ThirtyTwo = Ctx->getConstantInt32(32);
- Variable *T_C1 = makeReg(IceType_i32);
- Variable *T_C2 = makeReg(IceType_i32);
- _sub(T_C1, Src1RLo, ThirtyTwo);
- _lsl(T_Hi, Src0RHi, Src1RLo);
- _orr(T_Hi, T_Hi, OperandARM32FlexReg::create(Func, IceType_i32, Src0RLo,
- OperandARM32::LSL, T_C1));
- _rsb(T_C2, Src1RLo, ThirtyTwo);
- _orr(T_Hi, T_Hi, OperandARM32FlexReg::create(Func, IceType_i32, Src0RLo,
- OperandARM32::LSR, T_C2));
- _mov(DestHi, T_Hi);
- Variable *T_Lo = makeReg(IceType_i32);
- // _mov seems to sometimes have better register preferencing than lsl.
- // Otherwise mov w/ lsl shifted register is a pseudo-instruction that
- // maps to lsl.
- _mov(T_Lo, OperandARM32FlexReg::create(Func, IceType_i32, Src0RLo,
- OperandARM32::LSL, Src1RLo));
- _mov(DestLo, T_Lo);
+ Variable *T0 = makeReg(IceType_i32);
+ Variable *T1 = makeReg(IceType_i32);
+ Variable *T2 = makeReg(IceType_i32);
+ Variable *TA_Hi = makeReg(IceType_i32);
+ Variable *TA_Lo = makeReg(IceType_i32);
+ _rsb(T0, Src1RLo, _32);
+ _lsr(T1, Src0RLo, T0);
+ _orr(TA_Hi, T1, OperandARM32FlexReg::create(Func, IceType_i32, Src0RHi,
+ OperandARM32::LSL, Src1RLo));
+ _sub(T2, Src1RLo, _32);
+ _cmp(T2, _0);
+ _lsl(TA_Hi, Src0RLo, T2, CondARM32::GE);
+ _set_dest_nonkillable();
+ _lsl(TA_Lo, Src0RLo, Src1RLo);
+ _mov(DestLo, TA_Lo);
+ _mov(DestHi, TA_Hi);
return;
}
case InstArithmetic::Lshr:
- // a=b>>c (unsigned) ==>
- // GCC 4.8 does:
- // rsb t_c1, c.lo, #32
- // lsr t_lo, b.lo, c.lo
- // orr t_lo, t_lo, b.hi, lsl t_c1
- // sub t_c2, c.lo, #32
- // orr t_lo, t_lo, b.hi, lsr t_c2
- // lsr t_hi, b.hi, c.lo
- // a.lo = t_lo
- // a.hi = t_hi
case InstArithmetic::Ashr: {
- // a=b>>c (signed) ==> ...
- // Ashr is similar, but the sub t_c2, c.lo, #32 should set flags, and the
- // next orr should be conditioned on PLUS. The last two right shifts
- // should also be arithmetic.
- bool IsAshr = Inst->getOp() == InstArithmetic::Ashr;
- Variable *T_Lo = makeReg(IceType_i32);
+ // a=b>>c
+ // pnacl-llc does:
+ // mov t_b.lo, b.lo
+ // mov t_b.hi, b.hi
+ // mov t_c.lo, c.lo
+ // lsr T0, t_b.lo, t_c.lo
+ // rsb T1, t_c.lo, #32
+ // orr t_a.lo, T0, t_b.hi, lsl T1
+ // sub T2, t_c.lo, #32
+ // cmp T2, #0
+ // [al]srge t_a.lo, t_b.hi, T2
+ // [al]sr t_a.hi, t_b.hi, t_c.lo
+ // mov a.lo, t_a.lo
+ // mov a.hi, t_a.hi
+ //
+ // GCC 4.8 does (lsr):
+ // rsb t_c1, c.lo, #32
+ // lsr t_lo, b.lo, c.lo
+ // orr t_lo, t_lo, b.hi, lsl t_c1
+ // sub t_c2, c.lo, #32
+ // orr t_lo, t_lo, b.hi, lsr t_c2
+ // lsr t_hi, b.hi, c.lo
+ // mov a.lo, t_lo
+ // mov a.hi, t_hi
+ //
+ // These are incompatible, therefore we mimic pnacl-llc.
+ const bool IsAshr = Inst->getOp() == InstArithmetic::Ashr;
+ Constant *_32 = Ctx->getConstantInt32(32);
+ Constant *_0 = Ctx->getConstantZero(IceType_i32);
Variable *Src1RLo = legalizeToReg(Src1Lo);
- Constant *ThirtyTwo = Ctx->getConstantInt32(32);
- Variable *T_C1 = makeReg(IceType_i32);
- Variable *T_C2 = makeReg(IceType_i32);
- _rsb(T_C1, Src1RLo, ThirtyTwo);
- _lsr(T_Lo, Src0RLo, Src1RLo);
- _orr(T_Lo, T_Lo, OperandARM32FlexReg::create(Func, IceType_i32, Src0RHi,
- OperandARM32::LSL, T_C1));
- OperandARM32::ShiftKind RShiftKind;
- CondARM32::Cond Pred;
+ Variable *T0 = makeReg(IceType_i32);
+ Variable *T1 = makeReg(IceType_i32);
+ Variable *T2 = makeReg(IceType_i32);
+ Variable *TA_Lo = makeReg(IceType_i32);
+ Variable *TA_Hi = makeReg(IceType_i32);
+ _lsr(T0, Src0RLo, Src1RLo);
+ _rsb(T1, Src1RLo, _32);
+ _orr(TA_Lo, T0, OperandARM32FlexReg::create(Func, IceType_i32, Src0RHi,
+ OperandARM32::LSL, T1));
+ _sub(T2, Src1RLo, _32);
+ _cmp(T2, _0);
if (IsAshr) {
- _subs(T_C2, Src1RLo, ThirtyTwo);
- RShiftKind = OperandARM32::ASR;
- Pred = CondARM32::PL;
+ _asr(TA_Lo, Src0RHi, T2, CondARM32::GE);
+ _set_dest_nonkillable();
+ _asr(TA_Hi, Src0RHi, Src1RLo);
} else {
- _sub(T_C2, Src1RLo, ThirtyTwo);
- RShiftKind = OperandARM32::LSR;
- Pred = CondARM32::AL;
+ _lsr(TA_Lo, Src0RHi, T2, CondARM32::GE);
+ _set_dest_nonkillable();
+ _lsr(TA_Hi, Src0RHi, Src1RLo);
}
- _orr(T_Lo, T_Lo, OperandARM32FlexReg::create(Func, IceType_i32, Src0RHi,
- RShiftKind, T_C2),
- Pred);
- _mov(DestLo, T_Lo);
- Variable *T_Hi = makeReg(IceType_i32);
- _mov(T_Hi, OperandARM32FlexReg::create(Func, IceType_i32, Src0RHi,
- RShiftKind, Src1RLo));
- _mov(DestHi, T_Hi);
+ _mov(DestLo, TA_Lo);
+ _mov(DestHi, TA_Hi);
return;
}
case InstArithmetic::Fadd:
@@ -1527,9 +1583,11 @@
}
return;
} else if (isVectorType(Dest->getType())) {
- UnimplementedError(Func->getContext()->getFlags());
// Add a fake def to keep liveness consistent in the meantime.
- Context.insert(InstFakeDef::create(Func, Dest));
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T));
+ _mov(Dest, T);
+ UnimplementedError(Func->getContext()->getFlags());
return;
}
// Dest->getType() is a non-i64 scalar.
@@ -1585,25 +1643,25 @@
case InstArithmetic::Fadd: {
Variable *Src1R = legalizeToReg(Src1);
_vadd(T, Src0R, Src1R);
- _vmov(Dest, T);
+ _mov(Dest, T);
return;
}
case InstArithmetic::Fsub: {
Variable *Src1R = legalizeToReg(Src1);
_vsub(T, Src0R, Src1R);
- _vmov(Dest, T);
+ _mov(Dest, T);
return;
}
case InstArithmetic::Fmul: {
Variable *Src1R = legalizeToReg(Src1);
_vmul(T, Src0R, Src1R);
- _vmov(Dest, T);
+ _mov(Dest, T);
return;
}
case InstArithmetic::Fdiv: {
Variable *Src1R = legalizeToReg(Src1);
_vdiv(T, Src0R, Src1R);
- _vmov(Dest, T);
+ _mov(Dest, T);
return;
}
}
@@ -1677,7 +1735,8 @@
Operand *Src0Hi = legalize(hiOperand(Src0), Legal_Reg | Legal_Flex);
Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
- Variable *T_Lo = nullptr, *T_Hi = nullptr;
+ Variable *T_Lo = makeReg(IceType_i32);
+ Variable *T_Hi = makeReg(IceType_i32);
_mov(T_Lo, Src0Lo);
_mov(DestLo, T_Lo);
_mov(T_Hi, Src0Hi);
@@ -1696,10 +1755,11 @@
NewSrc = legalize(Src0, Legal_Reg);
}
if (isVectorType(Dest->getType())) {
- UnimplementedError(Func->getContext()->getFlags());
+ Variable *SrcR = legalizeToReg(NewSrc);
+ _mov(Dest, SrcR);
} else if (isFloatingType(Dest->getType())) {
Variable *SrcR = legalizeToReg(NewSrc);
- _vmov(Dest, SrcR);
+ _mov(Dest, SrcR);
} else {
_mov(Dest, NewSrc);
}
@@ -1769,7 +1829,7 @@
ParameterAreaSizeBytes =
applyStackAlignmentTy(ParameterAreaSizeBytes, Ty);
StackArgs.push_back(std::make_pair(Arg, ParameterAreaSizeBytes));
- ParameterAreaSizeBytes += typeWidthInBytesOnStack(Arg->getType());
+ ParameterAreaSizeBytes += typeWidthInBytesOnStack(Ty);
}
}
@@ -1809,19 +1869,6 @@
lowerStore(InstStore::create(Func, StackArg.first, Addr));
}
- // Copy arguments to be passed in registers to the appropriate registers.
- for (auto &GPRArg : GPRArgs) {
- Variable *Reg = legalizeToReg(GPRArg.first, GPRArg.second);
- // Generate a FakeUse of register arguments so that they do not get dead
- // code eliminated as a result of the FakeKill of scratch registers after
- // the call.
- Context.insert(InstFakeUse::create(Func, Reg));
- }
- for (auto &FPArg : FPArgs) {
- Variable *Reg = legalizeToReg(FPArg.first, FPArg.second);
- Context.insert(InstFakeUse::create(Func, Reg));
- }
-
// Generate the call instruction. Assign its result to a temporary with high
// register allocation weight.
Variable *Dest = Instr->getDest();
@@ -1872,6 +1919,19 @@
if (!llvm::isa<ConstantRelocatable>(CallTarget)) {
CallTarget = legalize(CallTarget, Legal_Reg);
}
+
+ // Copy arguments to be passed in registers to the appropriate registers.
+ for (auto &FPArg : FPArgs) {
+ Variable *Reg = legalizeToReg(FPArg.first, FPArg.second);
+ Context.insert(InstFakeUse::create(Func, Reg));
+ }
+ for (auto &GPRArg : GPRArgs) {
+ Variable *Reg = legalizeToReg(GPRArg.first, GPRArg.second);
+ // Generate a FakeUse of register arguments so that they do not get dead
+ // code eliminated as a result of the FakeKill of scratch registers after
+ // the call.
+ Context.insert(InstFakeUse::create(Func, Reg));
+ }
Inst *NewCall = InstARM32Call::create(Func, ReturnReg, CallTarget);
Context.insert(NewCall);
if (ReturnRegHi)
@@ -1908,7 +1968,7 @@
_mov(DestHi, ReturnRegHi);
} else {
if (isFloatingType(Dest->getType()) || isVectorType(Dest->getType())) {
- _vmov(Dest, ReturnReg);
+ _mov(Dest, ReturnReg);
} else {
assert(isIntegerType(Dest->getType()) &&
typeWidthInBytes(Dest->getType()) <= 4);
@@ -1918,6 +1978,13 @@
}
}
+namespace {
+void forceHiLoInReg(Variable64On32 *Var) {
+ Var->getHi()->setMustHaveReg();
+ Var->getLo()->setMustHaveReg();
+}
+} // end of anonymous namespace
+
void TargetARM32::lowerCast(const InstCast *Inst) {
InstCast::OpKind CastKind = Inst->getCastKind();
Variable *Dest = Inst->getDest();
@@ -1928,6 +1995,9 @@
return;
case InstCast::Sext: {
if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T, legalizeToReg(Src0)));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
} else if (Dest->getType() == IceType_i64) {
// t1=sxtb src; t2= mov t1 asr #31; dst.lo=t1; dst.hi=t2
@@ -1978,6 +2048,9 @@
}
case InstCast::Zext: {
if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T, legalizeToReg(Src0)));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
} else if (Dest->getType() == IceType_i64) {
// t1=uxtb src; dst.lo=t1; dst.hi=0
@@ -2024,6 +2097,9 @@
}
case InstCast::Trunc: {
if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T, legalizeToReg(Src0)));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
} else {
if (Src0->getType() == IceType_i64)
@@ -2044,6 +2120,9 @@
// fpext: dest.f64 = fptrunc src0.fp32
const bool IsTrunc = CastKind == InstCast::Fptrunc;
if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T, legalizeToReg(Src0)));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
break;
}
@@ -2057,6 +2136,26 @@
}
case InstCast::Fptosi:
case InstCast::Fptoui: {
+ if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T, legalizeToReg(Src0)));
+ _mov(Dest, T);
+ UnimplementedError(Func->getContext()->getFlags());
+ break;
+ }
+
+ const bool DestIsSigned = CastKind == InstCast::Fptosi;
+ const bool Src0IsF32 = isFloat32Asserting32Or64(Src0->getType());
+ if (llvm::isa<Variable64On32>(Dest)) {
+ const char *HelperName =
+ Src0IsF32 ? (DestIsSigned ? H_fptosi_f32_i64 : H_fptoui_f32_i64)
+ : (DestIsSigned ? H_fptosi_f64_i64 : H_fptoui_f64_i64);
+ static constexpr SizeT MaxSrcs = 1;
+ InstCall *Call = makeHelperCall(HelperName, Dest, MaxSrcs);
+ Call->addArg(Src0);
+ lowerCall(Call);
+ break;
+ }
// fptosi:
// t1.fp = vcvt src0.fp
// t2.i32 = vmov t1.fp
@@ -2065,28 +2164,14 @@
// t1.fp = vcvt src0.fp
// t2.u32 = vmov t1.fp
// dest.uint = conv t2.u32 @ Truncates the result if needed.
- if (isVectorType(Dest->getType())) {
- UnimplementedError(Func->getContext()->getFlags());
- break;
- }
- if (auto *Dest64On32 = llvm::dyn_cast<Variable64On32>(Dest)) {
- Context.insert(InstFakeDef::create(Func, Dest64On32->getLo()));
- Context.insert(InstFakeDef::create(Func, Dest64On32->getHi()));
- UnimplementedError(Func->getContext()->getFlags());
- break;
- }
- const bool DestIsSigned = CastKind == InstCast::Fptosi;
Variable *Src0R = legalizeToReg(Src0);
Variable *T_fp = makeReg(IceType_f32);
- if (isFloat32Asserting32Or64(Src0->getType())) {
- _vcvt(T_fp, Src0R,
- DestIsSigned ? InstARM32Vcvt::S2si : InstARM32Vcvt::S2ui);
- } else {
- _vcvt(T_fp, Src0R,
- DestIsSigned ? InstARM32Vcvt::D2si : InstARM32Vcvt::D2ui);
- }
+ const InstARM32Vcvt::VcvtVariant Conversion =
+ Src0IsF32 ? (DestIsSigned ? InstARM32Vcvt::S2si : InstARM32Vcvt::S2ui)
+ : (DestIsSigned ? InstARM32Vcvt::D2si : InstARM32Vcvt::D2ui);
+ _vcvt(T_fp, Src0R, Conversion);
Variable *T = makeReg(IceType_i32);
- _vmov(T, T_fp);
+ _mov(T, T_fp);
if (Dest->getType() != IceType_i32) {
Variable *T_1 = makeReg(Dest->getType());
lowerCast(InstCast::create(Func, InstCast::Trunc, T_1, T));
@@ -2097,6 +2182,25 @@
}
case InstCast::Sitofp:
case InstCast::Uitofp: {
+ if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T, legalizeToReg(Src0)));
+ _mov(Dest, T);
+ UnimplementedError(Func->getContext()->getFlags());
+ break;
+ }
+ const bool SourceIsSigned = CastKind == InstCast::Sitofp;
+ const bool DestIsF32 = isFloat32Asserting32Or64(Dest->getType());
+ if (Src0->getType() == IceType_i64) {
+ const char *HelperName =
+ DestIsF32 ? (SourceIsSigned ? H_sitofp_i64_f32 : H_uitofp_i64_f32)
+ : (SourceIsSigned ? H_sitofp_i64_f64 : H_uitofp_i64_f64);
+ static constexpr SizeT MaxSrcs = 1;
+ InstCall *Call = makeHelperCall(HelperName, Dest, MaxSrcs);
+ Call->addArg(Src0);
+ lowerCall(Call);
+ break;
+ }
// sitofp:
// t1.i32 = sext src.int @ sign-extends src0 if needed.
// t2.fp32 = vmov t1.i32
@@ -2105,17 +2209,6 @@
// t1.i32 = zext src.int @ zero-extends src0 if needed.
// t2.fp32 = vmov t1.i32
// t3.fp = vcvt.{fp}.s32 @ fp is either f32 or f64
- if (isVectorType(Dest->getType())) {
- UnimplementedError(Func->getContext()->getFlags());
- break;
- }
- if (Src0->getType() == IceType_i64) {
- // avoid cryptic liveness errors
- Context.insert(InstFakeDef::create(Func, Dest));
- UnimplementedError(Func->getContext()->getFlags());
- break;
- }
- const bool SourceIsSigned = CastKind == InstCast::Sitofp;
if (Src0->getType() != IceType_i32) {
Variable *Src0R_32 = makeReg(IceType_i32);
lowerCast(InstCast::create(Func, SourceIsSigned ? InstCast::Sext
@@ -2125,16 +2218,14 @@
}
Variable *Src0R = legalizeToReg(Src0);
Variable *Src0R_f32 = makeReg(IceType_f32);
- _vmov(Src0R_f32, Src0R);
+ _mov(Src0R_f32, Src0R);
Src0R = Src0R_f32;
Variable *T = makeReg(Dest->getType());
- if (isFloat32Asserting32Or64(Dest->getType())) {
- _vcvt(T, Src0R,
- SourceIsSigned ? InstARM32Vcvt::Si2s : InstARM32Vcvt::Ui2s);
- } else {
- _vcvt(T, Src0R,
- SourceIsSigned ? InstARM32Vcvt::Si2d : InstARM32Vcvt::Ui2d);
- }
+ const InstARM32Vcvt::VcvtVariant Conversion =
+ DestIsF32
+ ? (SourceIsSigned ? InstARM32Vcvt::Si2s : InstARM32Vcvt::Ui2s)
+ : (SourceIsSigned ? InstARM32Vcvt::Si2d : InstARM32Vcvt::Ui2d);
+ _vcvt(T, Src0R, Conversion);
_mov(Dest, T);
break;
}
@@ -2153,9 +2244,6 @@
case IceType_i1:
UnimplementedError(Func->getContext()->getFlags());
break;
- case IceType_v4i1:
- UnimplementedError(Func->getContext()->getFlags());
- break;
case IceType_i8:
UnimplementedError(Func->getContext()->getFlags());
break;
@@ -2166,7 +2254,7 @@
case IceType_f32: {
Variable *Src0R = legalizeToReg(Src0);
Variable *T = makeReg(DestType);
- _vmov(T, Src0R);
+ _mov(T, Src0R);
lowerAssign(InstAssign::create(Func, Dest, T));
break;
}
@@ -2175,13 +2263,17 @@
// dest[31..0] = t0
// dest[63..32] = t1
assert(Src0->getType() == IceType_f64);
- Variable *T0 = makeReg(IceType_i32);
- Variable *T1 = makeReg(IceType_i32);
+ auto *T = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64));
+ T->initHiLo(Func);
+ forceHiLoInReg(T);
Variable *Src0R = legalizeToReg(Src0);
- _vmov(InstARM32Vmov::RegisterPair(T0, T1), Src0R);
+ _mov(T, Src0R);
+ Context.insert(InstFakeDef::create(Func, T->getLo()));
+ Context.insert(InstFakeDef::create(Func, T->getHi()));
auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
- lowerAssign(InstAssign::create(Func, Dest64On32->getLo(), T0));
- lowerAssign(InstAssign::create(Func, Dest64On32->getHi(), T1));
+ lowerAssign(InstAssign::create(Func, Dest64On32->getLo(), T->getLo()));
+ lowerAssign(InstAssign::create(Func, Dest64On32->getHi(), T->getHi()));
+ Context.insert(InstFakeUse::create(Func, T));
break;
}
case IceType_f64: {
@@ -2190,41 +2282,47 @@
// vmov T2, T0, T1
// Dest <- T2
assert(Src0->getType() == IceType_i64);
- Variable *SrcLo = legalizeToReg(loOperand(Src0));
- Variable *SrcHi = legalizeToReg(hiOperand(Src0));
- Variable *T = makeReg(IceType_f64);
- _vmov(T, InstARM32Vmov::RegisterPair(SrcLo, SrcHi));
+ auto *Src64 = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64));
+ Src64->initHiLo(Func);
+ forceHiLoInReg(Src64);
+ Variable *T = Src64->getLo();
+ _mov(T, legalizeToReg(loOperand(Src0)));
+ T = Src64->getHi();
+ _mov(T, legalizeToReg(hiOperand(Src0)));
+ T = makeReg(IceType_f64);
+ Context.insert(InstFakeDef::create(Func, Src64));
+ _mov(T, Src64);
+ Context.insert(InstFakeUse::create(Func, Src64->getLo()));
+ Context.insert(InstFakeUse::create(Func, Src64->getHi()));
lowerAssign(InstAssign::create(Func, Dest, T));
break;
}
+ case IceType_v4i1:
case IceType_v8i1:
- UnimplementedError(Func->getContext()->getFlags());
- break;
case IceType_v16i1:
- UnimplementedError(Func->getContext()->getFlags());
- break;
case IceType_v8i16:
- UnimplementedError(Func->getContext()->getFlags());
- break;
case IceType_v16i8:
- UnimplementedError(Func->getContext()->getFlags());
- break;
- case IceType_v4i32:
- // avoid cryptic liveness errors
- Context.insert(InstFakeDef::create(Func, Dest));
- UnimplementedError(Func->getContext()->getFlags());
- break;
case IceType_v4f32:
+ case IceType_v4i32: {
+      // Insert a fake def so liveness stays consistent (avoids cryptic errors)
+ Variable *T = makeReg(DestType);
+ Context.insert(InstFakeDef::create(Func, T, legalizeToReg(Src0)));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
break;
}
+ }
break;
}
}
}
void TargetARM32::lowerExtractElement(const InstExtractElement *Inst) {
- (void)Inst;
+ Variable *Dest = Inst->getDest();
+ Type DestType = Dest->getType();
+ Variable *T = makeReg(DestType);
+ Context.insert(InstFakeDef::create(Func, T));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
}
@@ -2269,6 +2367,9 @@
void TargetARM32::lowerFcmp(const InstFcmp *Inst) {
Variable *Dest = Inst->getDest();
if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return;
}
@@ -2306,6 +2407,9 @@
Operand *Src1 = legalizeUndef(Inst->getSrc(1));
if (isVectorType(Dest->getType())) {
+ Variable *T = makeReg(Dest->getType());
+ Context.insert(InstFakeDef::create(Func, T));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return;
}
@@ -2514,7 +2618,7 @@
if (Val->getType() == IceType_i64) {
Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
Constant *Zero = Ctx->getConstantZero(IceType_i32);
- Variable *T = nullptr;
+ Variable *T = makeReg(Zero->getType());
_mov(T, Zero);
_mov(DestHi, T);
}
@@ -2561,9 +2665,18 @@
return;
}
case Intrinsics::Fabs: {
- // Add a fake def to keep liveness consistent in the meantime.
- Context.insert(InstFakeDef::create(Func, Instr->getDest()));
- UnimplementedError(Func->getContext()->getFlags());
+ Variable *Dest = Instr->getDest();
+ Type DestTy = Dest->getType();
+ Variable *T = makeReg(DestTy);
+ if (isVectorType(DestTy)) {
+ // Add a fake def to keep liveness consistent in the meantime.
+ Context.insert(InstFakeDef::create(Func, T));
+ _mov(Instr->getDest(), T);
+ UnimplementedError(Func->getContext()->getFlags());
+ return;
+ }
+ _vabs(T, legalizeToReg(Instr->getArg(0)));
+ _mov(Dest, T);
return;
}
case Intrinsics::Longjmp: {
@@ -2628,7 +2741,7 @@
Variable *Dest = Instr->getDest();
Variable *T = makeReg(Dest->getType());
_vsqrt(T, Src);
- _vmov(Dest, T);
+ _mov(Dest, T);
return;
}
case Intrinsics::Stacksave: {
@@ -2674,7 +2787,7 @@
// of T2 as if it was used as a source.
_set_dest_nonkillable();
_mov(DestLo, T2);
- Variable *T3 = nullptr;
+ Variable *T3 = makeReg(Zero->getType());
_mov(T3, Zero);
_mov(DestHi, T3);
return;
@@ -2734,7 +2847,8 @@
Reg = Q0;
} else {
Operand *Src0F = legalize(Src0, Legal_Reg | Legal_Flex);
- _mov(Reg, Src0F, CondARM32::AL, RegARM32::Reg_r0);
+ Reg = makeReg(Src0F->getType(), RegARM32::Reg_r0);
+ _mov(Reg, Src0F, CondARM32::AL);
}
}
// Add a ret instruction even if sandboxing is enabled, because addEpilog
@@ -2758,6 +2872,9 @@
Operand *Condition = Inst->getCondition();
if (isVectorType(DestTy)) {
+ Variable *T = makeReg(DestTy);
+ Context.insert(InstFakeDef::create(Func, T));
+ _mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return;
}
@@ -2772,16 +2889,16 @@
SrcF = legalizeUndef(SrcF);
// Set the low portion.
Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
- Variable *TLo = nullptr;
Operand *SrcFLo = legalize(loOperand(SrcF), Legal_Reg | Legal_Flex);
+ Variable *TLo = makeReg(SrcFLo->getType());
_mov(TLo, SrcFLo);
Operand *SrcTLo = legalize(loOperand(SrcT), Legal_Reg | Legal_Flex);
_mov_nonkillable(TLo, SrcTLo, Cond);
_mov(DestLo, TLo);
// Set the high portion.
Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
- Variable *THi = nullptr;
Operand *SrcFHi = legalize(hiOperand(SrcF), Legal_Reg | Legal_Flex);
+ Variable *THi = makeReg(SrcFHi->getType());
_mov(THi, SrcFHi);
Operand *SrcTHi = legalize(hiOperand(SrcT), Legal_Reg | Legal_Flex);
_mov_nonkillable(THi, SrcTHi, Cond);
@@ -2793,17 +2910,17 @@
Variable *T = makeReg(DestTy);
SrcF = legalizeToReg(SrcF);
assert(DestTy == SrcF->getType());
- _vmov(T, SrcF);
+ _mov(T, SrcF);
SrcT = legalizeToReg(SrcT);
assert(DestTy == SrcT->getType());
- _vmov(T, SrcT, Cond);
+ _mov(T, SrcT, Cond);
_set_dest_nonkillable();
- _vmov(Dest, T);
+ _mov(Dest, T);
return;
}
- Variable *T = nullptr;
SrcF = legalize(SrcF, Legal_Reg | Legal_Flex);
+ Variable *T = makeReg(SrcF->getType());
_mov(T, SrcF);
SrcT = legalize(SrcT, Legal_Reg | Legal_Flex);
_mov_nonkillable(T, SrcT, Cond);
@@ -2823,9 +2940,6 @@
_str(ValueHi, llvm::cast<OperandARM32Mem>(hiOperand(NewAddr)));
_str(ValueLo, llvm::cast<OperandARM32Mem>(loOperand(NewAddr)));
} else {
- if (isVectorType(Ty)) {
- UnimplementedError(Func->getContext()->getFlags());
- }
Variable *ValueR = legalizeToReg(Value);
_str(ValueR, NewAddr);
}
@@ -2878,6 +2992,7 @@
Variable *TargetARM32::makeVectorOfZeros(Type Ty, int32_t RegNum) {
Variable *Reg = makeReg(Ty, RegNum);
+ Context.insert(InstFakeDef::create(Func, Reg));
UnimplementedError(Func->getContext()->getFlags());
return Reg;
}
@@ -2887,16 +3002,7 @@
Variable *TargetARM32::copyToReg(Operand *Src, int32_t RegNum) {
Type Ty = Src->getType();
Variable *Reg = makeReg(Ty, RegNum);
- if (isVectorType(Ty)) {
- // TODO(jpp): Src must be a register, or an address with base register.
- _vmov(Reg, Src);
- } else if (isFloatingType(Ty)) {
- _vmov(Reg, Src);
- } else {
- // Mov's Src operand can really only be the flexible second operand type or
- // a register. Users should guarantee that.
- _mov(Reg, Src);
- }
+ _mov(Reg, Src);
return Reg;
}
@@ -2912,10 +3018,22 @@
// type of operand is not legal (e.g., OperandARM32Mem and !Legal_Mem), we
// can always copy to a register.
if (auto Mem = llvm::dyn_cast<OperandARM32Mem>(From)) {
+ static const struct {
+ bool CanHaveOffset;
+ bool CanHaveIndex;
+ } MemTraits[] = {
+#define X(tag, elementty, int_width, vec_width, sbits, ubits, rraddr) \
+ { (ubits) > 0, rraddr } \
+ ,
+ ICETYPEARM32_TABLE
+#undef X
+ };
// Before doing anything with a Mem operand, we need to ensure that the
// Base and Index components are in physical registers.
Variable *Base = Mem->getBase();
Variable *Index = Mem->getIndex();
+ ConstantInteger32 *Offset = Mem->getOffset();
+ assert(Index == nullptr || Offset == nullptr);
Variable *RegBase = nullptr;
Variable *RegIndex = nullptr;
if (Base) {
@@ -2923,32 +3041,43 @@
}
if (Index) {
RegIndex = legalizeToReg(Index);
+ if (!MemTraits[Ty].CanHaveIndex) {
+ Variable *T = makeReg(IceType_i32, getReservedTmpReg());
+ _add(T, RegBase, RegIndex);
+ RegBase = T;
+ RegIndex = nullptr;
+ }
}
+ if (Offset && Offset->getValue() != 0) {
+ static constexpr bool SignExt = false;
+ if (!MemTraits[Ty].CanHaveOffset ||
+ !OperandARM32Mem::canHoldOffset(Ty, SignExt, Offset->getValue())) {
+ Variable *T = legalizeToReg(Offset, getReservedTmpReg());
+ _add(T, T, RegBase);
+ RegBase = T;
+ Offset = llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(0));
+ }
+ }
+
// Create a new operand if there was a change.
if (Base != RegBase || Index != RegIndex) {
// There is only a reg +/- reg or reg + imm form.
// Figure out which to re-create.
- if (Mem->isRegReg()) {
+ if (RegBase && RegIndex) {
Mem = OperandARM32Mem::create(Func, Ty, RegBase, RegIndex,
Mem->getShiftOp(), Mem->getShiftAmt(),
Mem->getAddrMode());
} else {
- Mem = OperandARM32Mem::create(Func, Ty, RegBase, Mem->getOffset(),
+ Mem = OperandARM32Mem::create(Func, Ty, RegBase, Offset,
Mem->getAddrMode());
}
}
- if (!(Allowed & Legal_Mem)) {
- Variable *Reg = makeReg(Ty, RegNum);
- if (isVectorType(Ty)) {
- UnimplementedError(Func->getContext()->getFlags());
- } else if (isFloatingType(Ty)) {
- _vldr(Reg, Mem);
- } else {
- _ldr(Reg, Mem);
- }
- From = Reg;
- } else {
+ if (Allowed & Legal_Mem) {
From = Mem;
+ } else {
+ Variable *Reg = makeReg(Ty, RegNum);
+ _ldr(Reg, Mem);
+ From = Reg;
}
return From;
}
diff --git a/src/IceTargetLoweringARM32.h b/src/IceTargetLoweringARM32.h
index 6085eed..a1f37a6 100644
--- a/src/IceTargetLoweringARM32.h
+++ b/src/IceTargetLoweringARM32.h
@@ -189,7 +189,6 @@
// The following are helpers that insert lowered ARM32 instructions with
// minimal syntactic overhead, so that the lowering code can look as close to
// assembly as practical.
-
void _add(Variable *Dest, Variable *Src0, Operand *Src1,
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Add::create(Func, Dest, Src0, Src1, Pred));
@@ -246,6 +245,10 @@
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Eor::create(Func, Dest, Src0, Src1, Pred));
}
+  /// _ldr, for all your memory to Variable data moves. It handles all types
+  /// (integer, floating point, and vectors). Addr needs to be valid for Dest's
+  /// type (e.g., no immediates for vector loads, and no index registers for fp
+  /// loads).
void _ldr(Variable *Dest, OperandARM32Mem *Addr,
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Ldr::create(Func, Dest, Addr, Pred));
@@ -266,14 +269,17 @@
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Mls::create(Func, Dest, Src0, Src1, Acc, Pred));
}
- /// If Dest=nullptr is passed in, then a new variable is created, marked as
- /// infinite register allocation weight, and returned through the in/out Dest
- /// argument.
- void _mov(Variable *&Dest, Operand *Src0,
- CondARM32::Cond Pred = CondARM32::AL,
- int32_t RegNum = Variable::NoRegister) {
- if (Dest == nullptr)
- Dest = makeReg(Src0->getType(), RegNum);
+ /// _mov, for all your Variable to Variable data movement needs. It handles
+ /// all types (integer, floating point, and vectors), as well as moves between
+ /// Core and VFP registers. This is not a panacea: you must obey the (weird,
+ /// confusing, non-uniform) rules for data moves in ARM.
+ void _mov(Variable *Dest, Operand *Src0,
+ CondARM32::Cond Pred = CondARM32::AL) {
+ // _mov used to be unique in the sense that it would create a temporary
+ // automagically if Dest was nullptr. It won't do that anymore, so we keep
+ // an assert around just in case there is some untested code path where Dest
+ // is nullptr.
+ assert(Dest != nullptr);
Context.insert(InstARM32Mov::create(Func, Dest, Src0, Pred));
}
void _mov_nonkillable(Variable *Dest, Operand *Src0,
@@ -348,6 +354,8 @@
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Sdiv::create(Func, Dest, Src0, Src1, Pred));
}
+ /// _str, for all your Variable to memory transfers. Addr has the same
+ /// restrictions that it does in _ldr.
void _str(Variable *Value, OperandARM32Mem *Addr,
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Str::create(Func, Value, Addr, Pred));
@@ -387,6 +395,10 @@
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Uxt::create(Func, Dest, Src0, Pred));
}
+ void _vabs(Variable *Dest, Variable *Src,
+ CondARM32::Cond Pred = CondARM32::AL) {
+ Context.insert(InstARM32Vabs::create(Func, Dest, Src, Pred));
+ }
void _vadd(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstARM32Vadd::create(Func, Dest, Src0, Src1));
}
@@ -397,10 +409,6 @@
void _vdiv(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstARM32Vdiv::create(Func, Dest, Src0, Src1));
}
- void _vldr(Variable *Dest, OperandARM32Mem *Src,
- CondARM32::Cond Pred = CondARM32::AL) {
- Context.insert(InstARM32Vldr::create(Func, Dest, Src, Pred));
- }
void _vcmp(Variable *Src0, Variable *Src1,
CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Vcmp::create(Func, Src0, Src1, Pred));
@@ -408,33 +416,6 @@
void _vmrs(CondARM32::Cond Pred = CondARM32::AL) {
Context.insert(InstARM32Vmrs::create(Func, Pred));
}
- // There are a whole bunch of vmov variants, to transfer within S/D/Q
- // registers, between core integer registers and S/D, and from small
- // immediates into S/D. For integer -> S/D/Q there is a variant which takes
- // two integer register to fill a D, or to fill two consecutive S registers.
- // Vmov can also be used to insert-element. E.g.,
- // "vmov.8 d0[1], r0"
- // but insert-element is a "two-address" operation where only part of the
- // register is modified. This cannot model that.
- //
- // This represents the simple single source, single dest variants only.
- void _vmov(Variable *Dest, Operand *Src0,
- CondARM32::Cond Pred = CondARM32::AL) {
- Context.insert(InstARM32Vmov::create(Func, Dest, Src0, Pred));
- }
- // This represents the single source, multi dest variant.
- void _vmov(InstARM32Vmov::RegisterPair Dests, Variable *Src0) {
- constexpr CondARM32::Cond Pred = CondARM32::AL;
- Context.insert(InstARM32Vmov::create(Func, Dests, Src0, Pred));
- // The Vmov instruction created above does not define Dests._1. Therefore
- // we add a Dest._1 = FakeDef pseudo instruction.
- Context.insert(InstFakeDef::create(Func, Dests._1));
- }
- // This represents the multi source, single dest variant.
- void _vmov(Variable *Dest, InstARM32Vmov::RegisterPair Srcs) {
- constexpr CondARM32::Cond Pred = CondARM32::AL;
- Context.insert(InstARM32Vmov::create(Func, Dest, Srcs, Pred));
- }
void _vmul(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstARM32Vmul::create(Func, Dest, Src0, Src1));
}
@@ -451,10 +432,11 @@
/// offset, such that the addressing mode offset bits are now legal.
void legalizeStackSlots();
/// Returns true if the given Offset can be represented in a stack ldr/str.
- bool isLegalVariableStackOffset(int32_t Offset) const;
+ bool isLegalVariableStackOffset(Type Ty, int32_t Offset) const;
/// Assuming Var needs its offset legalized, define a new base register
- /// centered on the given Var's offset and use it.
- StackVariable *legalizeVariableSlot(Variable *Var, Variable *OrigBaseReg);
+ /// centered on the given Var's offset plus StackAdjust, and use it.
+ StackVariable *legalizeVariableSlot(Variable *Var, int32_t StackAdjust,
+ Variable *OrigBaseReg);
TargetARM32Features CPUFeatures;
bool UsesFramePointer = false;