Add a basic TargetARM32 skeleton which knows nothing.
Later commits will add more information, but this tests the
conditional compilation and build setup.
One way to do conditional compilation: determine this
early, at LLVM configure/CMake time. Configure will
fill in the template of SZTargets.def.in to get
a SZTargets.def file.
LLVM change:
https://codereview.chromium.org/1084753002/
NaCl change:
https://codereview.chromium.org/1082953002/
I suppose an alternative is to fill in the .def file via
-D flags in CXXFLAGS.
For conditional lit testing, pnacl-sz dumps the attributes
when given the --build-atts flag, so we just build on top of that.
We do that instead of going the LLVM way of filling in a
lit.site.cfg.in -> lit.site.cfg at configure/CMake time.
BUG= https://code.google.com/p/nativeclient/issues/detail?id=4076
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/1075363002
diff --git a/Makefile.standalone b/Makefile.standalone
index 85a00e0..18065af 100644
--- a/Makefile.standalone
+++ b/Makefile.standalone
@@ -184,6 +184,7 @@
IceRegAlloc.cpp \
IceRNG.cpp \
IceTargetLowering.cpp \
+ IceTargetLoweringARM32.cpp \
IceTargetLoweringX8632.cpp \
IceThreading.cpp \
IceTimerTree.cpp \
diff --git a/src/IceCompiler.cpp b/src/IceCompiler.cpp
index 47f1ba4..7c8b791 100644
--- a/src/IceCompiler.cpp
+++ b/src/IceCompiler.cpp
@@ -47,9 +47,15 @@
// Validates values of build attributes. Prints them to Stream if
// Stream is non-null.
void ValidateAndGenerateBuildAttributes(const ClFlags &Flags, Ostream *Stream) {
- if (Stream)
+ if (Stream) {
+ // List the requested target.
*Stream << Flags.getTargetArch() << "\n";
+// List the supported targets.
+#define SUBZERO_TARGET(TARGET) *Stream << "target_" #TARGET << "\n";
+#include "llvm/Config/SZTargets.def"
+ }
+
for (size_t i = 0; i < llvm::array_lengthof(ConditionalBuildAttributes);
++i) {
switch (ConditionalBuildAttributes[i].FlagValue) {
diff --git a/src/IceInstARM32.def b/src/IceInstARM32.def
new file mode 100644
index 0000000..ea7032a
--- /dev/null
+++ b/src/IceInstARM32.def
@@ -0,0 +1,65 @@
+//===- subzero/src/IceInstARM32.def - X-Macros for ARM32 insts --*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines properties of ARM32 instructions in the form of x-macros.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEINSTARM32_DEF
+#define SUBZERO_SRC_ICEINSTARM32_DEF
+
+// NOTE: PC and SP are not considered isInt, to avoid register allocating.
+// For the NaCl sandbox we also need r9 for TLS, so just reserve it always.
+// TODO(jvoung): Allow r9 to be isInt when sandboxing is turned off
+// (native mode).
+#define REGARM32_GPR_TABLE \
+ /* val, encode, name, scratch, preserved, stackptr, frameptr, isInt, isFP */ \
+ X(Reg_r0, = 0, "r0", 1, 0, 0, 0, 1, 0) \
+ X(Reg_r1, = Reg_r0 + 1, "r1", 1, 0, 0, 0, 1, 0) \
+ X(Reg_r2, = Reg_r0 + 2, "r2", 1, 0, 0, 0, 1, 0) \
+ X(Reg_r3, = Reg_r0 + 3, "r3", 1, 0, 0, 0, 1, 0) \
+ X(Reg_r4, = Reg_r0 + 4, "r4", 0, 1, 0, 0, 1, 0) \
+ X(Reg_r5, = Reg_r0 + 5, "r5", 0, 1, 0, 0, 1, 0) \
+ X(Reg_r6, = Reg_r0 + 6, "r6", 0, 1, 0, 0, 1, 0) \
+ X(Reg_r7, = Reg_r0 + 7, "r7", 0, 1, 0, 0, 1, 0) \
+ X(Reg_r8, = Reg_r0 + 8, "r8", 0, 1, 0, 0, 1, 0) \
+ X(Reg_r9, = Reg_r0 + 9, "r9", 0, 1, 0, 0, 0, 0) \
+ X(Reg_r10, = Reg_r0 + 10, "r10", 0, 1, 0, 0, 1, 0) \
+ X(Reg_fp, = Reg_r0 + 11, "fp", 0, 1, 0, 1, 1, 0) \
+ X(Reg_ip, = Reg_r0 + 12, "ip", 1, 0, 0, 0, 1, 0) \
+ X(Reg_sp, = Reg_r0 + 13, "sp", 0, 1, 1, 0, 0, 0) \
+ X(Reg_lr, = Reg_r0 + 14, "lr", 0, 1, 0, 0, 1, 0) \
+ X(Reg_pc, = Reg_r0 + 15, "pc", 0, 1, 0, 0, 0, 0) \
+//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
+// isInt, isFP)
+
+// TODO(jvoung): List FP registers and know S0 == D0 == Q0, etc.
+// Be able to grab even registers, and the corresponding odd register
+// for each even register.
+
+// We also provide a combined table, so that there is a namespace where
+// all of the registers are considered and have distinct numberings.
+// This is in contrast to the above, where the "encode" is based on how
+// the register numbers will be encoded in binaries and values can overlap.
+#define REGARM32_TABLE \
+ /* val, encode, name, scratch, preserved, stackptr, frameptr, isInt, isFP */ \
+ REGARM32_GPR_TABLE
+//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
+// isInt, isFP)
+
+#define REGARM32_TABLE_BOUNDS \
+ /* val, init */ \
+ X(Reg_GPR_First, = Reg_r0) \
+ X(Reg_GPR_Last, = Reg_pc)
+//#define X(val, init)
+
+// TODO(jvoung): add condition code tables, etc.
+
+
+#endif // SUBZERO_SRC_ICEINSTARM32_DEF
diff --git a/src/IceInstARM32.h b/src/IceInstARM32.h
new file mode 100644
index 0000000..1c7d346
--- /dev/null
+++ b/src/IceInstARM32.h
@@ -0,0 +1,28 @@
+//===- subzero/src/IceInstARM32.h - ARM32 machine instructions --*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the InstARM32 and OperandARM32 classes and
+// their subclasses. This represents the machine instructions and
+// operands used for ARM32 code selection.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEINSTARM32_H
+#define SUBZERO_SRC_ICEINSTARM32_H
+
+#include "IceDefs.h"
+
+namespace Ice {
+
+class TargetARM32;
+// Fill this in.
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEINSTARM32_H
diff --git a/src/IceInstX8632.h b/src/IceInstX8632.h
index de60349..f0824b5 100644
--- a/src/IceInstX8632.h
+++ b/src/IceInstX8632.h
@@ -1,4 +1,4 @@
-//===- subzero/src/IceInstX8632.h - Low-level x86 instructions --*- C++ -*-===//
+//===- subzero/src/IceInstX8632.h - x86-32 machine instructions -*- C++ -*-===//
//
// The Subzero Code Generator
//
diff --git a/src/IceRegistersARM32.h b/src/IceRegistersARM32.h
new file mode 100644
index 0000000..2ad1c8b
--- /dev/null
+++ b/src/IceRegistersARM32.h
@@ -0,0 +1,62 @@
+//===- subzero/src/IceRegistersARM32.h - Register information ---*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the registers and their encodings for ARM32.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEREGISTERSARM32_H
+#define SUBZERO_SRC_ICEREGISTERSARM32_H
+
+#include "IceDefs.h"
+#include "IceInstARM32.def"
+#include "IceTypes.h"
+
+namespace Ice {
+
+namespace RegARM32 {
+
+// An enum of every register. The enum value may not match the encoding
+// used to binary encode register operands in instructions.
+enum AllRegisters {
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ val,
+ REGARM32_TABLE
+#undef X
+ Reg_NUM,
+#define X(val, init) val init,
+ REGARM32_TABLE_BOUNDS
+#undef X
+};
+
+// An enum of GPR Registers. The enum value does match the encoding used
+// to binary encode register operands in instructions.
+enum GPRRegister {
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ Encoded_##val encode,
+ REGARM32_GPR_TABLE
+#undef X
+ Encoded_Not_GPR = -1
+};
+
+// TODO(jvoung): Floating point and vector registers...
+// Need to model overlap and difference in encoding too.
+
+static inline GPRRegister getEncodedGPR(int32_t RegNum) {
+ assert(Reg_GPR_First <= RegNum && RegNum <= Reg_GPR_Last);
+ return GPRRegister(RegNum - Reg_GPR_First);
+}
+
+} // end of namespace RegARM32
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEREGISTERSARM32_H
diff --git a/src/IceTargetLowering.cpp b/src/IceTargetLowering.cpp
index 39d27fe..93b4884 100644
--- a/src/IceTargetLowering.cpp
+++ b/src/IceTargetLowering.cpp
@@ -15,12 +15,14 @@
//
//===----------------------------------------------------------------------===//
+#include "assembler_arm32.h"
#include "assembler_ia32.h"
#include "IceCfg.h" // setError()
#include "IceCfgNode.h"
#include "IceOperand.h"
#include "IceRegAlloc.h"
#include "IceTargetLowering.h"
+#include "IceTargetLoweringARM32.h"
#include "IceTargetLoweringX8632.h"
namespace Ice {
@@ -62,34 +64,33 @@
}
TargetLowering *TargetLowering::createLowering(TargetArch Target, Cfg *Func) {
- // These statements can be #ifdef'd to specialize the code generator
- // to a subset of the available targets. TODO: use CRTP.
- if (Target == Target_X8632)
- return TargetX8632::create(Func);
-#if 0
- if (Target == Target_X8664)
- return IceTargetX8664::create(Func);
- if (Target == Target_ARM32)
- return IceTargetARM32::create(Func);
- if (Target == Target_ARM64)
- return IceTargetARM64::create(Func);
-#endif
+#define SUBZERO_TARGET(X) \
+ if (Target == Target_##X) \
+ return Target##X::create(Func);
+#include "llvm/Config/SZTargets.def"
+
Func->setError("Unsupported target");
return nullptr;
}
TargetLowering::TargetLowering(Cfg *Func)
: Func(Func), Ctx(Func->getContext()), HasComputedFrame(false),
- CallsReturnsTwice(false), StackAdjustment(0), Context(),
- SnapshotStackAdjustment(0) {}
+ CallsReturnsTwice(false), StackAdjustment(0), NextLabelNumber(0),
+ Context(), SnapshotStackAdjustment(0) {}
std::unique_ptr<Assembler> TargetLowering::createAssembler(TargetArch Target,
Cfg *Func) {
// These statements can be #ifdef'd to specialize the assembler
// to a subset of the available targets. TODO: use CRTP.
+ // TODO(jvoung): use SZTargets.def (rename AssemblerX86 -> AssemblerX8632),
+ // and make the namespaces consistent.
if (Target == Target_X8632)
return std::unique_ptr<Assembler>(new x86::AssemblerX86());
- Func->setError("Unsupported target");
+
+ if (Target == Target_ARM32)
+ return std::unique_ptr<Assembler>(new AssemblerARM32());
+
+ Func->setError("Unsupported target assembler");
return nullptr;
}
@@ -229,22 +230,24 @@
LinearScan.scan(RegMask, Ctx->getFlags().shouldRandomizeRegAlloc());
}
+InstCall *TargetLowering::makeHelperCall(const IceString &Name, Variable *Dest,
+ SizeT MaxSrcs) {
+ const bool HasTailCall = false;
+ Constant *CallTarget = Ctx->getConstantExternSym(Name);
+ InstCall *Call =
+ InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall);
+ return Call;
+}
+
std::unique_ptr<TargetDataLowering>
TargetDataLowering::createLowering(GlobalContext *Ctx) {
- // These statements can be #ifdef'd to specialize the code generator
- // to a subset of the available targets. TODO: use CRTP.
TargetArch Target = Ctx->getFlags().getTargetArch();
- if (Target == Target_X8632)
- return std::unique_ptr<TargetDataLowering>(TargetDataX8632::create(Ctx));
-#if 0
- if (Target == Target_X8664)
- return std::unique_ptr<TargetDataLowering>(TargetDataX8664::create(Ctx));
- if (Target == Target_ARM32)
- return std::unique_ptr<TargetDataLowering>(TargetDataARM32::create(Ctx));
- if (Target == Target_ARM64)
- return std::unique_ptr<TargetDataLowering>(TargetDataARM64::create(Ctx));
-#endif
- llvm_unreachable("Unsupported target");
+#define SUBZERO_TARGET(X) \
+ if (Target == Target_##X) \
+ return std::unique_ptr<TargetDataLowering>(TargetData##X::create(Ctx));
+#include "llvm/Config/SZTargets.def"
+
+ llvm_unreachable("Unsupported target data lowering");
return nullptr;
}
diff --git a/src/IceTargetLowering.h b/src/IceTargetLowering.h
index 112572c..baa569f 100644
--- a/src/IceTargetLowering.h
+++ b/src/IceTargetLowering.h
@@ -95,6 +95,7 @@
TargetLowering &operator=(const TargetLowering &) = delete;
public:
+ // TODO(jvoung): return a unique_ptr like the other factory functions.
static TargetLowering *createLowering(TargetArch Target, Cfg *Func);
static std::unique_ptr<Assembler> createAssembler(TargetArch Target,
Cfg *Func);
@@ -171,6 +172,7 @@
int32_t getStackAdjustment() const { return StackAdjustment; }
void updateStackAdjustment(int32_t Offset) { StackAdjustment += Offset; }
void resetStackAdjustment() { StackAdjustment = 0; }
+ SizeT makeNextLabelNumber() { return NextLabelNumber++; }
LoweringContext &getContext() { return Context; }
enum RegSet {
@@ -241,6 +243,10 @@
// expansion before returning.
virtual void postLower() {}
+ // Make a call to an external helper function.
+ InstCall *makeHelperCall(const IceString &Name, Variable *Dest,
+ SizeT MaxSrcs);
+
Cfg *Func;
GlobalContext *Ctx;
bool HasComputedFrame;
@@ -248,6 +254,7 @@
// StackAdjustment keeps track of the current stack offset from its
// natural location, as arguments are pushed for a function call.
int32_t StackAdjustment;
+ SizeT NextLabelNumber;
LoweringContext Context;
// Runtime helper function names
diff --git a/src/IceTargetLoweringARM32.cpp b/src/IceTargetLoweringARM32.cpp
new file mode 100644
index 0000000..6691e1e
--- /dev/null
+++ b/src/IceTargetLoweringARM32.cpp
@@ -0,0 +1,705 @@
+//===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TargetLoweringARM32 class, which consists almost
+// entirely of the lowering sequence for each high-level instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/MathExtras.h"
+
+#include "IceCfg.h"
+#include "IceCfgNode.h"
+#include "IceClFlags.h"
+#include "IceDefs.h"
+#include "IceELFObjectWriter.h"
+#include "IceGlobalInits.h"
+#include "IceInstARM32.h"
+#include "IceLiveness.h"
+#include "IceOperand.h"
+#include "IceRegistersARM32.h"
+#include "IceTargetLoweringARM32.def"
+#include "IceTargetLoweringARM32.h"
+#include "IceUtils.h"
+
+namespace Ice {
+
+TargetARM32::TargetARM32(Cfg *Func)
+ : TargetLowering(Func), UsesFramePointer(false) {
+ // TODO: Don't initialize IntegerRegisters and friends every time.
+ // Instead, initialize in some sort of static initializer for the
+ // class.
+ llvm::SmallBitVector IntegerRegisters(RegARM32::Reg_NUM);
+ llvm::SmallBitVector FloatRegisters(RegARM32::Reg_NUM);
+ llvm::SmallBitVector VectorRegisters(RegARM32::Reg_NUM);
+ llvm::SmallBitVector InvalidRegisters(RegARM32::Reg_NUM);
+ ScratchRegs.resize(RegARM32::Reg_NUM);
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ IntegerRegisters[RegARM32::val] = isInt; \
+ FloatRegisters[RegARM32::val] = isFP; \
+ VectorRegisters[RegARM32::val] = isFP; \
+ ScratchRegs[RegARM32::val] = scratch;
+ REGARM32_TABLE;
+#undef X
+ TypeToRegisterSet[IceType_void] = InvalidRegisters;
+ TypeToRegisterSet[IceType_i1] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i8] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i16] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i32] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i64] = IntegerRegisters;
+ TypeToRegisterSet[IceType_f32] = FloatRegisters;
+ TypeToRegisterSet[IceType_f64] = FloatRegisters;
+ TypeToRegisterSet[IceType_v4i1] = VectorRegisters;
+ TypeToRegisterSet[IceType_v8i1] = VectorRegisters;
+ TypeToRegisterSet[IceType_v16i1] = VectorRegisters;
+ TypeToRegisterSet[IceType_v16i8] = VectorRegisters;
+ TypeToRegisterSet[IceType_v8i16] = VectorRegisters;
+ TypeToRegisterSet[IceType_v4i32] = VectorRegisters;
+ TypeToRegisterSet[IceType_v4f32] = VectorRegisters;
+}
+
+void TargetARM32::translateO2() {
+ TimerMarker T(TimerStack::TT_O2, Func);
+
+ // TODO(stichnot): share passes with X86?
+ // https://code.google.com/p/nativeclient/issues/detail?id=4094
+
+ if (!Ctx->getFlags().getPhiEdgeSplit()) {
+ // Lower Phi instructions.
+ Func->placePhiLoads();
+ if (Func->hasError())
+ return;
+ Func->placePhiStores();
+ if (Func->hasError())
+ return;
+ Func->deletePhis();
+ if (Func->hasError())
+ return;
+ Func->dump("After Phi lowering");
+ }
+
+ // Address mode optimization.
+ Func->getVMetadata()->init(VMK_SingleDefs);
+ Func->doAddressOpt();
+
+ // Argument lowering
+ Func->doArgLowering();
+
+ // Target lowering. This requires liveness analysis for some parts
+ // of the lowering decisions, such as compare/branch fusing. If
+ // non-lightweight liveness analysis is used, the instructions need
+ // to be renumbered first. TODO: This renumbering should only be
+ // necessary if we're actually calculating live intervals, which we
+ // only do for register allocation.
+ Func->renumberInstructions();
+ if (Func->hasError())
+ return;
+
+ // TODO: It should be sufficient to use the fastest liveness
+ // calculation, i.e. livenessLightweight(). However, for some
+ // reason that slows down the rest of the translation. Investigate.
+ Func->liveness(Liveness_Basic);
+ if (Func->hasError())
+ return;
+ Func->dump("After ARM32 address mode opt");
+
+ Func->genCode();
+ if (Func->hasError())
+ return;
+ Func->dump("After ARM32 codegen");
+
+ // Register allocation. This requires instruction renumbering and
+ // full liveness analysis.
+ Func->renumberInstructions();
+ if (Func->hasError())
+ return;
+ Func->liveness(Liveness_Intervals);
+ if (Func->hasError())
+ return;
+ // Validate the live range computations. The expensive validation
+ // call is deliberately only made when assertions are enabled.
+ assert(Func->validateLiveness());
+ // The post-codegen dump is done here, after liveness analysis and
+ // associated cleanup, to make the dump cleaner and more useful.
+ Func->dump("After initial ARM32 codegen");
+ Func->getVMetadata()->init(VMK_All);
+ regAlloc(RAK_Global);
+ if (Func->hasError())
+ return;
+ Func->dump("After linear scan regalloc");
+
+ if (Ctx->getFlags().getPhiEdgeSplit()) {
+ Func->advancedPhiLowering();
+ Func->dump("After advanced Phi lowering");
+ }
+
+ // Stack frame mapping.
+ Func->genFrame();
+ if (Func->hasError())
+ return;
+ Func->dump("After stack frame mapping");
+
+ Func->contractEmptyNodes();
+ Func->reorderNodes();
+
+ // Branch optimization. This needs to be done just before code
+ // emission. In particular, no transformations that insert or
+ // reorder CfgNodes should be done after branch optimization. We go
+ // ahead and do it before nop insertion to reduce the amount of work
+ // needed for searching for opportunities.
+ Func->doBranchOpt();
+ Func->dump("After branch optimization");
+
+ // Nop insertion
+ if (Ctx->getFlags().shouldDoNopInsertion()) {
+ Func->doNopInsertion();
+ }
+}
+
+void TargetARM32::translateOm1() {
+ TimerMarker T(TimerStack::TT_Om1, Func);
+
+ // TODO: share passes with X86?
+
+ Func->placePhiLoads();
+ if (Func->hasError())
+ return;
+ Func->placePhiStores();
+ if (Func->hasError())
+ return;
+ Func->deletePhis();
+ if (Func->hasError())
+ return;
+ Func->dump("After Phi lowering");
+
+ Func->doArgLowering();
+
+ Func->genCode();
+ if (Func->hasError())
+ return;
+ Func->dump("After initial ARM32 codegen");
+
+ regAlloc(RAK_InfOnly);
+ if (Func->hasError())
+ return;
+ Func->dump("After regalloc of infinite-weight variables");
+
+ Func->genFrame();
+ if (Func->hasError())
+ return;
+ Func->dump("After stack frame mapping");
+
+ // Nop insertion
+ if (Ctx->getFlags().shouldDoNopInsertion()) {
+ Func->doNopInsertion();
+ }
+}
+
+bool TargetARM32::doBranchOpt(Inst *I, const CfgNode *NextNode) {
+ (void)I;
+ (void)NextNode;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+IceString TargetARM32::RegNames[] = {
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ name,
+ REGARM32_TABLE
+#undef X
+};
+
+IceString TargetARM32::getRegName(SizeT RegNum, Type Ty) const {
+ assert(RegNum < RegARM32::Reg_NUM);
+ (void)Ty;
+ return RegNames[RegNum];
+}
+
+Variable *TargetARM32::getPhysicalRegister(SizeT RegNum, Type Ty) {
+ if (Ty == IceType_void)
+ Ty = IceType_i32;
+ if (PhysicalRegisters[Ty].empty())
+ PhysicalRegisters[Ty].resize(RegARM32::Reg_NUM);
+ assert(RegNum < PhysicalRegisters[Ty].size());
+ Variable *Reg = PhysicalRegisters[Ty][RegNum];
+ if (Reg == nullptr) {
+ Reg = Func->makeVariable(Ty);
+ Reg->setRegNum(RegNum);
+ PhysicalRegisters[Ty][RegNum] = Reg;
+ // Specially mark SP as an "argument" so that it is considered
+ // live upon function entry.
+ if (RegNum == RegARM32::Reg_sp) {
+ Func->addImplicitArg(Reg);
+ Reg->setIgnoreLiveness();
+ }
+ }
+ return Reg;
+}
+
+void TargetARM32::emitVariable(const Variable *Var) const {
+ Ostream &Str = Ctx->getStrEmit();
+ (void)Var;
+ (void)Str;
+ llvm::report_fatal_error("emitVariable: Not yet implemented");
+}
+
+void TargetARM32::lowerArguments() {
+ llvm::report_fatal_error("lowerArguments: Not yet implemented");
+}
+
+Type TargetARM32::stackSlotType() { return IceType_i32; }
+
+void TargetARM32::addProlog(CfgNode *Node) {
+ (void)Node;
+ llvm::report_fatal_error("addProlog: Not yet implemented");
+}
+
+void TargetARM32::addEpilog(CfgNode *Node) {
+ (void)Node;
+ llvm::report_fatal_error("addEpilog: Not yet implemented");
+}
+
+llvm::SmallBitVector TargetARM32::getRegisterSet(RegSetMask Include,
+ RegSetMask Exclude) const {
+ llvm::SmallBitVector Registers(RegARM32::Reg_NUM);
+
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ if (scratch && (Include & RegSet_CallerSave)) \
+ Registers[RegARM32::val] = true; \
+ if (preserved && (Include & RegSet_CalleeSave)) \
+ Registers[RegARM32::val] = true; \
+ if (stackptr && (Include & RegSet_StackPointer)) \
+ Registers[RegARM32::val] = true; \
+ if (frameptr && (Include & RegSet_FramePointer)) \
+ Registers[RegARM32::val] = true; \
+ if (scratch && (Exclude & RegSet_CallerSave)) \
+ Registers[RegARM32::val] = false; \
+ if (preserved && (Exclude & RegSet_CalleeSave)) \
+ Registers[RegARM32::val] = false; \
+ if (stackptr && (Exclude & RegSet_StackPointer)) \
+ Registers[RegARM32::val] = false; \
+ if (frameptr && (Exclude & RegSet_FramePointer)) \
+ Registers[RegARM32::val] = false;
+
+ REGARM32_TABLE
+
+#undef X
+
+ return Registers;
+}
+
+void TargetARM32::lowerAlloca(const InstAlloca *Inst) {
+ UsesFramePointer = true;
+ // Conservatively require the stack to be aligned. Some stack
+ // adjustment operations implemented below assume that the stack is
+ // aligned before the alloca. All the alloca code ensures that the
+ // stack alignment is preserved after the alloca. The stack alignment
+ // restriction can be relaxed in some cases.
+ NeedsStackAlignment = true;
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerArithmetic(const InstArithmetic *Inst) {
+ switch (Inst->getOp()) {
+ case InstArithmetic::_num:
+ llvm_unreachable("Unknown arithmetic operator");
+ break;
+ case InstArithmetic::Add:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::And:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Or:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Xor:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Sub:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Mul:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Shl:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Lshr:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Ashr:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Udiv:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Sdiv:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Urem:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Srem:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fadd:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fsub:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fmul:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fdiv:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Frem:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+}
+
+void TargetARM32::lowerAssign(const InstAssign *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerBr(const InstBr *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerCall(const InstCall *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerCast(const InstCast *Inst) {
+ InstCast::OpKind CastKind = Inst->getCastKind();
+ switch (CastKind) {
+ default:
+ Func->setError("Cast type not supported");
+ return;
+ case InstCast::Sext: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Zext: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Trunc: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Fptrunc:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Fpext: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Fptosi:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Fptoui:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Sitofp:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Uitofp: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Bitcast: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ }
+}
+
+void TargetARM32::lowerExtractElement(const InstExtractElement *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerFcmp(const InstFcmp *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerInsertElement(const InstInsertElement *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
+ switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
+ case Intrinsics::AtomicCmpxchg: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::AtomicFence:
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::AtomicFenceAll:
+    // NOTE: FenceAll should prevent any load/store from being moved
+ // across the fence (both atomic and non-atomic). The InstARM32Mfence
+ // instruction is currently marked coarsely as "HasSideEffects".
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::AtomicIsLockFree: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::AtomicLoad: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::AtomicRMW:
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::AtomicStore: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Bswap: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Ctpop: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Ctlz: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Cttz: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Fabs: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Longjmp: {
+ InstCall *Call = makeHelperCall(H_call_longjmp, nullptr, 2);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(Instr->getArg(1));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Memcpy: {
+ // In the future, we could potentially emit an inline memcpy/memset, etc.
+ // for intrinsic calls w/ a known length.
+ InstCall *Call = makeHelperCall(H_call_memcpy, nullptr, 3);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(Instr->getArg(1));
+ Call->addArg(Instr->getArg(2));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Memmove: {
+ InstCall *Call = makeHelperCall(H_call_memmove, nullptr, 3);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(Instr->getArg(1));
+ Call->addArg(Instr->getArg(2));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Memset: {
+ // The value operand needs to be extended to a stack slot size
+ // because the PNaCl ABI requires arguments to be at least 32 bits
+ // wide.
+ Operand *ValOp = Instr->getArg(1);
+ assert(ValOp->getType() == IceType_i8);
+ Variable *ValExt = Func->makeVariable(stackSlotType());
+ lowerCast(InstCast::create(Func, InstCast::Zext, ValExt, ValOp));
+ InstCall *Call = makeHelperCall(H_call_memset, nullptr, 3);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(ValExt);
+ Call->addArg(Instr->getArg(2));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::NaClReadTP: {
+ if (Ctx->getFlags().getUseSandboxing()) {
+ llvm::report_fatal_error("Not yet implemented");
+ } else {
+ InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0);
+ lowerCall(Call);
+ }
+ return;
+ }
+ case Intrinsics::Setjmp: {
+ InstCall *Call = makeHelperCall(H_call_setjmp, Instr->getDest(), 1);
+ Call->addArg(Instr->getArg(0));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Sqrt: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Stacksave: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Stackrestore: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Trap:
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::UnknownIntrinsic:
+ Func->setError("Should not be lowering UnknownIntrinsic");
+ return;
+ }
+ return;
+}
+
+void TargetARM32::lowerLoad(const InstLoad *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::doAddressOptLoad() {
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::randomlyInsertNop(float Probability) {
+ RandomNumberGeneratorWrapper RNG(Ctx->getRNG());
+ if (RNG.getTrueWithProbability(Probability)) {
+ llvm::report_fatal_error("Not yet implemented");
+ }
+}
+
+void TargetARM32::lowerPhi(const InstPhi * /*Inst*/) {
+ Func->setError("Phi found in regular instruction list");
+}
+
+void TargetARM32::lowerRet(const InstRet *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerSelect(const InstSelect *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerStore(const InstStore *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::doAddressOptStore() {
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerSwitch(const InstSwitch *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::lowerUnreachable(const InstUnreachable * /*Inst*/) {
+ llvm_unreachable("Not yet implemented");
+}
+
+// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to
+// preserve integrity of liveness analysis. Undef values are also
+// turned into zeroes, since loOperand() and hiOperand() don't expect
+// Undef input.
+void TargetARM32::prelowerPhis() {
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+// Lower the pre-ordered list of assignments into mov instructions.
+// Also has to do some ad-hoc register allocation as necessary.
+void TargetARM32::lowerPhiAssignments(CfgNode *Node,
+ const AssignList &Assignments) {
+ (void)Node;
+ (void)Assignments;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::postLower() {
+ if (Ctx->getFlags().getOptLevel() == Opt_m1)
+ return;
+ // Find two-address non-SSA instructions where Dest==Src0, and set
+ // the DestNonKillable flag to keep liveness analysis consistent.
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetARM32::makeRandomRegisterPermutation(
+ llvm::SmallVectorImpl<int32_t> &Permutation,
+ const llvm::SmallBitVector &ExcludeRegisters) const {
+ (void)Permutation;
+ (void)ExcludeRegisters;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+/* TODO(jvoung): avoid duplicate symbols with multiple targets.
+void ConstantUndef::emitWithoutDollar(GlobalContext *) const {
+ llvm_unreachable("Not expecting to emitWithoutDollar undef");
+}
+
+void ConstantUndef::emit(GlobalContext *) const {
+ llvm_unreachable("undef value encountered by emitter.");
+}
+*/
+
+TargetDataARM32::TargetDataARM32(GlobalContext *Ctx)
+ : TargetDataLowering(Ctx) {}
+
+void TargetDataARM32::lowerGlobal(const VariableDeclaration &Var) const {
+ (void)Var;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetDataARM32::lowerGlobals(
+ std::unique_ptr<VariableDeclarationList> Vars) const {
+ switch (Ctx->getFlags().getOutFileType()) {
+ case FT_Elf: {
+ ELFObjectWriter *Writer = Ctx->getObjectWriter();
+ Writer->writeDataSection(*Vars, llvm::ELF::R_ARM_ABS32);
+ } break;
+ case FT_Asm:
+ case FT_Iasm: {
+ const IceString &TranslateOnly = Ctx->getFlags().getTranslateOnly();
+ OstreamLocker L(Ctx);
+ for (const VariableDeclaration *Var : *Vars) {
+ if (GlobalContext::matchSymbolName(Var->getName(), TranslateOnly)) {
+ lowerGlobal(*Var);
+ }
+ }
+ } break;
+ }
+}
+
+void TargetDataARM32::lowerConstants() const {
+ if (Ctx->getFlags().getDisableTranslation())
+ return;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+} // end of namespace Ice
diff --git a/src/IceTargetLoweringARM32.def b/src/IceTargetLoweringARM32.def
new file mode 100644
index 0000000..baeec2c
--- /dev/null
+++ b/src/IceTargetLoweringARM32.def
@@ -0,0 +1,20 @@
+//===- subzero/src/IceTargetLoweringARM32.def - ARM32 X-macros --*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines certain patterns for lowering to ARM32 target
+// instructions, in the form of x-macros.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICETARGETLOWERINGARM32_DEF
+#define SUBZERO_SRC_ICETARGETLOWERINGARM32_DEF
+
+// TODO(jvoung): Fill out comparison tables, etc. for 32/64-bit compares.
+
+#endif // SUBZERO_SRC_ICETARGETLOWERINGARM32_DEF
diff --git a/src/IceTargetLoweringARM32.h b/src/IceTargetLoweringARM32.h
new file mode 100644
index 0000000..e973652
--- /dev/null
+++ b/src/IceTargetLoweringARM32.h
@@ -0,0 +1,130 @@
+//===- subzero/src/IceTargetLoweringARM32.h - ARM32 lowering ----*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the TargetLoweringARM32 class, which implements the
+// TargetLowering interface for the ARM 32-bit architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICETARGETLOWERINGARM32_H
+#define SUBZERO_SRC_ICETARGETLOWERINGARM32_H
+
+#include "IceDefs.h"
+#include "IceRegistersARM32.h"
+#include "IceTargetLowering.h"
+
+namespace Ice {
+
+// Skeleton TargetLowering implementation for ARM32. The interface is
+// complete, but the .cpp definitions are largely fatal "Not yet
+// implemented" stubs at this stage.
+class TargetARM32 : public TargetLowering {
+  TargetARM32() = delete;
+  TargetARM32(const TargetARM32 &) = delete;
+  TargetARM32 &operator=(const TargetARM32 &) = delete;
+
+public:
+  // TODO(jvoung): return a unique_ptr.
+  static TargetARM32 *create(Cfg *Func) { return new TargetARM32(Func); }
+
+  void translateOm1() override;
+  void translateO2() override;
+  bool doBranchOpt(Inst *I, const CfgNode *NextNode) override;
+
+  SizeT getNumRegisters() const override { return RegARM32::Reg_NUM; }
+  Variable *getPhysicalRegister(SizeT RegNum, Type Ty = IceType_void) override;
+  IceString getRegName(SizeT RegNum, Type Ty) const override;
+  llvm::SmallBitVector getRegisterSet(RegSetMask Include,
+                                      RegSetMask Exclude) const override;
+  const llvm::SmallBitVector &getRegisterSetForType(Type Ty) const override {
+    return TypeToRegisterSet[Ty];
+  }
+  bool hasFramePointer() const override { return UsesFramePointer; }
+  // The frame/stack base register is fp when a frame pointer is in use,
+  // sp otherwise.
+  SizeT getFrameOrStackReg() const override {
+    return UsesFramePointer ? RegARM32::Reg_fp : RegARM32::Reg_sp;
+  }
+  size_t typeWidthInBytesOnStack(Type Ty) const override {
+    // Round up to the next multiple of 4 bytes. In particular, i1,
+    // i8, and i16 are rounded up to 4 bytes.
+    return (typeWidthInBytes(Ty) + 3) & ~3;
+  }
+  void emitVariable(const Variable *Var) const override;
+  void lowerArguments() override;
+  void addProlog(CfgNode *Node) override;
+  void addEpilog(CfgNode *Node) override;
+
+protected:
+  explicit TargetARM32(Cfg *Func);
+
+  void postLower() override;
+
+  // Per-instruction lowering hooks, one for each high-level Ice
+  // instruction kind.
+  void lowerAlloca(const InstAlloca *Inst) override;
+  void lowerArithmetic(const InstArithmetic *Inst) override;
+  void lowerAssign(const InstAssign *Inst) override;
+  void lowerBr(const InstBr *Inst) override;
+  void lowerCall(const InstCall *Inst) override;
+  void lowerCast(const InstCast *Inst) override;
+  void lowerExtractElement(const InstExtractElement *Inst) override;
+  void lowerFcmp(const InstFcmp *Inst) override;
+  void lowerIcmp(const InstIcmp *Inst) override;
+  void lowerIntrinsicCall(const InstIntrinsicCall *Inst) override;
+  void lowerInsertElement(const InstInsertElement *Inst) override;
+  void lowerLoad(const InstLoad *Inst) override;
+  void lowerPhi(const InstPhi *Inst) override;
+  void lowerRet(const InstRet *Inst) override;
+  void lowerSelect(const InstSelect *Inst) override;
+  void lowerStore(const InstStore *Inst) override;
+  void lowerSwitch(const InstSwitch *Inst) override;
+  void lowerUnreachable(const InstUnreachable *Inst) override;
+  void prelowerPhis() override;
+  void lowerPhiAssignments(CfgNode *Node,
+                           const AssignList &Assignments) override;
+  void doAddressOptLoad() override;
+  void doAddressOptStore() override;
+  void randomlyInsertNop(float Probability) override;
+  void makeRandomRegisterPermutation(
+      llvm::SmallVectorImpl<int32_t> &Permutation,
+      const llvm::SmallBitVector &ExcludeRegisters) const override;
+
+  static Type stackSlotType();
+
+  bool UsesFramePointer;
+  bool NeedsStackAlignment;
+  // Register sets usable for each type, indexed by Type (see
+  // getRegisterSetForType above).
+  llvm::SmallBitVector TypeToRegisterSet[IceType_NUM];
+  llvm::SmallBitVector ScratchRegs;
+  llvm::SmallBitVector RegsUsed;
+  VarList PhysicalRegisters[IceType_NUM];
+  static IceString RegNames[];
+
+private:
+  ~TargetARM32() override {}
+};
+
+// Lowers global data (variables and constant pools) for ARM32. Unlike
+// TargetARM32, this operates on the GlobalContext rather than a
+// per-function Cfg.
+class TargetDataARM32 : public TargetDataLowering {
+  TargetDataARM32() = delete;
+  TargetDataARM32(const TargetDataARM32 &) = delete;
+  TargetDataARM32 &operator=(const TargetDataARM32 &) = delete;
+
+public:
+  static TargetDataLowering *create(GlobalContext *Ctx) {
+    return new TargetDataARM32(Ctx);
+  }
+
+  void lowerGlobals(std::unique_ptr<VariableDeclarationList> Vars) const final;
+  void lowerConstants() const final;
+
+protected:
+  explicit TargetDataARM32(GlobalContext *Ctx);
+
+private:
+  // Emits a single global variable; helper for lowerGlobals().
+  void lowerGlobal(const VariableDeclaration &Var) const;
+  ~TargetDataARM32() override {}
+  template <typename T> static void emitConstantPool(GlobalContext *Ctx);
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICETARGETLOWERINGARM32_H
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp
index bd67844..d4c4c54 100644
--- a/src/IceTargetLoweringX8632.cpp
+++ b/src/IceTargetLoweringX8632.cpp
@@ -266,7 +266,7 @@
Func->getContext()->getFlags().getTargetInstructionSet() -
TargetInstructionSet::X86InstructionSet_Begin)),
IsEbpBasedFrame(false), NeedsStackAlignment(false), FrameSizeLocals(0),
- SpillAreaSizeBytes(0), NextLabelNumber(0) {
+ SpillAreaSizeBytes(0) {
static_assert((X86InstructionSet::End - X86InstructionSet::Begin) ==
(TargetInstructionSet::X86InstructionSet_End -
TargetInstructionSet::X86InstructionSet_Begin),
diff --git a/src/IceTargetLoweringX8632.h b/src/IceTargetLoweringX8632.h
index 9d1bf4a..286e380 100644
--- a/src/IceTargetLoweringX8632.h
+++ b/src/IceTargetLoweringX8632.h
@@ -57,7 +57,6 @@
void lowerArguments() override;
void addProlog(CfgNode *Node) override;
void addEpilog(CfgNode *Node) override;
- SizeT makeNextLabelNumber() { return NextLabelNumber++; }
// Ensure that a 64-bit Variable has been split into 2 32-bit
// Variables, creating them if necessary. This is needed for all
// I64 operations, and it is needed for pushing F64 arguments for
@@ -156,15 +155,6 @@
OperandX8632Mem *FormMemoryOperand(Operand *Ptr, Type Ty);
Variable *makeReg(Type Ty, int32_t RegNum = Variable::NoRegister);
- // Make a call to an external helper function.
- InstCall *makeHelperCall(const IceString &Name, Variable *Dest,
- SizeT MaxSrcs) {
- const bool HasTailCall = false;
- Constant *CallTarget = Ctx->getConstantExternSym(Name);
- InstCall *Call =
- InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall);
- return Call;
- }
static Type stackSlotType();
Variable *copyToReg(Operand *Src, int32_t RegNum = Variable::NoRegister);
@@ -501,7 +491,6 @@
llvm::SmallBitVector TypeToRegisterSet[IceType_NUM];
llvm::SmallBitVector ScratchRegs;
llvm::SmallBitVector RegsUsed;
- SizeT NextLabelNumber;
VarList PhysicalRegisters[IceType_NUM];
static IceString RegNames[];
diff --git a/src/assembler_arm32.h b/src/assembler_arm32.h
new file mode 100644
index 0000000..5452d4a
--- /dev/null
+++ b/src/assembler_arm32.h
@@ -0,0 +1,72 @@
+//===- subzero/src/assembler_arm32.h - Assembler for ARM32 ------*- C++ -*-===//
+//
+// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+//
+// Modified by the Subzero authors.
+//
+//===----------------------------------------------------------------------===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Assembler class for ARM32.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ASSEMBLER_ARM32_H
+#define SUBZERO_SRC_ASSEMBLER_ARM32_H
+
+#include "IceDefs.h"
+#include "IceFixups.h"
+
+#include "assembler.h"
+
+namespace Ice {
+
+// Placeholder integrated assembler for ARM32: construction works, but
+// every emission-related override reports a fatal error until the
+// assembler is actually implemented.
+class AssemblerARM32 : public Assembler {
+  AssemblerARM32(const AssemblerARM32 &) = delete;
+  AssemblerARM32 &operator=(const AssemblerARM32 &) = delete;
+
+public:
+  explicit AssemblerARM32(bool use_far_branches = false) : Assembler() {
+    // This mode is only needed and implemented for MIPS and ARM.
+    assert(!use_far_branches);
+    (void)use_far_branches;
+  }
+  ~AssemblerARM32() override = default;
+
+  void alignFunction() override {
+    llvm::report_fatal_error("Not yet implemented.");
+  }
+
+  // 2^4 = 16-byte bundle alignment.
+  SizeT getBundleAlignLog2Bytes() const override { return 4; }
+
+  llvm::ArrayRef<uint8_t> getNonExecBundlePadding() const override {
+    llvm::report_fatal_error("Not yet implemented.");
+  }
+
+  void padWithNop(intptr_t Padding) override {
+    (void)Padding;
+    llvm::report_fatal_error("Not yet implemented.");
+  }
+
+  void BindCfgNodeLabel(SizeT NodeNumber) override {
+    (void)NodeNumber;
+    llvm::report_fatal_error("Not yet implemented.");
+  }
+
+  bool fixupIsPCRel(FixupKind Kind) const override {
+    (void)Kind;
+    llvm::report_fatal_error("Not yet implemented.");
+  }
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ASSEMBLER_ARM32_H
diff --git a/src/assembler_ia32.h b/src/assembler_ia32.h
index 2329fc0..be19162 100644
--- a/src/assembler_ia32.h
+++ b/src/assembler_ia32.h
@@ -1,4 +1,5 @@
//===- subzero/src/assembler_ia32.h - Assembler for x86-32 ------*- C++ -*-===//
+//
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
@@ -18,8 +19,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef SUBZERO_SRC_ASSEMBLER_IA32_H_
-#define SUBZERO_SRC_ASSEMBLER_IA32_H_
+#ifndef SUBZERO_SRC_ASSEMBLER_IA32_H
+#define SUBZERO_SRC_ASSEMBLER_IA32_H
#include "IceConditionCodesX8632.h"
#include "IceDefs.h"
@@ -882,4 +883,4 @@
} // end of namespace x86
} // end of namespace Ice
-#endif // SUBZERO_SRC_ASSEMBLER_IA32_H_
+#endif // SUBZERO_SRC_ASSEMBLER_IA32_H
diff --git a/tests_lit/llvm2ice_tests/arith-opt.ll b/tests_lit/llvm2ice_tests/arith-opt.ll
index 66be97f..1f6a69b 100644
--- a/tests_lit/llvm2ice_tests/arith-opt.ll
+++ b/tests_lit/llvm2ice_tests/arith-opt.ll
@@ -5,6 +5,13 @@
; RUN: %p2i -i %s --filetype=asm --args --verbose inst -threads=0 | FileCheck %s
+; TODO(jvoung): Enable test when it does not llvm::report_fatal_error.
+; The test runner wrappers don't handle expected errors,
+; so we can't just "not" the command.
+; RUIN: %if --need=target_ARM32 --command %p2i -i %s --filetype=asm \
+; RUIN: --args --verbose inst -threads=0 --target arm32 \
+; RUIN: | %if --need=target_ARM32 --command FileCheck %s --check-prefix ARM32
+
define i32 @Add(i32 %a, i32 %b) {
; CHECK: define i32 @Add
entry: