First patch for MIPS in the Subzero compiler
BUG= https://code.google.com/p/nativeclient/issues/detail?id=4167
Move issue https://codereview.chromium.org/1159823004/ here so that
it's under the proper email.
Review URL: https://codereview.chromium.org/1169533003
diff --git a/Makefile.standalone b/Makefile.standalone
index a641ce1..520f3b3 100644
--- a/Makefile.standalone
+++ b/Makefile.standalone
@@ -187,6 +187,7 @@
IceTargetLowering.cpp \
IceTargetLoweringARM32.cpp \
IceTargetLoweringX8632.cpp \
+ IceTargetLoweringMIPS32.cpp \
IceThreading.cpp \
IceTimerTree.cpp \
IceTranslator.cpp \
diff --git a/src/IceClFlags.cpp b/src/IceClFlags.cpp
index 069c3e3..a433b9a 100644
--- a/src/IceClFlags.cpp
+++ b/src/IceClFlags.cpp
@@ -136,7 +136,10 @@
clEnumValN(Ice::Target_X8664, "x86_64", "x86-64 (same as x8664)"),
clEnumValN(Ice::Target_ARM32, "arm", "arm32"),
clEnumValN(Ice::Target_ARM32, "arm32", "arm32 (same as arm)"),
- clEnumValN(Ice::Target_ARM64, "arm64", "arm64"), clEnumValEnd));
+ clEnumValN(Ice::Target_ARM64, "arm64", "arm64"),
+ clEnumValN(Ice::Target_MIPS32, "mips", "mips32"),
+ clEnumValN(Ice::Target_MIPS32, "mips32", "mips32 (same as mips)"),
+ clEnumValEnd));
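+// With the new target values above, the MIPS32 backend can presumably be
+// selected from the standalone pnacl-sz driver with something like
+// "pnacl-sz -target=mips32 input.pexe", though MIPS32 code emission is
+// still almost entirely stubbed out at this point.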
cl::opt<Ice::TargetInstructionSet> TargetInstructionSet(
"mattr", cl::desc("Target architecture attributes"),
cl::init(Ice::X86InstructionSet_SSE2),
diff --git a/src/IceInstMIPS32.def b/src/IceInstMIPS32.def
new file mode 100644
index 0000000..3b6f387
--- /dev/null
+++ b/src/IceInstMIPS32.def
@@ -0,0 +1,81 @@
+//===- subzero/src/IceInstMIPS32.def - X-Macros for MIPS32 insts --*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines properties of MIPS32 instructions in the form of x-macros.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEINSTMIPS32_DEF
+#define SUBZERO_SRC_ICEINSTMIPS32_DEF
+
+// NOTE: SP and the other special registers are not marked isInt, to keep
+// them out of register allocation.
+// TODO(reed kotler): This needs to be scrubbed and is a placeholder to get
+// the MIPS skeleton in.
+//
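+// A rough sketch of how such a table is meant to be consumed (the real
+// consumer is IceRegistersMIPS32.h later in this patch): a client defines
+// X over the nine columns, expands the table, then undefines X, e.g.
+// (line-continuation backslashes omitted):
+//
+//   #define X(val, encode, name, scratch, preserved, stackptr, frameptr,
+//             isInt, isFP)
+//     val,
+//   enum AllRegisters { REGMIPS32_GPR_TABLE Reg_NUM };
+//   #undef X
+//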
+#define REGMIPS32_GPR_TABLE \
+ /* val, encode, name, scratch, preserved, stackptr, frameptr, isInt, isFP */ \
+ X(Reg_ZERO, = 0, "zero", 0, 0, 0, 0, 0, 0) \
+ X(Reg_AT, = Reg_ZERO + 1, "at", 1, 0, 0, 0, 1, 0) \
+ X(Reg_V0, = Reg_ZERO + 2, "v0", 1, 0, 0, 0, 1, 0) \
+ X(Reg_V1, = Reg_ZERO + 3, "v1", 1, 0, 0, 0, 1, 0) \
+ X(Reg_A0, = Reg_ZERO + 4, "a0", 1, 0, 0, 0, 1, 0) \
+ X(Reg_A1, = Reg_ZERO + 5, "a1", 1, 0, 0, 0, 1, 0) \
+ X(Reg_A2, = Reg_ZERO + 6, "a2", 1, 0, 0, 0, 1, 0) \
+ X(Reg_A3, = Reg_ZERO + 7, "a3", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T0, = Reg_ZERO + 8, "t0", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T1, = Reg_ZERO + 9, "t1", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T2, = Reg_ZERO + 10, "t2", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T3, = Reg_ZERO + 11, "t3", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T4, = Reg_ZERO + 12, "t4", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T5, = Reg_ZERO + 13, "t5", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T6, = Reg_ZERO + 14, "t6", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T7, = Reg_ZERO + 15, "t7", 1, 0, 0, 0, 1, 0) \
+ X(Reg_S0, = Reg_ZERO + 16, "s0", 0, 1, 0, 0, 1, 0) \
+ X(Reg_S1, = Reg_ZERO + 17, "s1", 0, 1, 0, 0, 1, 0) \
+ X(Reg_S2, = Reg_ZERO + 18, "s2", 0, 1, 0, 0, 1, 0) \
+ X(Reg_S3, = Reg_ZERO + 19, "s3", 0, 1, 0, 0, 1, 0) \
+ X(Reg_S4, = Reg_ZERO + 20, "s4", 0, 1, 0, 0, 1, 0) \
+ X(Reg_S5, = Reg_ZERO + 21, "s5", 0, 1, 0, 0, 1, 0) \
+ X(Reg_S6, = Reg_ZERO + 22, "s6", 0, 1, 0, 0, 1, 0) \
+ X(Reg_S7, = Reg_ZERO + 23, "s7", 0, 1, 0, 0, 1, 0) \
+ X(Reg_T8, = Reg_ZERO + 24, "t8", 1, 0, 0, 0, 1, 0) \
+ X(Reg_T9, = Reg_ZERO + 25, "t9", 1, 0, 0, 0, 1, 0) \
+ X(Reg_K0, = Reg_ZERO + 26, "k0", 0, 0, 0, 0, 0, 0) \
+ X(Reg_K1, = Reg_ZERO + 27, "k1", 0, 0, 0, 0, 0, 0) \
+ X(Reg_GP, = Reg_ZERO + 28, "gp", 0, 0, 0, 0, 0, 0) \
+ X(Reg_SP, = Reg_ZERO + 29, "sp", 0, 0, 1, 0, 0, 0) \
+ X(Reg_FP, = Reg_ZERO + 30, "fp", 0, 0, 0, 1, 0, 0) \
+ X(Reg_RA, = Reg_ZERO + 31, "ra", 0, 1, 0, 0, 0, 0)
+//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
+// isInt, isFP)
+
+// TODO(reed kotler): List FP registers etc.
+// Be able to grab even registers, and the corresponding odd register
+// for each even register.
+
+// We also provide a combined table, so that there is a single namespace in
+// which all of the registers are considered and have distinct numberings.
+// This is in contrast to the table above, where the "encode" value is the
+// register number as it is encoded in instruction binaries, so values can
+// overlap between register banks.
+#define REGMIPS32_TABLE \
+ /* val, encode, name, scratch, preserved, stackptr, frameptr, isInt, isFP */ \
+ REGMIPS32_GPR_TABLE
+//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
+// isInt, isFP)
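+//
+// For example, once the FP registers are listed (in a hypothetical
+// REGMIPS32_FPR_TABLE), the combined table would be expected to become
+// roughly
+//
+//   #define REGMIPS32_TABLE  REGMIPS32_GPR_TABLE  REGMIPS32_FPR_TABLE
+//
+// with AllRegisters numbering both banks consecutively even though a GPR
+// and an FPR may share the same binary encoding.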
+
+#define REGMIPS32_TABLE_BOUNDS \
+ /* val, init */ \
+ X(Reg_GPR_First, = Reg_ZERO) \
+ X(Reg_GPR_Last, = Reg_RA)
+//#define X(val, init)
+
+// TODO(reed kotler): add condition code tables, etc.
+
+
+#endif // SUBZERO_SRC_ICEINSTMIPS32_DEF
diff --git a/src/IceInstMIPS32.h b/src/IceInstMIPS32.h
new file mode 100644
index 0000000..0c7c2d8
--- /dev/null
+++ b/src/IceInstMIPS32.h
@@ -0,0 +1,28 @@
+//===- subzero/src/IceInstMIPS32.h - MIPS32 machine instrs --*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the InstMIPS32 and OperandMIPS32 classes and
+// their subclasses. This represents the machine instructions and
+// operands used for MIPS32 code selection.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEINSTMIPS32_H
+#define SUBZERO_SRC_ICEINSTMIPS32_H
+
+#include "IceDefs.h"
+
+namespace Ice {
+
+class TargetMIPS32;
+// Fill this in.
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEINSTMIPS32_H
diff --git a/src/IceRegistersMIPS32.h b/src/IceRegistersMIPS32.h
new file mode 100644
index 0000000..d2180bf
--- /dev/null
+++ b/src/IceRegistersMIPS32.h
@@ -0,0 +1,62 @@
+//===- subzero/src/IceRegistersMIPS32.h - Register information --*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the registers and their encodings for MIPS32.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEREGISTERSMIPS32_H
+#define SUBZERO_SRC_ICEREGISTERSMIPS32_H
+
+#include "IceDefs.h"
+#include "IceInstMIPS32.def"
+#include "IceTypes.h"
+
+namespace Ice {
+
+namespace RegMIPS32 {
+
+// An enum of every register. The enum value may not match the encoding
+// used to binary encode register operands in instructions.
+enum AllRegisters {
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ val,
+ REGMIPS32_TABLE
+#undef X
+ Reg_NUM,
+#define X(val, init) val init,
+ REGMIPS32_TABLE_BOUNDS
+#undef X
+};
+
+// An enum of GPR Registers. The enum value does match the encoding used
+// to binary encode register operands in instructions.
+enum GPRRegister {
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ Encoded_##val encode,
+ REGMIPS32_GPR_TABLE
+#undef X
+ Encoded_Not_GPR = -1
+};
+
+// TODO(jvoung): Floating point and vector registers...
+// Need to model overlap and difference in encoding too.
+
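+// A minimal usage sketch for the helper below, assuming RegNum holds an
+// AllRegisters value that names a GPR (e.g. a register assigned during
+// register allocation):
+//
+//   GPRRegister Enc = getEncodedGPR(RegMIPS32::Reg_A0);
+//   assert(Enc == RegMIPS32::Encoded_Reg_A0); // binary encoding 4 ("a0")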
+static inline GPRRegister getEncodedGPR(int32_t RegNum) {
+ assert(Reg_GPR_First <= RegNum && RegNum <= Reg_GPR_Last);
+ return GPRRegister(RegNum - Reg_GPR_First);
+}
+
+} // end of namespace RegMIPS32
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEREGISTERSMIPS32_H
diff --git a/src/IceTargetLowering.cpp b/src/IceTargetLowering.cpp
index 051b00d..b8f11bb 100644
--- a/src/IceTargetLowering.cpp
+++ b/src/IceTargetLowering.cpp
@@ -17,12 +17,14 @@
#include "IceAssemblerARM32.h"
#include "IceAssemblerX8632.h"
+#include "assembler_mips32.h"
#include "IceCfg.h" // setError()
#include "IceCfgNode.h"
#include "IceOperand.h"
#include "IceRegAlloc.h"
#include "IceTargetLowering.h"
#include "IceTargetLoweringARM32.h"
+#include "IceTargetLoweringMIPS32.h"
#include "IceTargetLoweringX8632.h"
namespace Ice {
diff --git a/src/IceTargetLoweringMIPS32.cpp b/src/IceTargetLoweringMIPS32.cpp
new file mode 100644
index 0000000..9ca10b7
--- /dev/null
+++ b/src/IceTargetLoweringMIPS32.cpp
@@ -0,0 +1,705 @@
+//===- subzero/src/IceTargetLoweringMIPS32.cpp - MIPS32 lowering ----------===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TargetLoweringMIPS32 class, which consists almost
+// entirely of the lowering sequence for each high-level instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/MathExtras.h"
+
+#include "IceCfg.h"
+#include "IceCfgNode.h"
+#include "IceClFlags.h"
+#include "IceDefs.h"
+#include "IceELFObjectWriter.h"
+#include "IceGlobalInits.h"
+#include "IceInstMIPS32.h"
+#include "IceLiveness.h"
+#include "IceOperand.h"
+#include "IceRegistersMIPS32.h"
+#include "IceTargetLoweringMIPS32.def"
+#include "IceTargetLoweringMIPS32.h"
+#include "IceUtils.h"
+
+namespace Ice {
+
+TargetMIPS32::TargetMIPS32(Cfg *Func)
+ : TargetLowering(Func), UsesFramePointer(false) {
+ // TODO: Don't initialize IntegerRegisters and friends every time.
+ // Instead, initialize in some sort of static initializer for the
+ // class.
+ llvm::SmallBitVector IntegerRegisters(RegMIPS32::Reg_NUM);
+ llvm::SmallBitVector FloatRegisters(RegMIPS32::Reg_NUM);
+ llvm::SmallBitVector VectorRegisters(RegMIPS32::Reg_NUM);
+ llvm::SmallBitVector InvalidRegisters(RegMIPS32::Reg_NUM);
+ ScratchRegs.resize(RegMIPS32::Reg_NUM);
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ IntegerRegisters[RegMIPS32::val] = isInt; \
+ FloatRegisters[RegMIPS32::val] = isFP; \
+ VectorRegisters[RegMIPS32::val] = isFP; \
+ ScratchRegs[RegMIPS32::val] = scratch;
+ REGMIPS32_TABLE;
+#undef X
+ TypeToRegisterSet[IceType_void] = InvalidRegisters;
+ TypeToRegisterSet[IceType_i1] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i8] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i16] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i32] = IntegerRegisters;
+ TypeToRegisterSet[IceType_i64] = IntegerRegisters;
+ TypeToRegisterSet[IceType_f32] = FloatRegisters;
+ TypeToRegisterSet[IceType_f64] = FloatRegisters;
+ TypeToRegisterSet[IceType_v4i1] = VectorRegisters;
+ TypeToRegisterSet[IceType_v8i1] = VectorRegisters;
+ TypeToRegisterSet[IceType_v16i1] = VectorRegisters;
+ TypeToRegisterSet[IceType_v16i8] = VectorRegisters;
+ TypeToRegisterSet[IceType_v8i16] = VectorRegisters;
+ TypeToRegisterSet[IceType_v4i32] = VectorRegisters;
+ TypeToRegisterSet[IceType_v4f32] = VectorRegisters;
+}
+
+void TargetMIPS32::translateO2() {
+ TimerMarker T(TimerStack::TT_O2, Func);
+
+ // TODO(stichnot): share passes with X86?
+ // https://code.google.com/p/nativeclient/issues/detail?id=4094
+
+ if (!Ctx->getFlags().getPhiEdgeSplit()) {
+ // Lower Phi instructions.
+ Func->placePhiLoads();
+ if (Func->hasError())
+ return;
+ Func->placePhiStores();
+ if (Func->hasError())
+ return;
+ Func->deletePhis();
+ if (Func->hasError())
+ return;
+ Func->dump("After Phi lowering");
+ }
+
+ // Address mode optimization.
+ Func->getVMetadata()->init(VMK_SingleDefs);
+ Func->doAddressOpt();
+
+ // Argument lowering
+ Func->doArgLowering();
+
+ // Target lowering. This requires liveness analysis for some parts
+ // of the lowering decisions, such as compare/branch fusing. If
+ // non-lightweight liveness analysis is used, the instructions need
+ // to be renumbered first. TODO: This renumbering should only be
+ // necessary if we're actually calculating live intervals, which we
+ // only do for register allocation.
+ Func->renumberInstructions();
+ if (Func->hasError())
+ return;
+
+ // TODO: It should be sufficient to use the fastest liveness
+ // calculation, i.e. livenessLightweight(). However, for some
+ // reason that slows down the rest of the translation. Investigate.
+ Func->liveness(Liveness_Basic);
+ if (Func->hasError())
+ return;
+ Func->dump("After MIPS32 address mode opt");
+
+ Func->genCode();
+ if (Func->hasError())
+ return;
+ Func->dump("After MIPS32 codegen");
+
+ // Register allocation. This requires instruction renumbering and
+ // full liveness analysis.
+ Func->renumberInstructions();
+ if (Func->hasError())
+ return;
+ Func->liveness(Liveness_Intervals);
+ if (Func->hasError())
+ return;
+ // Validate the live range computations. The expensive validation
+ // call is deliberately only made when assertions are enabled.
+ assert(Func->validateLiveness());
+ // The post-codegen dump is done here, after liveness analysis and
+ // associated cleanup, to make the dump cleaner and more useful.
+ Func->dump("After initial MIPS32 codegen");
+ Func->getVMetadata()->init(VMK_All);
+ regAlloc(RAK_Global);
+ if (Func->hasError())
+ return;
+ Func->dump("After linear scan regalloc");
+
+ if (Ctx->getFlags().getPhiEdgeSplit()) {
+ Func->advancedPhiLowering();
+ Func->dump("After advanced Phi lowering");
+ }
+
+ // Stack frame mapping.
+ Func->genFrame();
+ if (Func->hasError())
+ return;
+ Func->dump("After stack frame mapping");
+
+ Func->contractEmptyNodes();
+ Func->reorderNodes();
+
+ // Branch optimization. This needs to be done just before code
+ // emission. In particular, no transformations that insert or
+ // reorder CfgNodes should be done after branch optimization. We go
+ // ahead and do it before nop insertion to reduce the amount of work
+ // needed for searching for opportunities.
+ Func->doBranchOpt();
+ Func->dump("After branch optimization");
+
+ // Nop insertion
+ if (Ctx->getFlags().shouldDoNopInsertion()) {
+ Func->doNopInsertion();
+ }
+}
+
+void TargetMIPS32::translateOm1() {
+ TimerMarker T(TimerStack::TT_Om1, Func);
+
+ // TODO: share passes with X86?
+
+ Func->placePhiLoads();
+ if (Func->hasError())
+ return;
+ Func->placePhiStores();
+ if (Func->hasError())
+ return;
+ Func->deletePhis();
+ if (Func->hasError())
+ return;
+ Func->dump("After Phi lowering");
+
+ Func->doArgLowering();
+
+ Func->genCode();
+ if (Func->hasError())
+ return;
+ Func->dump("After initial MIPS32 codegen");
+
+ regAlloc(RAK_InfOnly);
+ if (Func->hasError())
+ return;
+ Func->dump("After regalloc of infinite-weight variables");
+
+ Func->genFrame();
+ if (Func->hasError())
+ return;
+ Func->dump("After stack frame mapping");
+
+ // Nop insertion
+ if (Ctx->getFlags().shouldDoNopInsertion()) {
+ Func->doNopInsertion();
+ }
+}
+
+bool TargetMIPS32::doBranchOpt(Inst *I, const CfgNode *NextNode) {
+ (void)I;
+ (void)NextNode;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+IceString TargetMIPS32::RegNames[] = {
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ name,
+ REGMIPS32_TABLE
+#undef X
+};
+
+IceString TargetMIPS32::getRegName(SizeT RegNum, Type Ty) const {
+ assert(RegNum < RegMIPS32::Reg_NUM);
+ (void)Ty;
+ return RegNames[RegNum];
+}
+
+Variable *TargetMIPS32::getPhysicalRegister(SizeT RegNum, Type Ty) {
+ if (Ty == IceType_void)
+ Ty = IceType_i32;
+ if (PhysicalRegisters[Ty].empty())
+ PhysicalRegisters[Ty].resize(RegMIPS32::Reg_NUM);
+ assert(RegNum < PhysicalRegisters[Ty].size());
+ Variable *Reg = PhysicalRegisters[Ty][RegNum];
+ if (Reg == nullptr) {
+ Reg = Func->makeVariable(Ty);
+ Reg->setRegNum(RegNum);
+ PhysicalRegisters[Ty][RegNum] = Reg;
+ // Specially mark SP as an "argument" so that it is considered
+ // live upon function entry.
+ if (RegNum == RegMIPS32::Reg_SP) {
+ Func->addImplicitArg(Reg);
+ Reg->setIgnoreLiveness();
+ }
+ }
+ return Reg;
+}
+
+void TargetMIPS32::emitVariable(const Variable *Var) const {
+ Ostream &Str = Ctx->getStrEmit();
+ (void)Var;
+ (void)Str;
+ llvm::report_fatal_error("emitVariable: Not yet implemented");
+}
+
+void TargetMIPS32::lowerArguments() {
+ llvm::report_fatal_error("lowerArguments: Not yet implemented");
+}
+
+Type TargetMIPS32::stackSlotType() { return IceType_i32; }
+
+void TargetMIPS32::addProlog(CfgNode *Node) {
+ (void)Node;
+ llvm::report_fatal_error("addProlog: Not yet implemented");
+}
+
+void TargetMIPS32::addEpilog(CfgNode *Node) {
+ (void)Node;
+ llvm::report_fatal_error("addEpilog: Not yet implemented");
+}
+
+llvm::SmallBitVector TargetMIPS32::getRegisterSet(RegSetMask Include,
+ RegSetMask Exclude) const {
+ llvm::SmallBitVector Registers(RegMIPS32::Reg_NUM);
+
+#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
+ isFP) \
+ if (scratch && (Include & RegSet_CallerSave)) \
+ Registers[RegMIPS32::val] = true; \
+ if (preserved && (Include & RegSet_CalleeSave)) \
+ Registers[RegMIPS32::val] = true; \
+ if (stackptr && (Include & RegSet_StackPointer)) \
+ Registers[RegMIPS32::val] = true; \
+ if (frameptr && (Include & RegSet_FramePointer)) \
+ Registers[RegMIPS32::val] = true; \
+ if (scratch && (Exclude & RegSet_CallerSave)) \
+ Registers[RegMIPS32::val] = false; \
+ if (preserved && (Exclude & RegSet_CalleeSave)) \
+ Registers[RegMIPS32::val] = false; \
+ if (stackptr && (Exclude & RegSet_StackPointer)) \
+ Registers[RegMIPS32::val] = false; \
+ if (frameptr && (Exclude & RegSet_FramePointer)) \
+ Registers[RegMIPS32::val] = false;
+
+ REGMIPS32_TABLE
+
+#undef X
+
+ return Registers;
+}
+
+void TargetMIPS32::lowerAlloca(const InstAlloca *Inst) {
+ UsesFramePointer = true;
+ // Conservatively require the stack to be aligned. Some stack
+ // adjustment operations implemented below assume that the stack is
+ // aligned before the alloca. All the alloca code ensures that the
+ // stack alignment is preserved after the alloca. The stack alignment
+ // restriction can be relaxed in some cases.
+ NeedsStackAlignment = true;
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) {
+ switch (Inst->getOp()) {
+ case InstArithmetic::_num:
+ llvm_unreachable("Unknown arithmetic operator");
+ break;
+ case InstArithmetic::Add:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::And:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Or:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Xor:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Sub:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Mul:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Shl:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Lshr:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Ashr:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Udiv:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Sdiv:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Urem:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Srem:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fadd:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fsub:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fmul:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Fdiv:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstArithmetic::Frem:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+}
+
+void TargetMIPS32::lowerAssign(const InstAssign *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerBr(const InstBr *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerCall(const InstCall *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerCast(const InstCast *Inst) {
+ InstCast::OpKind CastKind = Inst->getCastKind();
+ switch (CastKind) {
+ default:
+ Func->setError("Cast type not supported");
+ return;
+ case InstCast::Sext: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Zext: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Trunc: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Fptrunc:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Fpext: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Fptosi:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Fptoui:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Sitofp:
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ case InstCast::Uitofp: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ case InstCast::Bitcast: {
+ llvm::report_fatal_error("Not yet implemented");
+ break;
+ }
+ }
+}
+
+void TargetMIPS32::lowerExtractElement(const InstExtractElement *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerFcmp(const InstFcmp *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerIcmp(const InstIcmp *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerInsertElement(const InstInsertElement *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
+ switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
+ case Intrinsics::AtomicCmpxchg: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::AtomicFence:
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::AtomicFenceAll:
+ // NOTE: FenceAll should prevent any load/store from being moved
+ // across the fence (both atomic and non-atomic). The InstMIPS32Mfence
+ // instruction is currently marked coarsely as "HasSideEffects".
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::AtomicIsLockFree: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::AtomicLoad: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::AtomicRMW:
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::AtomicStore: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Bswap: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Ctpop: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Ctlz: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Cttz: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Fabs: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Longjmp: {
+ InstCall *Call = makeHelperCall(H_call_longjmp, nullptr, 2);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(Instr->getArg(1));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Memcpy: {
+ // In the future, we could potentially emit an inline memcpy/memset, etc.
+ // for intrinsic calls w/ a known length.
+ InstCall *Call = makeHelperCall(H_call_memcpy, nullptr, 3);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(Instr->getArg(1));
+ Call->addArg(Instr->getArg(2));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Memmove: {
+ InstCall *Call = makeHelperCall(H_call_memmove, nullptr, 3);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(Instr->getArg(1));
+ Call->addArg(Instr->getArg(2));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Memset: {
+ // The value operand needs to be extended to a stack slot size
+ // because the PNaCl ABI requires arguments to be at least 32 bits
+ // wide.
+ Operand *ValOp = Instr->getArg(1);
+ assert(ValOp->getType() == IceType_i8);
+ Variable *ValExt = Func->makeVariable(stackSlotType());
+ lowerCast(InstCast::create(Func, InstCast::Zext, ValExt, ValOp));
+ InstCall *Call = makeHelperCall(H_call_memset, nullptr, 3);
+ Call->addArg(Instr->getArg(0));
+ Call->addArg(ValExt);
+ Call->addArg(Instr->getArg(2));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::NaClReadTP: {
+ if (Ctx->getFlags().getUseSandboxing()) {
+ llvm::report_fatal_error("Not yet implemented");
+ } else {
+ InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0);
+ lowerCall(Call);
+ }
+ return;
+ }
+ case Intrinsics::Setjmp: {
+ InstCall *Call = makeHelperCall(H_call_setjmp, Instr->getDest(), 1);
+ Call->addArg(Instr->getArg(0));
+ lowerCall(Call);
+ return;
+ }
+ case Intrinsics::Sqrt: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Stacksave: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Stackrestore: {
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ }
+ case Intrinsics::Trap:
+ llvm::report_fatal_error("Not yet implemented");
+ return;
+ case Intrinsics::UnknownIntrinsic:
+ Func->setError("Should not be lowering UnknownIntrinsic");
+ return;
+ }
+ return;
+}
+
+void TargetMIPS32::lowerLoad(const InstLoad *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::doAddressOptLoad() {
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::randomlyInsertNop(float Probability) {
+ RandomNumberGeneratorWrapper RNG(Ctx->getRNG());
+ if (RNG.getTrueWithProbability(Probability)) {
+ llvm::report_fatal_error("Not yet implemented");
+ }
+}
+
+void TargetMIPS32::lowerPhi(const InstPhi * /*Inst*/) {
+ Func->setError("Phi found in regular instruction list");
+}
+
+void TargetMIPS32::lowerRet(const InstRet *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerSelect(const InstSelect *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerStore(const InstStore *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::doAddressOptStore() {
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerSwitch(const InstSwitch *Inst) {
+ (void)Inst;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::lowerUnreachable(const InstUnreachable * /*Inst*/) {
+ llvm_unreachable("Not yet implemented");
+}
+
+// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to
+// preserve integrity of liveness analysis. Undef values are also
+// turned into zeroes, since loOperand() and hiOperand() don't expect
+// Undef input.
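+//
+// For example (a sketch only, in pseudo-IR; this lowering is not
+// implemented yet), something like
+//   a:i64 = phi [b:i64, %pred1], [undef, %pred2]
+// would be rewritten as
+//   a.lo:i32 = phi [b.lo:i32, %pred1], [0, %pred2]
+//   a.hi:i32 = phi [b.hi:i32, %pred1], [0, %pred2]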
+void TargetMIPS32::prelowerPhis() {
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+// Lower the pre-ordered list of assignments into mov instructions.
+// Also has to do some ad-hoc register allocation as necessary.
+void TargetMIPS32::lowerPhiAssignments(CfgNode *Node,
+ const AssignList &Assignments) {
+ (void)Node;
+ (void)Assignments;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::postLower() {
+ if (Ctx->getFlags().getOptLevel() == Opt_m1)
+ return;
+ // Find two-address non-SSA instructions where Dest==Src0, and set
+ // the DestNonKillable flag to keep liveness analysis consistent.
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetMIPS32::makeRandomRegisterPermutation(
+ llvm::SmallVectorImpl<int32_t> &Permutation,
+ const llvm::SmallBitVector &ExcludeRegisters) const {
+ (void)Permutation;
+ (void)ExcludeRegisters;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+/* TODO(jvoung): avoid duplicate symbols with multiple targets.
+void ConstantUndef::emitWithoutDollar(GlobalContext *) const {
+ llvm_unreachable("Not expecting to emitWithoutDollar undef");
+}
+
+void ConstantUndef::emit(GlobalContext *) const {
+ llvm_unreachable("undef value encountered by emitter.");
+}
+*/
+
+TargetDataMIPS32::TargetDataMIPS32(GlobalContext *Ctx)
+ : TargetDataLowering(Ctx) {}
+
+void TargetDataMIPS32::lowerGlobal(const VariableDeclaration &Var) const {
+ (void)Var;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+void TargetDataMIPS32::lowerGlobals(
+ std::unique_ptr<VariableDeclarationList> Vars) const {
+ switch (Ctx->getFlags().getOutFileType()) {
+ case FT_Elf: {
+ ELFObjectWriter *Writer = Ctx->getObjectWriter();
+ Writer->writeDataSection(*Vars, llvm::ELF::R_MIPS_GLOB_DAT);
+ } break;
+ case FT_Asm:
+ case FT_Iasm: {
+ const IceString &TranslateOnly = Ctx->getFlags().getTranslateOnly();
+ OstreamLocker L(Ctx);
+ for (const VariableDeclaration *Var : *Vars) {
+ if (GlobalContext::matchSymbolName(Var->getName(), TranslateOnly)) {
+ lowerGlobal(*Var);
+ }
+ }
+ } break;
+ }
+}
+
+void TargetDataMIPS32::lowerConstants() const {
+ if (Ctx->getFlags().getDisableTranslation())
+ return;
+ llvm::report_fatal_error("Not yet implemented");
+}
+
+} // end of namespace Ice
diff --git a/src/IceTargetLoweringMIPS32.def b/src/IceTargetLoweringMIPS32.def
new file mode 100644
index 0000000..6242a7e
--- /dev/null
+++ b/src/IceTargetLoweringMIPS32.def
@@ -0,0 +1,20 @@
+//===- subzero/src/IceTargetLoweringMIPS32.def - MIPS32 X-macros -*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines certain patterns for lowering to MIPS32 target
+// instructions, in the form of x-macros.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICETARGETLOWERINGMIPS32_DEF
+#define SUBZERO_SRC_ICETARGETLOWERINGMIPS32_DEF
+
+// TODO(reed kotler): Fill out comparison tables, etc. for 32/64-bit compares.
+
+#endif // SUBZERO_SRC_ICETARGETLOWERINGMIPS32_DEF
diff --git a/src/IceTargetLoweringMIPS32.h b/src/IceTargetLoweringMIPS32.h
new file mode 100644
index 0000000..cb583b5
--- /dev/null
+++ b/src/IceTargetLoweringMIPS32.h
@@ -0,0 +1,149 @@
+//===- subzero/src/IceTargetLoweringMIPS32.h - MIPS32 lowering ---*- C++-*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the TargetLoweringMIPS32 class, which implements the
+// TargetLowering interface for the MIPS 32-bit architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICETARGETLOWERINGMIPS32_H
+#define SUBZERO_SRC_ICETARGETLOWERINGMIPS32_H
+
+#include "IceDefs.h"
+#include "IceInstMIPS32.h"
+#include "IceRegistersMIPS32.h"
+#include "IceTargetLowering.h"
+
+namespace Ice {
+
+class TargetMIPS32 : public TargetLowering {
+ TargetMIPS32() = delete;
+ TargetMIPS32(const TargetMIPS32 &) = delete;
+ TargetMIPS32 &operator=(const TargetMIPS32 &) = delete;
+
+public:
+ // TODO(jvoung): return a unique_ptr.
+ static TargetMIPS32 *create(Cfg *Func) { return new TargetMIPS32(Func); }
+
+ void translateOm1() override;
+ void translateO2() override;
+ bool doBranchOpt(Inst *I, const CfgNode *NextNode) override;
+
+ SizeT getNumRegisters() const override { return RegMIPS32::Reg_NUM; }
+ Variable *getPhysicalRegister(SizeT RegNum, Type Ty = IceType_void) override;
+ IceString getRegName(SizeT RegNum, Type Ty) const override;
+ llvm::SmallBitVector getRegisterSet(RegSetMask Include,
+ RegSetMask Exclude) const override;
+ const llvm::SmallBitVector &getRegisterSetForType(Type Ty) const override {
+ return TypeToRegisterSet[Ty];
+ }
+ bool hasFramePointer() const override { return UsesFramePointer; }
+ SizeT getFrameOrStackReg() const override {
+ return UsesFramePointer ? RegMIPS32::Reg_FP : RegMIPS32::Reg_SP;
+ }
+ size_t typeWidthInBytesOnStack(Type Ty) const override {
+ // Round up to the next multiple of 4 bytes. In particular, i1,
+ // i8, and i16 are rounded up to 4 bytes.
+ return (typeWidthInBytes(Ty) + 3) & ~3;
+ }
+ void emitVariable(const Variable *Var) const override;
+
+ const char *getConstantPrefix() const final { return ""; }
+ void emit(const ConstantUndef *C) const final {
+ llvm::report_fatal_error("Not yet implemented");
+ }
+ void emit(const ConstantInteger32 *C) const final {
+ llvm::report_fatal_error("Not yet implemented");
+ }
+ void emit(const ConstantInteger64 *C) const final {
+ llvm::report_fatal_error("Not yet implemented");
+ }
+ void emit(const ConstantFloat *C) const final {
+ llvm::report_fatal_error("Not yet implemented");
+ }
+ void emit(const ConstantDouble *C) const final {
+ llvm::report_fatal_error("Not yet implemented");
+ }
+
+ void lowerArguments() override;
+ void addProlog(CfgNode *Node) override;
+ void addEpilog(CfgNode *Node) override;
+
+protected:
+ explicit TargetMIPS32(Cfg *Func);
+
+ void postLower() override;
+
+ void lowerAlloca(const InstAlloca *Inst) override;
+ void lowerArithmetic(const InstArithmetic *Inst) override;
+ void lowerAssign(const InstAssign *Inst) override;
+ void lowerBr(const InstBr *Inst) override;
+ void lowerCall(const InstCall *Inst) override;
+ void lowerCast(const InstCast *Inst) override;
+ void lowerExtractElement(const InstExtractElement *Inst) override;
+ void lowerFcmp(const InstFcmp *Inst) override;
+ void lowerIcmp(const InstIcmp *Inst) override;
+ void lowerIntrinsicCall(const InstIntrinsicCall *Inst) override;
+ void lowerInsertElement(const InstInsertElement *Inst) override;
+ void lowerLoad(const InstLoad *Inst) override;
+ void lowerPhi(const InstPhi *Inst) override;
+ void lowerRet(const InstRet *Inst) override;
+ void lowerSelect(const InstSelect *Inst) override;
+ void lowerStore(const InstStore *Inst) override;
+ void lowerSwitch(const InstSwitch *Inst) override;
+ void lowerUnreachable(const InstUnreachable *Inst) override;
+ void prelowerPhis() override;
+ void lowerPhiAssignments(CfgNode *Node,
+ const AssignList &Assignments) override;
+ void doAddressOptLoad() override;
+ void doAddressOptStore() override;
+ void randomlyInsertNop(float Probability) override;
+ void makeRandomRegisterPermutation(
+ llvm::SmallVectorImpl<int32_t> &Permutation,
+ const llvm::SmallBitVector &ExcludeRegisters) const override;
+
+ static Type stackSlotType();
+
+ bool UsesFramePointer;
+ bool NeedsStackAlignment;
+ llvm::SmallBitVector TypeToRegisterSet[IceType_NUM];
+ llvm::SmallBitVector ScratchRegs;
+ llvm::SmallBitVector RegsUsed;
+ VarList PhysicalRegisters[IceType_NUM];
+ static IceString RegNames[];
+
+private:
+ ~TargetMIPS32() override {}
+};
+
+class TargetDataMIPS32 : public TargetDataLowering {
+ TargetDataMIPS32() = delete;
+ TargetDataMIPS32(const TargetDataMIPS32 &) = delete;
+ TargetDataMIPS32 &operator=(const TargetDataMIPS32 &) = delete;
+
+public:
+ static TargetDataLowering *create(GlobalContext *Ctx) {
+ return new TargetDataMIPS32(Ctx);
+ }
+
+ void lowerGlobals(std::unique_ptr<VariableDeclarationList> Vars) const final;
+ void lowerConstants() const final;
+
+protected:
+ explicit TargetDataMIPS32(GlobalContext *Ctx);
+
+private:
+ void lowerGlobal(const VariableDeclaration &Var) const;
+ ~TargetDataMIPS32() override {}
+ template <typename T> static void emitConstantPool(GlobalContext *Ctx);
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICETARGETLOWERINGMIPS32_H
diff --git a/src/IceTypes.def b/src/IceTypes.def
index 0b85ad6..9fd0e13 100644
--- a/src/IceTypes.def
+++ b/src/IceTypes.def
@@ -20,12 +20,13 @@
// ILP32 sandboxes, but for now the 64-bit architectures use ELF64:
// https://code.google.com/p/nativeclient/issues/detail?id=349
// TODO: Whoever adds AArch64 will need to set ABI e_flags.
-#define TARGETARCH_TABLE \
- /* enum value, printable string, is_elf64, e_machine, e_flags */ \
- X(Target_X8632, "x86-32", false, EM_386, 0) \
- X(Target_X8664, "x86-64", true, EM_X86_64, 0) \
- X(Target_ARM32, "arm32", false, EM_ARM, EF_ARM_EABI_VER5) \
- X(Target_ARM64, "arm64", true, EM_AARCH64, 0) \
+#define TARGETARCH_TABLE \
+ /* enum value, printable string, is_elf64, e_machine, e_flags */ \
+ X(Target_X8632, "x86-32", false, EM_386, 0) \
+ X(Target_X8664, "x86-64", true, EM_X86_64, 0) \
+ X(Target_ARM32, "arm32", false, EM_ARM, EF_ARM_EABI_VER5) \
+ X(Target_ARM64, "arm64", true, EM_AARCH64, 0) \
+ X(Target_MIPS32, "mips32", false, EM_MIPS, 0) \
//#define X(tag, str, is_elf64, e_machine, e_flags)
#define ICETYPE_TABLE \
diff --git a/src/assembler_mips32.h b/src/assembler_mips32.h
new file mode 100644
index 0000000..b5bca57
--- /dev/null
+++ b/src/assembler_mips32.h
@@ -0,0 +1,75 @@
+//===- subzero/src/assembler_mips32.h - Assembler for MIPS32 ----*- C++ -*-===//
+//
+// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+//
+// Modified by the Subzero authors.
+//
+//===----------------------------------------------------------------------===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Assembler class for MIPS32.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ASSEMBLER_MIPS32_H
+#define SUBZERO_SRC_ASSEMBLER_MIPS32_H
+
+#include "IceAssembler.h"
+#include "IceDefs.h"
+#include "IceFixups.h"
+
+namespace Ice {
+namespace MIPS32 {
+
+class AssemblerMIPS32 : public Assembler {
+ AssemblerMIPS32(const AssemblerMIPS32 &) = delete;
+ AssemblerMIPS32 &operator=(const AssemblerMIPS32 &) = delete;
+
+public:
+ explicit AssemblerMIPS32(bool use_far_branches = false) : Assembler() {
+ // This mode is only needed and implemented for MIPS32 and ARM.
+ assert(!use_far_branches);
+ (void)use_far_branches;
+ }
+ ~AssemblerMIPS32() override = default;
+
+ void alignFunction() override {
+ llvm::report_fatal_error("Not yet implemented.");
+ }
+
+ SizeT getBundleAlignLog2Bytes() const override { return 4; }
+
+ const char *getNonExecPadDirective() const override { return ".TBD"; }
+
+ llvm::ArrayRef<uint8_t> getNonExecBundlePadding() const override {
+ llvm::report_fatal_error("Not yet implemented.");
+ }
+
+ void padWithNop(intptr_t Padding) override {
+ (void)Padding;
+ llvm::report_fatal_error("Not yet implemented.");
+ }
+
+ void bindCfgNodeLabel(SizeT NodeNumber) override {
+ (void)NodeNumber;
+ llvm::report_fatal_error("Not yet implemented.");
+ }
+
+ bool fixupIsPCRel(FixupKind Kind) const override {
+ (void)Kind;
+ llvm::report_fatal_error("Not yet implemented.");
+ }
+};
+
+} // end of namespace MIPS32
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ASSEMBLER_MIPS32_H