Start processing function blocks.

Handle binops and returns.

BUG= https://code.google.com/p/nativeclient/issues/detail?id=3894
R=jvoung@chromium.org, stichnot@chromium.org

Review URL: https://codereview.chromium.org/395193005
diff --git a/Makefile.standalone b/Makefile.standalone
index a562bf2..7740a40 100644
--- a/Makefile.standalone
+++ b/Makefile.standalone
@@ -61,6 +61,7 @@
 	IceTargetLowering.cpp \
 	IceTargetLoweringX8632.cpp \
 	IceTranslator.cpp \
+	IceTypeConverter.cpp \
 	IceTypes.cpp \
 	llvm2ice.cpp \
 	PNaClTranslator.cpp
diff --git a/src/IceConverter.cpp b/src/IceConverter.cpp
index 65f8f66..8dcfce4 100644
--- a/src/IceConverter.cpp
+++ b/src/IceConverter.cpp
@@ -22,6 +22,7 @@
 #include "IceOperand.h"
 #include "IceTargetLowering.h"
 #include "IceTypes.h"
+#include "IceTypeConverter.h"
 
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
@@ -54,12 +55,8 @@
 //
 class LLVM2ICEConverter {
 public:
-  LLVM2ICEConverter(Ice::GlobalContext *Ctx)
-      : Ctx(Ctx), Func(NULL), CurrentNode(NULL) {
-    // All PNaCl pointer widths are 32 bits because of the sandbox
-    // model.
-    SubzeroPointerType = Ice::IceType_i32;
-  }
+  LLVM2ICEConverter(Ice::GlobalContext *Ctx, LLVMContext &LLVMContext)
+      : Ctx(Ctx), Func(NULL), CurrentNode(NULL), TypeConverter(LLVMContext) {}
 
   // Caller is expected to delete the returned Ice::Cfg object.
   Ice::Cfg *convertFunction(const Function *F) {
@@ -67,7 +64,7 @@
     NodeMap.clear();
     Func = new Ice::Cfg(Ctx);
     Func->setFunctionName(F->getName());
-    Func->setReturnType(convertType(F->getReturnType()));
+    Func->setReturnType(convertToIceType(F->getReturnType()));
     Func->setInternal(F->hasInternalLinkage());
 
     // The initial definition/use of each arg is the entry node.
@@ -102,12 +99,13 @@
   // global initializers.
   Ice::Constant *convertConstant(const Constant *Const) {
     if (const GlobalValue *GV = dyn_cast<GlobalValue>(Const)) {
-      return Ctx->getConstantSym(convertType(GV->getType()), 0, GV->getName());
+      return Ctx->getConstantSym(convertToIceType(GV->getType()), 0,
+                                 GV->getName());
     } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(Const)) {
-      return Ctx->getConstantInt(convertIntegerType(CI->getType()),
+      return Ctx->getConstantInt(convertToIceType(CI->getType()),
                                  CI->getZExtValue());
     } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Const)) {
-      Ice::Type Type = convertType(CFP->getType());
+      Ice::Type Type = convertToIceType(CFP->getType());
       if (Type == Ice::IceType_f32)
         return Ctx->getConstantFloat(CFP->getValueAPF().convertToFloat());
       else if (Type == Ice::IceType_f64)
@@ -115,7 +113,7 @@
       llvm_unreachable("Unexpected floating point type");
       return NULL;
     } else if (const UndefValue *CU = dyn_cast<UndefValue>(Const)) {
-      return Ctx->getConstantUndef(convertType(CU->getType()));
+      return Ctx->getConstantUndef(convertToIceType(CU->getType()));
     } else {
       llvm_unreachable("Unhandled constant type");
       return NULL;
@@ -125,7 +123,7 @@
 private:
   // LLVM values (instructions, etc.) are mapped directly to ICE variables.
   // mapValueToIceVar has a version that forces an ICE type on the variable,
-  // and a version that just uses convertType on V.
+  // and a version that just uses convertToIceType on V.
   Ice::Variable *mapValueToIceVar(const Value *V, Ice::Type IceTy) {
     if (IceTy == Ice::IceType_void)
       return NULL;
@@ -137,7 +135,7 @@
   }
 
   Ice::Variable *mapValueToIceVar(const Value *V) {
-    return mapValueToIceVar(V, convertType(V->getType()));
+    return mapValueToIceVar(V, convertToIceType(V->getType()));
   }
 
   Ice::CfgNode *mapBasicBlockToNode(const BasicBlock *BB) {
@@ -147,85 +145,12 @@
     return NodeMap[BB];
   }
 
-  Ice::Type convertIntegerType(const IntegerType *IntTy) const {
-    switch (IntTy->getBitWidth()) {
-    case 1:
-      return Ice::IceType_i1;
-    case 8:
-      return Ice::IceType_i8;
-    case 16:
-      return Ice::IceType_i16;
-    case 32:
-      return Ice::IceType_i32;
-    case 64:
-      return Ice::IceType_i64;
-    default:
-      report_fatal_error(std::string("Invalid PNaCl int type: ") +
-                         LLVMObjectAsString(IntTy));
-      return Ice::IceType_void;
-    }
-  }
-
-  Ice::Type convertVectorType(const VectorType *VecTy) const {
-    unsigned NumElements = VecTy->getNumElements();
-    const Type *ElementType = VecTy->getElementType();
-
-    if (ElementType->isFloatTy()) {
-      if (NumElements == 4)
-        return Ice::IceType_v4f32;
-    } else if (ElementType->isIntegerTy()) {
-      switch (cast<IntegerType>(ElementType)->getBitWidth()) {
-      case 1:
-        if (NumElements == 4)
-          return Ice::IceType_v4i1;
-        if (NumElements == 8)
-          return Ice::IceType_v8i1;
-        if (NumElements == 16)
-          return Ice::IceType_v16i1;
-        break;
-      case 8:
-        if (NumElements == 16)
-          return Ice::IceType_v16i8;
-        break;
-      case 16:
-        if (NumElements == 8)
-          return Ice::IceType_v8i16;
-        break;
-      case 32:
-        if (NumElements == 4)
-          return Ice::IceType_v4i32;
-        break;
-      }
-    }
-
-    report_fatal_error(std::string("Unhandled vector type: ") +
-                       LLVMObjectAsString(VecTy));
-    return Ice::IceType_void;
-  }
-
-  Ice::Type convertType(const Type *Ty) const {
-    switch (Ty->getTypeID()) {
-    case Type::VoidTyID:
-      return Ice::IceType_void;
-    case Type::IntegerTyID:
-      return convertIntegerType(cast<IntegerType>(Ty));
-    case Type::FloatTyID:
-      return Ice::IceType_f32;
-    case Type::DoubleTyID:
-      return Ice::IceType_f64;
-    case Type::PointerTyID:
-      return SubzeroPointerType;
-    case Type::FunctionTyID:
-      return SubzeroPointerType;
-    case Type::VectorTyID:
-      return convertVectorType(cast<VectorType>(Ty));
-    default:
-      report_fatal_error(std::string("Invalid PNaCl type: ") +
-                         LLVMObjectAsString(Ty));
-    }
-
-    llvm_unreachable("convertType");
-    return Ice::IceType_void;
+  Ice::Type convertToIceType(Type *LLVMTy) const {
+    Ice::Type IceTy = TypeConverter.convertToIceType(LLVMTy);
+    if (IceTy == Ice::IceType_NUM)
+      llvm::report_fatal_error(std::string("Invalid PNaCl type ") +
+                               LLVMObjectAsString(LLVMTy));
+    return IceTy;
   }
 
   // Given an LLVM instruction and an operand number, produce the
@@ -404,7 +329,8 @@
 
   Ice::Inst *convertIntToPtrInstruction(const IntToPtrInst *Inst) {
     Ice::Operand *Src = convertOperand(Inst, 0);
-    Ice::Variable *Dest = mapValueToIceVar(Inst, SubzeroPointerType);
+    Ice::Variable *Dest =
+        mapValueToIceVar(Inst, TypeConverter.getIcePointerType());
     return Ice::InstAssign::create(Func, Dest, Src);
   }
 
@@ -622,7 +548,8 @@
     // PNaCl bitcode only contains allocas of byte-granular objects.
     Ice::Operand *ByteCount = convertValue(Inst->getArraySize());
     uint32_t Align = Inst->getAlignment();
-    Ice::Variable *Dest = mapValueToIceVar(Inst, SubzeroPointerType);
+    Ice::Variable *Dest =
+        mapValueToIceVar(Inst, TypeConverter.getIcePointerType());
 
     return Ice::InstAlloca::create(Func, ByteCount, Align, Dest);
   }
@@ -671,22 +598,22 @@
   Ice::GlobalContext *Ctx;
   Ice::Cfg *Func;
   Ice::CfgNode *CurrentNode;
-  Ice::Type SubzeroPointerType;
   std::map<const Value *, Ice::Variable *> VarMap;
   std::map<const BasicBlock *, Ice::CfgNode *> NodeMap;
+  Ice::TypeConverter TypeConverter;
 };
 
 } // end of anonymous namespace
 
 namespace Ice {
 
-void Converter::convertToIce(Module *Mod) {
+void Converter::convertToIce() {
   if (!Ctx->getFlags().DisableGlobals)
-    convertGlobals(Mod);
-  convertFunctions(Mod);
+    convertGlobals();
+  convertFunctions();
 }
 
-void Converter::convertGlobals(Module *Mod) {
+void Converter::convertGlobals() {
   OwningPtr<TargetGlobalInitLowering> GlobalLowering(
       TargetGlobalInitLowering::createLowering(Ctx->getTargetArch(), Ctx));
   for (Module::const_global_iterator I = Mod->global_begin(),
@@ -729,11 +656,11 @@
   GlobalLowering.reset();
 }
 
-void Converter::convertFunctions(Module *Mod) {
+void Converter::convertFunctions() {
   for (Module::const_iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) {
     if (I->empty())
       continue;
-    LLVM2ICEConverter FunctionConverter(Ctx);
+    LLVM2ICEConverter FunctionConverter(Ctx, Mod->getContext());
 
     Timer TConvert;
     Cfg *Fcn = FunctionConverter.convertFunction(I);
diff --git a/src/IceConverter.h b/src/IceConverter.h
index bf81228..b187357 100644
--- a/src/IceConverter.h
+++ b/src/IceConverter.h
@@ -24,16 +24,18 @@
 
 class Converter : public Translator {
 public:
-  Converter(GlobalContext *Ctx) : Translator(Ctx) {}
+  Converter(llvm::Module *Mod, GlobalContext *Ctx, const Ice::ClFlags &Flags)
+      : Translator(Ctx, Flags), Mod(Mod) {}
   /// Converts the LLVM Module to ICE. Sets exit status to false if successful,
   /// true otherwise.
-  void convertToIce(llvm::Module *Mod);
+  void convertToIce();
 
 private:
+  llvm::Module *Mod;
   // Converts globals to ICE, and then machine code.
-  void convertGlobals(llvm::Module *Mod);
+  void convertGlobals();
   // Converts functions to ICE, and then machine code.
-  void convertFunctions(llvm::Module *Mod);
+  void convertFunctions();
   Converter(const Converter &) LLVM_DELETED_FUNCTION;
   Converter &operator=(const Converter &) LLVM_DELETED_FUNCTION;
 };
diff --git a/src/IceInst.cpp b/src/IceInst.cpp
index af7152f..a3d4ee6 100644
--- a/src/IceInst.cpp
+++ b/src/IceInst.cpp
@@ -33,8 +33,6 @@
     ICEINSTARITHMETIC_TABLE
 #undef X
   };
-const size_t InstArithmeticAttributesSize =
-    llvm::array_lengthof(InstArithmeticAttributes);
 
 // Using non-anonymous struct so that array_lengthof works.
 const struct InstCastAttributes_ {
@@ -46,7 +44,6 @@
     ICEINSTCAST_TABLE
 #undef X
   };
-const size_t InstCastAttributesSize = llvm::array_lengthof(InstCastAttributes);
 
 // Using non-anonymous struct so that array_lengthof works.
 const struct InstFcmpAttributes_ {
@@ -58,7 +55,6 @@
     ICEINSTFCMP_TABLE
 #undef X
   };
-const size_t InstFcmpAttributesSize = llvm::array_lengthof(InstFcmpAttributes);
 
 // Using non-anonymous struct so that array_lengthof works.
 const struct InstIcmpAttributes_ {
@@ -70,7 +66,6 @@
     ICEINSTICMP_TABLE
 #undef X
   };
-const size_t InstIcmpAttributesSize = llvm::array_lengthof(InstIcmpAttributes);
 
 } // end of anonymous namespace
 
@@ -228,6 +223,13 @@
   addSource(Source2);
 }
 
+const char *InstArithmetic::getOpName(OpKind Op) {
+  size_t OpIndex = static_cast<size_t>(Op);
+  return OpIndex < InstArithmetic::_num
+             ? InstArithmeticAttributes[OpIndex].DisplayString
+             : "???";
+}
+
 bool InstArithmetic::isCommutative() const {
   return InstArithmeticAttributes[getOp()].IsCommutative;
 }
diff --git a/src/IceInst.h b/src/IceInst.h
index 0a6c61d..fd01e92 100644
--- a/src/IceInst.h
+++ b/src/IceInst.h
@@ -203,6 +203,7 @@
         InstArithmetic(Func, Op, Dest, Source1, Source2);
   }
   OpKind getOp() const { return Op; }
+  static const char *getOpName(OpKind Op);
   bool isCommutative() const;
   virtual void dump(const Cfg *Func) const;
   static bool classof(const Inst *Inst) {
diff --git a/src/IceTranslator.h b/src/IceTranslator.h
index 51e4df0..ec5c032 100644
--- a/src/IceTranslator.h
+++ b/src/IceTranslator.h
@@ -29,13 +29,27 @@
 // machine instructions.
 class Translator {
 public:
-  Translator(GlobalContext *Ctx) : Ctx(Ctx), ErrorStatus(0) {}
+  Translator(GlobalContext *Ctx, const ClFlags &Flags)
+      : Ctx(Ctx), Flags(Flags), ErrorStatus(0) {}
 
   ~Translator();
   bool getErrorStatus() const { return ErrorStatus; }
 
+  GlobalContext *getContext() const { return Ctx; }
+
+  const ClFlags &getFlags() const { return Flags; }
+
+  /// Translates the constructed ICE function Fcn to machine code.
+  /// Takes ownership of Fcn. Note: As a side effect, Field Func is
+  /// set to Fcn.
+  void translateFcn(Cfg *Fcn);
+
+  /// Emits the constant pool.
+  void emitConstants();
+
 protected:
   GlobalContext *Ctx;
+  const ClFlags &Flags;
   // The exit status of the translation. False is successful. True
   // otherwise.
   bool ErrorStatus;
@@ -49,13 +63,6 @@
   // that.
   llvm::OwningPtr<Cfg> Func;
 
-  /// Translates the constructed ICE function Fcn to machine code.
-  /// Note: As a side effect, Field Func is set to Fcn.
-  void translateFcn(Cfg *Fcn);
-
-  /// Emits the constant pool.
-  void emitConstants();
-
 private:
   Translator(const Translator &) LLVM_DELETED_FUNCTION;
   Translator &operator=(const Translator &) LLVM_DELETED_FUNCTION;
diff --git a/src/IceTypeConverter.cpp b/src/IceTypeConverter.cpp
new file mode 100644
index 0000000..bfe1976
--- /dev/null
+++ b/src/IceTypeConverter.cpp
@@ -0,0 +1,108 @@
+//===- subzero/src/IceTypeConverter.cpp - Convert ICE/LLVM Types ----------===//
+//
+//                        The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements how to convert LLVM types to ICE types, and ICE types
+// to LLVM types.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceTypeConverter.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace Ice {
+
+TypeConverter::TypeConverter(llvm::LLVMContext &Context) : Context(Context) {
+  AddLLVMType(IceType_void, llvm::Type::getVoidTy(Context));
+  AddLLVMType(IceType_i1, llvm::IntegerType::get(Context, 1));
+  AddLLVMType(IceType_i8, llvm::IntegerType::get(Context, 8));
+  AddLLVMType(IceType_i16, llvm::IntegerType::get(Context, 16));
+  AddLLVMType(IceType_i32, llvm::IntegerType::get(Context, 32));
+  AddLLVMType(IceType_i64, llvm::IntegerType::get(Context, 64));
+  AddLLVMType(IceType_f32, llvm::Type::getFloatTy(Context));
+  AddLLVMType(IceType_f64, llvm::Type::getDoubleTy(Context));
+  AddLLVMType(IceType_v4i1, llvm::VectorType::get(LLVMTypes[IceType_i1], 4));
+  AddLLVMType(IceType_v8i1, llvm::VectorType::get(LLVMTypes[IceType_i1], 8));
+  AddLLVMType(IceType_v16i1, llvm::VectorType::get(LLVMTypes[IceType_i1], 16));
+  AddLLVMType(IceType_v16i8, llvm::VectorType::get(LLVMTypes[IceType_i8], 16));
+  AddLLVMType(IceType_v8i16, llvm::VectorType::get(LLVMTypes[IceType_i16], 8));
+  AddLLVMType(IceType_v4i32, llvm::VectorType::get(LLVMTypes[IceType_i32], 4));
+  AddLLVMType(IceType_v4f32, llvm::VectorType::get(LLVMTypes[IceType_f32], 4));
+  assert(LLVMTypes.size() == static_cast<size_t>(IceType_NUM));
+}
+
+void TypeConverter::AddLLVMType(Type Ty, llvm::Type *LLVMTy) {
+  assert(static_cast<size_t>(Ty) == LLVMTypes.size());
+  LLVMTypes.push_back(LLVMTy);
+  LLVM2IceMap[LLVMTy] = Ty;
+}
+
+Type TypeConverter::convertToIceTypeOther(llvm::Type *LLVMTy) const {
+  switch (LLVMTy->getTypeID()) {
+  case llvm::Type::PointerTyID:
+  case llvm::Type::FunctionTyID:
+    return getIcePointerType();
+  default:
+    return Ice::IceType_NUM;
+  }
+}
+
+llvm::Type *TypeConverter::getLLVMIntegerType(unsigned NumBits) const {
+  switch (NumBits) {
+  case 1:
+    return LLVMTypes[IceType_i1];
+  case 8:
+    return LLVMTypes[IceType_i8];
+  case 16:
+    return LLVMTypes[IceType_i16];
+  case 32:
+    return LLVMTypes[IceType_i32];
+  case 64:
+    return LLVMTypes[IceType_i64];
+  default:
+    return NULL;
+  }
+}
+
+llvm::Type *TypeConverter::getLLVMVectorType(unsigned Size, Type Ty) const {
+  switch (Ty) {
+  case IceType_i1:
+    switch (Size) {
+    case 4:
+      return convertToLLVMType(IceType_v4i1);
+    case 8:
+      return convertToLLVMType(IceType_v8i1);
+    case 16:
+      return convertToLLVMType(IceType_v16i1);
+    default:
+      break;
+    }
+    break;
+  case IceType_i8:
+    if (Size == 16)
+      return convertToLLVMType(IceType_v16i8);
+    break;
+  case IceType_i16:
+    if (Size == 8)
+      return convertToLLVMType(IceType_v8i16);
+    break;
+  case IceType_i32:
+    if (Size == 4)
+      return convertToLLVMType(IceType_v4i32);
+    break;
+  case IceType_f32:
+    if (Size == 4)
+      return convertToLLVMType(IceType_v4f32);
+    break;
+  default:
+    break;
+  }
+  return NULL;
+}
+
+} // end of Ice namespace.
diff --git a/src/IceTypeConverter.h b/src/IceTypeConverter.h
new file mode 100644
index 0000000..81a955c
--- /dev/null
+++ b/src/IceTypeConverter.h
@@ -0,0 +1,80 @@
+//===- subzero/src/IceTypeConverter.h - Convert ICE/LLVM Types --*- C++ -*-===//
+//
+//                        The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines how to convert LLVM types to ICE types, and ICE types
+// to LLVM types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICETYPECONVERTER_H
+#define SUBZERO_SRC_ICETYPECONVERTER_H
+
+#include "IceDefs.h"
+#include "IceTypes.h"
+#include "llvm/IR/DerivedTypes.h"
+
+namespace llvm {
+class LLVMContext;
+} // end of llvm namespace.
+
+namespace Ice {
+
+/// Converts LLVM types to ICE types, and ICE types to LLVM types.
+class TypeConverter {
+  TypeConverter(const TypeConverter &) LLVM_DELETED_FUNCTION;
+  TypeConverter &operator=(const TypeConverter &) LLVM_DELETED_FUNCTION;
+
+public:
+  /// Context is the context to use to build llvm types.
+  TypeConverter(llvm::LLVMContext &Context);
+
+  /// Returns the LLVM type for the corresponding ICE type Ty.
+  llvm::Type *convertToLLVMType(Type Ty) const {
+    // Note: We use "at" here in case Ty wasn't registered.
+    return LLVMTypes.at(Ty);
+  }
+
+  /// Converts LLVM type LLVMTy to an ICE type. Returns
+  /// Ice::IceType_NUM if unable to convert.
+  Type convertToIceType(llvm::Type *LLVMTy) const {
+    std::map<llvm::Type *, Type>::const_iterator Pos = LLVM2IceMap.find(LLVMTy);
+    if (Pos == LLVM2IceMap.end())
+      return convertToIceTypeOther(LLVMTy);
+    return Pos->second;
+  }
+
+  /// Returns ICE model of pointer type.
+  Type getIcePointerType() const { return IceType_i32; }
+
+  /// Returns LLVM integer type with specified number of bits. Returns
+  /// NULL if not a valid PNaCl integer type.
+  llvm::Type *getLLVMIntegerType(unsigned NumBits) const;
+
+  /// Returns the LLVM vector type for Size and Ty arguments. Returns
+  /// NULL if not a valid PNaCl vector type.
+  llvm::Type *getLLVMVectorType(unsigned Size, Type Ty) const;
+
+private:
+  // The LLVM context to use to build LLVM types.
+  llvm::LLVMContext &Context;
+  // The list of allowable LLVM types. Indexed by ICE type.
+  std::vector<llvm::Type *> LLVMTypes;
+  // The inverse mapping of LLVMTypes.
+  std::map<llvm::Type *, Type> LLVM2IceMap;
+
+  // Add LLVM/ICE pair to internal tables.
+  void AddLLVMType(Type Ty, llvm::Type *LLVMTy);
+
+  // Converts types not in LLVM2IceMap.
+  Type convertToIceTypeOther(llvm::Type *LLVMTy) const;
+};
+
+} // end of Ice namespace.
+
+#endif // SUBZERO_SRC_ICETYPECONVERTER_H
diff --git a/src/IceTypes.cpp b/src/IceTypes.cpp
index 0724e51..21157d0 100644
--- a/src/IceTypes.cpp
+++ b/src/IceTypes.cpp
@@ -18,74 +18,203 @@
 
 namespace {
 
-const struct {
+// Dummy function to make sure the two type tables have the same
+// enumerated types.
+void __attribute__((unused)) xIceTypeMacroIntegrityCheck() {
+
+  // Show tags match between ICETYPE_TABLE and ICETYPE_PROPS_TABLE.
+
+  // Define a temporary set of enum values based on ICETYPE_TABLE
+  enum {
+#define X(tag, size, align, elts, elty, str) _table_tag_##tag,
+    ICETYPE_TABLE
+#undef X
+        _enum_table_tag_Names
+  };
+  // Define a temporary set of enum values based on ICETYPE_PROPS_TABLE
+  enum {
+#define X(tag, IsVec, IsInt, IsFloat, IsIntArith) _props_table_tag_##tag,
+    ICETYPE_PROPS_TABLE
+#undef X
+        _enum_props_table_tag_Names
+  };
+// Assert that tags in ICETYPE_TABLE are also in ICETYPE_PROPS_TABLE.
+#define X(tag, size, align, elts, elty, str)                                   \
+  STATIC_ASSERT((unsigned)_table_tag_##tag == (unsigned)_props_table_tag_##tag);
+  ICETYPE_TABLE;
+#undef X
+// Assert that tags in ICETYPE_PROPS_TABLE are in ICETYPE_TABLE.
+#define X(tag, IsVec, IsInt, IsFloat, IsIntArith)                              \
+  STATIC_ASSERT((unsigned)_table_tag_##tag == (unsigned)_props_table_tag_##tag);
+  ICETYPE_PROPS_TABLE;
+#undef X
+
+  // Show vector definitions match in ICETYPE_TABLE and
+  // ICETYPE_PROPS_TABLE.
+
+  // Define constants for each element size in ICETYPE_TABLE.
+  enum {
+#define X(tag, size, align, elts, elty, str) _table_elts_##tag = elts,
+    ICETYPE_TABLE
+#undef X
+        _enum_table_elts_Elements = 0
+  };
+  // Define constants for boolean flag if vector in ICETYPE_PROPS_TABLE.
+  enum {
+#define X(tag, IsVec, IsInt, IsFloat, IsIntArith)                              \
+  _props_table_IsVec_##tag = IsVec,
+    ICETYPE_PROPS_TABLE
+#undef X
+  };
+// Verify that the number of vector elements is consistent with IsVec.
+#define X(tag, IsVec, IsInt, IsFloat, IsIntArith)                              \
+  STATIC_ASSERT((_table_elts_##tag > 1) == _props_table_IsVec_##tag);
+  ICETYPE_PROPS_TABLE;
+#undef X
+}
+
+struct TypeAttributeFields {
   size_t TypeWidthInBytes;
   size_t TypeAlignInBytes;
   size_t TypeNumElements;
   Type TypeElementType;
   const char *DisplayString;
-} TypeAttributes[] = {
+};
+
+const struct TypeAttributeFields TypeAttributes[] = {
 #define X(tag, size, align, elts, elty, str)                                   \
   { size, align, elts, elty, str }                                             \
   ,
     ICETYPE_TABLE
 #undef X
-  };
+};
 
-const size_t TypeAttributesSize =
-    sizeof(TypeAttributes) / sizeof(*TypeAttributes);
+struct TypePropertyFields {
+  bool TypeIsVectorType;
+  bool TypeIsIntegerType;
+  bool TypeIsScalarIntegerType;
+  bool TypeIsVectorIntegerType;
+  bool TypeIsIntegerArithmeticType;
+  bool TypeIsFloatingType;
+  bool TypeIsScalarFloatingType;
+  bool TypeIsVectorFloatingType;
+};
+
+const TypePropertyFields TypePropertiesTable[] = {
+#define X(tag, IsVec, IsInt, IsFloat, IsIntArith)                              \
+  {                                                                            \
+    IsVec, IsInt, IsInt && !IsVec, IsInt && IsVec, IsIntArith, IsFloat,        \
+        IsFloat && !IsVec, IsFloat && IsVec                                    \
+  }                                                                            \
+  ,
+    ICETYPE_PROPS_TABLE
+#undef X
+};
 
 } // end anonymous namespace
 
 size_t typeWidthInBytes(Type Ty) {
-  size_t Width = 0;
   size_t Index = static_cast<size_t>(Ty);
-  if (Index < TypeAttributesSize) {
-    Width = TypeAttributes[Index].TypeWidthInBytes;
-  } else {
-    llvm_unreachable("Invalid type for typeWidthInBytes()");
-  }
-  return Width;
+  if (Index < IceType_NUM)
+    return TypeAttributes[Index].TypeWidthInBytes;
+  llvm_unreachable("Invalid type for typeWidthInBytes()");
+  return 0;
 }
 
 size_t typeAlignInBytes(Type Ty) {
-  size_t Align = 0;
   size_t Index = static_cast<size_t>(Ty);
-  if (Index < TypeAttributesSize) {
-    Align = TypeAttributes[Index].TypeAlignInBytes;
-  } else {
-    llvm_unreachable("Invalid type for typeAlignInBytes()");
-  }
-  return Align;
+  if (Index < IceType_NUM)
+    return TypeAttributes[Index].TypeAlignInBytes;
+  llvm_unreachable("Invalid type for typeAlignInBytes()");
+  return 1;
 }
 
 size_t typeNumElements(Type Ty) {
-  size_t NumElements = 0;
   size_t Index = static_cast<size_t>(Ty);
-  if (Index < TypeAttributesSize) {
-    NumElements = TypeAttributes[Index].TypeNumElements;
-  } else {
-    llvm_unreachable("Invalid type for typeNumElements()");
-  }
-  return NumElements;
+  if (Index < IceType_NUM)
+    return TypeAttributes[Index].TypeNumElements;
+  llvm_unreachable("Invalid type for typeNumElements()");
+  return 1;
 }
 
 Type typeElementType(Type Ty) {
-  Type ElementType = IceType_void;
   size_t Index = static_cast<size_t>(Ty);
-  if (Index < TypeAttributesSize) {
-    ElementType = TypeAttributes[Index].TypeElementType;
-  } else {
-    llvm_unreachable("Invalid type for typeElementType()");
-  }
-  return ElementType;
+  if (Index < IceType_NUM)
+    return TypeAttributes[Index].TypeElementType;
+  llvm_unreachable("Invalid type for typeElementType()");
+  return IceType_void;
 }
 
+bool isVectorType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsVectorType;
+  llvm_unreachable("Invalid type for isVectorType()");
+  return false;
+}
+
+bool isIntegerType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsIntegerType;
+  llvm_unreachable("Invalid type for isIntegerType()");
+  return false;
+}
+
+bool isScalarIntegerType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsScalarIntegerType;
+  llvm_unreachable("Invalid type for isScalIntegerType()");
+  return false;
+}
+
+bool isVectorIntegerType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsVectorIntegerType;
+  llvm_unreachable("Invalid type for isVectorIntegerType()");
+  return false;
+}
+
+bool isIntegerArithmeticType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsIntegerArithmeticType;
+  llvm_unreachable("Invalid type for isIntegerArithmeticType()");
+  return false;
+}
+
+bool isFloatingType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsFloatingType;
+  llvm_unreachable("Invalid type for isFloatingType()");
+  return false;
+}
+
+bool isScalarFloatingType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsScalarFloatingType;
+  llvm_unreachable("Invalid type for isScalarFloatingType()");
+  return false;
+}
+
+bool isVectorFloatingType(Type Ty) {
+  size_t Index = static_cast<size_t>(Ty);
+  if (Index < IceType_NUM)
+    return TypePropertiesTable[Index].TypeIsVectorFloatingType;
+  llvm_unreachable("Invalid type for isVectorFloatingType()");
+  return false;
+}
+
+// ======================== Dump routines ======================== //
+
 const char *typeString(Type Ty) {
   size_t Index = static_cast<size_t>(Ty);
-  if (Index < TypeAttributesSize) {
+  if (Index < IceType_NUM)
     return TypeAttributes[Index].DisplayString;
-  }
   llvm_unreachable("Invalid type for typeString");
   return "???";
 }
diff --git a/src/IceTypes.def b/src/IceTypes.def
index 03d302d..6e1935b 100644
--- a/src/IceTypes.def
+++ b/src/IceTypes.def
@@ -35,4 +35,28 @@
   X(IceType_v4f32, 16,  4,     4,      IceType_f32,  "<4 x float>")      \
 //#define X(tag, size, align, elts, elty, str)
 
+// Dictionary:
+//   V - Is vector type.
+//   I - Is integer value (scalar or vector).
+//   F - Is floating point value (scalar or vector).
+//   IA - Is integer arithmetic type.
+#define ICETYPE_PROPS_TABLE                                              \
+  /* Enum Value    V  I  F IA */                                         \
+  X(IceType_void,  0, 0, 0, 0)                                           \
+  X(IceType_i1,    0, 1, 0, 0)                                           \
+  X(IceType_i8,    0, 1, 0, 1)                                           \
+  X(IceType_i16,   0, 1, 0, 1)                                           \
+  X(IceType_i32,   0, 1, 0, 1)                                           \
+  X(IceType_i64,   0, 1, 0, 1)                                           \
+  X(IceType_f32,   0, 0, 1, 0)                                           \
+  X(IceType_f64,   0, 0, 1, 0)                                           \
+  X(IceType_v4i1,  1, 1, 0, 0)                                           \
+  X(IceType_v8i1,  1, 1, 0, 0)                                           \
+  X(IceType_v16i1, 1, 1, 0, 0)                                           \
+  X(IceType_v16i8, 1, 1, 0, 1)                                           \
+  X(IceType_v8i16, 1, 1, 0, 1)                                           \
+  X(IceType_v4i32, 1, 1, 0, 1)                                           \
+  X(IceType_v4f32, 1, 0, 1, 0)                                           \
+//#define X(tag, IsVec, IsInt, IsFloat, IsIntArith)
+
 #endif // SUBZERO_SRC_ICETYPES_DEF
diff --git a/src/IceTypes.h b/src/IceTypes.h
index fa91763..0c67e43 100644
--- a/src/IceTypes.h
+++ b/src/IceTypes.h
@@ -47,7 +47,16 @@
 Type typeElementType(Type Ty);
 const char *typeString(Type Ty);
 
-inline bool isVectorType(Type Ty) { return typeNumElements(Ty) > 1; }
+bool isVectorType(Type Ty);
+
+bool isIntegerType(Type Ty); // scalar or vector
+bool isScalarIntegerType(Type Ty);
+bool isVectorIntegerType(Type Ty);
+bool isIntegerArithmeticType(Type Ty);
+
+bool isFloatingType(Type Ty); // scalar or vector
+bool isScalarFloatingType(Type Ty);
+bool isVectorFloatingType(Type Ty);
 
 template <typename StreamType>
 inline StreamType &operator<<(StreamType &Str, const Type &Ty) {
diff --git a/src/PNaClTranslator.cpp b/src/PNaClTranslator.cpp
index 0eb6d7c..5381315 100644
--- a/src/PNaClTranslator.cpp
+++ b/src/PNaClTranslator.cpp
@@ -14,6 +14,12 @@
 
 #include "PNaClTranslator.h"
 #include "IceCfg.h"
+#include "IceCfgNode.h"
+#include "IceClFlags.h"
+#include "IceDefs.h"
+#include "IceInst.h"
+#include "IceOperand.h"
+#include "IceTypeConverter.h"
 #include "llvm/Bitcode/NaCl/NaClBitcodeDecoders.h"
 #include "llvm/Bitcode/NaCl/NaClBitcodeHeader.h"
 #include "llvm/Bitcode/NaCl/NaClBitcodeParser.h"
@@ -33,28 +39,42 @@
 
 namespace {
 
+// TODO(kschimpf) Remove error recovery once implementation complete.
+static cl::opt<bool> AllowErrorRecovery(
+    "allow-pnacl-reader-error-recovery",
+    cl::desc("Allow error recovery when reading PNaCl bitcode."),
+    cl::init(false));
+
 // Top-level class to read PNaCl bitcode files, and translate to ICE.
 class TopLevelParser : public NaClBitcodeParser {
   TopLevelParser(const TopLevelParser &) LLVM_DELETED_FUNCTION;
   TopLevelParser &operator=(const TopLevelParser &) LLVM_DELETED_FUNCTION;
 
 public:
-  TopLevelParser(const std::string &InputName, NaClBitcodeHeader &Header,
-                 NaClBitstreamCursor &Cursor, bool &ErrorStatus)
-      : NaClBitcodeParser(Cursor),
+  TopLevelParser(Ice::Translator &Translator, const std::string &InputName,
+                 NaClBitcodeHeader &Header, NaClBitstreamCursor &Cursor,
+                 bool &ErrorStatus)
+      : NaClBitcodeParser(Cursor), Translator(Translator),
         Mod(new Module(InputName, getGlobalContext())), Header(Header),
-        ErrorStatus(ErrorStatus), NumErrors(0), NumFunctionIds(0),
-        GlobalVarPlaceHolderType(Type::getInt8Ty(getLLVMContext())) {
+        TypeConverter(getLLVMContext()), ErrorStatus(ErrorStatus), NumErrors(0),
+        NumFunctionIds(0), NumFunctionBlocks(0),
+        GlobalVarPlaceHolderType(convertToLLVMType(Ice::IceType_i8)) {
     Mod->setDataLayout(PNaClDataLayout);
   }
 
   virtual ~TopLevelParser() {}
   LLVM_OVERRIDE;
 
+  Ice::Translator &getTranslator() { return Translator; }
+
+  // Generates error with given Message. Always returns true.
   virtual bool Error(const std::string &Message) LLVM_OVERRIDE {
     ErrorStatus = true;
     ++NumErrors;
-    return NaClBitcodeParser::Error(Message);
+    NaClBitcodeParser::Error(Message);
+    if (!AllowErrorRecovery)
+      report_fatal_error("Unable to continue");
+    return true;
   }
 
   /// Returns the number of errors found while parsing the bitcode
@@ -104,10 +124,20 @@
     DefiningFunctionsList.push_back(ValueIDValues.size());
   }
 
+  /// Returns the value id that should be associated with the
+  /// current function block. Increments internal counters during call
+  /// so that it will be in correct position for next function block.
+  unsigned getNextFunctionBlockValueID() {
+    if (NumFunctionBlocks >= DefiningFunctionsList.size())
+      report_fatal_error(
+          "More function blocks than defined function addresses");
+    return DefiningFunctionsList[NumFunctionBlocks++];
+  }
+
   /// Returns the LLVM IR value associatd with the global value ID.
   Value *getGlobalValueByID(unsigned ID) const {
     if (ID >= ValueIDValues.size())
-      return 0;
+      return NULL;
     return ValueIDValues[ID];
   }
 
@@ -131,7 +161,7 @@
   /// later.
   Constant *getOrCreateGlobalVarRef(unsigned ID) {
     if (ID >= ValueIDValues.size())
-      return 0;
+      return NULL;
     if (Value *C = ValueIDValues[ID])
       return dyn_cast<Constant>(C);
     Constant *C = new GlobalVariable(*Mod, GlobalVarPlaceHolderType, false,
@@ -147,7 +177,7 @@
     if (ID < NumFunctionIds || ID >= ValueIDValues.size())
       return false;
     WeakVH &OldV = ValueIDValues[ID];
-    if (OldV == 0) {
+    if (OldV == NULL) {
       ValueIDValues[ID] = GV;
       return true;
     }
@@ -162,11 +192,46 @@
     return true;
   }
 
+  /// Returns the corresponding ICE type for LLVMTy.
+  Ice::Type convertToIceType(Type *LLVMTy) {
+    Ice::Type IceTy = TypeConverter.convertToIceType(LLVMTy);
+    if (IceTy >= Ice::IceType_NUM) {
+      return convertToIceTypeError(LLVMTy);
+    }
+    return IceTy;
+  }
+
+  /// Returns the corresponding LLVM type for IceTy.
+  Type *convertToLLVMType(Ice::Type IceTy) const {
+    return TypeConverter.convertToLLVMType(IceTy);
+  }
+
+  /// Returns the LLVM integer type with the given number of Bits.  If
+  /// Bits is not a valid PNaCl type, returns NULL.
+  Type *getLLVMIntegerType(unsigned Bits) const {
+    return TypeConverter.getLLVMIntegerType(Bits);
+  }
+
+  /// Returns the LLVM vector with the given Size and Ty. If not a
+  /// valid PNaCl vector type, returns NULL.
+  Type *getLLVMVectorType(unsigned Size, Ice::Type Ty) const {
+    return TypeConverter.getLLVMVectorType(Size, Ty);
+  }
+
+  /// Returns the model for pointer types in ICE.
+  Ice::Type getIcePointerType() const {
+    return TypeConverter.getIcePointerType();
+  }
+
 private:
+  // The translator associated with the parser.
+  Ice::Translator &Translator;
   // The parsed module.
   OwningPtr<Module> Mod;
   // The bitcode header.
   NaClBitcodeHeader &Header;
+  // Converter between LLVM and ICE types.
+  Ice::TypeConverter TypeConverter;
   // The exit status that should be set to true if an error occurs.
   bool &ErrorStatus;
   // The number of errors reported.
@@ -177,6 +242,8 @@
   std::vector<WeakVH> ValueIDValues;
   // The number of function IDs.
   unsigned NumFunctionIds;
+  // The number of function blocks (processed so far).
+  unsigned NumFunctionBlocks;
   // The list of value IDs (in the order found) of defining function
   // addresses.
   std::vector<unsigned> DefiningFunctionsList;
@@ -192,6 +259,10 @@
 
   /// Reports error about bad call to setTypeID.
   void reportBadSetTypeID(unsigned ID, Type *Ty);
+
+  // Reports that there is no corresponding ICE type for LLVMTy, and
+  // returns Ice::IceType_void.
+  Ice::Type convertToIceTypeError(Type *LLVMTy);
 };
 
 Type *TopLevelParser::reportTypeIDAsUndefined(unsigned ID) {
@@ -199,7 +270,8 @@
   raw_string_ostream StrBuf(Buffer);
   StrBuf << "Can't find type for type id: " << ID;
   Error(StrBuf.str());
-  Type *Ty = Type::getVoidTy(getLLVMContext());
+  // TODO(kschimpf) Remove error recovery once implementation complete.
+  Type *Ty = TypeConverter.convertToLLVMType(Ice::IceType_void);
   // To reduce error messages, update type list if possible.
   if (ID < TypeIDValues.size())
     TypeIDValues[ID] = Ty;
@@ -219,6 +291,14 @@
   Error(StrBuf.str());
 }
 
+Ice::Type TopLevelParser::convertToIceTypeError(Type *LLVMTy) {
+  std::string Buffer;
+  raw_string_ostream StrBuf(Buffer);
+  StrBuf << "Invalid LLVM type: " << *LLVMTy;
+  Error(StrBuf.str());
+  return Ice::IceType_void;
+}
+
 // Base class for parsing blocks within the bitcode file.  Note:
 // Because this is the base class of block parsers, we generate error
 // messages if ParseBlock or ParseRecord is not overridden in derived
@@ -240,6 +320,9 @@
       : NaClBitcodeParser(BlockID, EnclosingParser),
         Context(EnclosingParser->Context) {}
 
+  // Gets the translator associated with the bitcode parser.
+  Ice::Translator &getTranslator() { return Context->getTranslator(); }
+
   // Generates an error Message with the bit address prefixed to it.
   virtual bool Error(const std::string &Message) LLVM_OVERRIDE {
     uint64_t Bit = Record.GetStartBit() + Context->getHeaderSize() * 8;
@@ -258,66 +341,72 @@
   // understood.
   virtual void ProcessRecord() LLVM_OVERRIDE;
 
-  /// Checks if the size of the record is Size. If not, an error is
-  /// produced using the given RecordName. Return true if error was
-  /// reported. Otherwise false.
-  bool checkRecordSize(unsigned Size, const char *RecordName) {
+  // Checks if the size of the record is Size.  Return true if valid.
+  // Otherwise generates an error and returns false.
+  bool isValidRecordSize(unsigned Size, const char *RecordName) {
     const NaClBitcodeRecord::RecordVector &Values = Record.GetValues();
-    if (Values.size() != Size) {
-      return RecordSizeError(Size, RecordName, 0);
-    }
+    if (Values.size() == Size)
+      return true;
+    ReportRecordSizeError(Size, RecordName, NULL);
     return false;
   }
 
-  /// Checks if the size of the record is at least as large as the
-  /// LowerLimit.
-  bool checkRecordSizeAtLeast(unsigned LowerLimit, const char *RecordName) {
+  // Checks if the size of the record is at least as large as the
+  // LowerLimit. Returns true if valid.  Otherwise generates an error
+  // and returns false.
+  bool isValidRecordSizeAtLeast(unsigned LowerLimit, const char *RecordName) {
     const NaClBitcodeRecord::RecordVector &Values = Record.GetValues();
-    if (Values.size() < LowerLimit) {
-      return RecordSizeError(LowerLimit, RecordName, "at least");
-    }
+    if (Values.size() >= LowerLimit)
+      return true;
+    ReportRecordSizeError(LowerLimit, RecordName, "at least");
     return false;
   }
 
-  /// Checks if the size of the record is no larger than the
-  /// UpperLimit.
-  bool checkRecordSizeNoMoreThan(unsigned UpperLimit, const char *RecordName) {
+  // Checks if the size of the record is no larger than the
+  // UpperLimit.  Returns true if valid.  Otherwise generates an error
+  // and returns false.
+  bool isValidRecordSizeAtMost(unsigned UpperLimit, const char *RecordName) {
     const NaClBitcodeRecord::RecordVector &Values = Record.GetValues();
-    if (Values.size() > UpperLimit) {
-      return RecordSizeError(UpperLimit, RecordName, "no more than");
-    }
+    if (Values.size() <= UpperLimit)
+      return true;
+    ReportRecordSizeError(UpperLimit, RecordName, "no more than");
     return false;
   }
 
-  /// Checks if the size of the record is at least as large as the
-  /// LowerLimit, and no larger than the UpperLimit.
-  bool checkRecordSizeInRange(unsigned LowerLimit, unsigned UpperLimit,
-                              const char *RecordName) {
-    return checkRecordSizeAtLeast(LowerLimit, RecordName) ||
-           checkRecordSizeNoMoreThan(UpperLimit, RecordName);
+  // Checks if the size of the record is at least as large as the
+  // LowerLimit, and no larger than the UpperLimit.  Returns true if
+  // valid.  Otherwise generates an error and returns false.
+  bool isValidRecordSizeInRange(unsigned LowerLimit, unsigned UpperLimit,
+                                const char *RecordName) {
+    return isValidRecordSizeAtLeast(LowerLimit, RecordName) &&
+           isValidRecordSizeAtMost(UpperLimit, RecordName);
   }
 
 private:
   /// Generates a record size error. ExpectedSize is the number
   /// of elements expected. RecordName is the name of the kind of
-  /// record that has incorrect size. ContextMessage (if not 0)
+  /// record that has incorrect size. ContextMessage (if not NULL)
   /// is appended to "record expects" to describe how ExpectedSize
   /// should be interpreted.
-  bool RecordSizeError(unsigned ExpectedSize, const char *RecordName,
-                       const char *ContextMessage) {
-    std::string Buffer;
-    raw_string_ostream StrBuf(Buffer);
-    StrBuf << RecordName << " record expects";
-    if (ContextMessage)
-      StrBuf << " " << ContextMessage;
-    StrBuf << " " << ExpectedSize << " argument";
-    if (ExpectedSize > 1)
-      StrBuf << "s";
-    StrBuf << ". Found: " << Record.GetValues().size();
-    return Error(StrBuf.str());
-  }
+  void ReportRecordSizeError(unsigned ExpectedSize, const char *RecordName,
+                             const char *ContextMessage);
 };
 
+void BlockParserBaseClass::ReportRecordSizeError(unsigned ExpectedSize,
+                                                 const char *RecordName,
+                                                 const char *ContextMessage) {
+  std::string Buffer;
+  raw_string_ostream StrBuf(Buffer);
+  StrBuf << RecordName << " record expects";
+  if (ContextMessage)
+    StrBuf << " " << ContextMessage;
+  StrBuf << " " << ExpectedSize << " argument";
+  if (ExpectedSize > 1)
+    StrBuf << "s";
+  StrBuf << ". Found: " << Record.GetValues().size();
+  Error(StrBuf.str());
+}
+
 bool BlockParserBaseClass::ParseBlock(unsigned BlockID) {
   // If called, derived class doesn't know how to handle block.
   // Report error and skip.
@@ -325,6 +414,7 @@
   raw_string_ostream StrBuf(Buffer);
   StrBuf << "Don't know how to parse block id: " << BlockID;
   Error(StrBuf.str());
+  // TODO(kschimpf) Remove error recovery once implementation complete.
   SkipBlock();
   return false;
 }
@@ -359,45 +449,64 @@
   switch (Record.GetCode()) {
   case naclbitc::TYPE_CODE_NUMENTRY:
     // NUMENTRY: [numentries]
-    if (checkRecordSize(1, "Type count"))
+    if (!isValidRecordSize(1, "Type count"))
       return;
     Context->resizeTypeIDValues(Values[0]);
     return;
   case naclbitc::TYPE_CODE_VOID:
     // VOID
-    if (checkRecordSize(0, "Type void"))
-      break;
-    Ty = Type::getVoidTy(Context->getLLVMContext());
+    if (!isValidRecordSize(0, "Type void"))
+      return;
+    Ty = Context->convertToLLVMType(Ice::IceType_void);
     break;
   case naclbitc::TYPE_CODE_FLOAT:
     // FLOAT
-    if (checkRecordSize(0, "Type float"))
-      break;
-    Ty = Type::getFloatTy(Context->getLLVMContext());
+    if (!isValidRecordSize(0, "Type float"))
+      return;
+    Ty = Context->convertToLLVMType(Ice::IceType_f32);
     break;
   case naclbitc::TYPE_CODE_DOUBLE:
     // DOUBLE
-    if (checkRecordSize(0, "Type double"))
-      break;
-    Ty = Type::getDoubleTy(Context->getLLVMContext());
+    if (!isValidRecordSize(0, "Type double"))
+      return;
+    Ty = Context->convertToLLVMType(Ice::IceType_f64);
     break;
   case naclbitc::TYPE_CODE_INTEGER:
     // INTEGER: [width]
-    if (checkRecordSize(1, "Type integer"))
-      break;
-    Ty = IntegerType::get(Context->getLLVMContext(), Values[0]);
-    // TODO(kschimpf) Check if size is legal.
+    if (!isValidRecordSize(1, "Type integer"))
+      return;
+    Ty = Context->getLLVMIntegerType(Values[0]);
+    if (Ty == NULL) {
+      std::string Buffer;
+      raw_string_ostream StrBuf(Buffer);
+      StrBuf << "Type integer record with invalid bitsize: " << Values[0];
+      Error(StrBuf.str());
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      // Fix type so that we can continue.
+      Ty = Context->convertToLLVMType(Ice::IceType_i32);
+    }
     break;
-  case naclbitc::TYPE_CODE_VECTOR:
+  case naclbitc::TYPE_CODE_VECTOR: {
     // VECTOR: [numelts, eltty]
-    if (checkRecordSize(2, "Type vector"))
-      break;
-    Ty = VectorType::get(Context->getTypeByID(Values[1]), Values[0]);
+    if (!isValidRecordSize(2, "Type vector"))
+      return;
+    Type *BaseTy = Context->getTypeByID(Values[1]);
+    Ty = Context->getLLVMVectorType(Values[0],
+                                    Context->convertToIceType(BaseTy));
+    if (Ty == NULL) {
+      std::string Buffer;
+      raw_string_ostream StrBuf(Buffer);
+      StrBuf << "Invalid type vector record: <" << Values[0] << " x " << *BaseTy
+             << ">";
+      Error(StrBuf.str());
+      Ty = Context->convertToLLVMType(Ice::IceType_void);
+    }
     break;
+  }
   case naclbitc::TYPE_CODE_FUNCTION: {
     // FUNCTION: [vararg, retty, paramty x N]
-    if (checkRecordSizeAtLeast(2, "Type signature"))
-      break;
+    if (!isValidRecordSizeAtLeast(2, "Type signature"))
+      return;
     SmallVector<Type *, 8> ArgTys;
     for (unsigned i = 2, e = Values.size(); i != e; ++i) {
       ArgTys.push_back(Context->getTypeByID(Values[i]));
@@ -407,11 +516,11 @@
   }
   default:
     BlockParserBaseClass::ProcessRecord();
-    break;
+    return;
   }
   // If Ty not defined, assume error. Use void as filler.
   if (Ty == NULL)
-    Ty = Type::getVoidTy(Context->getLLVMContext());
+    Ty = Context->convertToLLVMType(Ice::IceType_void);
   Context->setTypeID(NextTypeId++, Ty);
 }
 
@@ -474,6 +583,7 @@
         StrBuf << "s";
       StrBuf << ". Found: " << Initializers.size();
       Error(StrBuf.str());
+      // TODO(kschimpf) Remove error recovery once implementation complete.
       // Fix up state so that we can continue.
       InitializersNeeded = Initializers.size();
       installGlobalVar();
@@ -530,7 +640,7 @@
   switch (Record.GetCode()) {
   case naclbitc::GLOBALVAR_COUNT:
     // COUNT: [n]
-    if (checkRecordSize(1, "Globals count"))
+    if (!isValidRecordSize(1, "Globals count"))
       return;
     if (NextGlobalID != Context->getNumFunctionIDs()) {
       Error("Globals count record not first in block.");
@@ -541,7 +651,7 @@
     return;
   case naclbitc::GLOBALVAR_VAR: {
     // VAR: [align, isconst]
-    if (checkRecordSize(2, "Globals variable"))
+    if (!isValidRecordSize(2, "Globals variable"))
       return;
     verifyNoMissingInitializers();
     InitializersNeeded = 1;
@@ -552,7 +662,7 @@
   }
   case naclbitc::GLOBALVAR_COMPOUND:
     // COMPOUND: [size]
-    if (checkRecordSize(1, "globals compound"))
+    if (!isValidRecordSize(1, "globals compound"))
       return;
     if (Initializers.size() > 0 || InitializersNeeded != 1) {
       Error("Globals compound record not first initializer");
@@ -569,18 +679,18 @@
     return;
   case naclbitc::GLOBALVAR_ZEROFILL: {
     // ZEROFILL: [size]
-    if (checkRecordSize(1, "Globals zerofill"))
+    if (!isValidRecordSize(1, "Globals zerofill"))
       return;
     reserveInitializer("Globals zerofill");
     Type *Ty =
-        ArrayType::get(Type::getInt8Ty(Context->getLLVMContext()), Values[0]);
+        ArrayType::get(Context->convertToLLVMType(Ice::IceType_i8), Values[0]);
     Constant *Zero = ConstantAggregateZero::get(Ty);
     Initializers.push_back(Zero);
     break;
   }
   case naclbitc::GLOBALVAR_DATA: {
     // DATA: [b0, b1, ...]
-    if (checkRecordSizeAtLeast(1, "Globals data"))
+    if (!isValidRecordSizeAtLeast(1, "Globals data"))
       return;
     reserveInitializer("Globals data");
     unsigned Size = Values.size();
@@ -594,17 +704,17 @@
   }
   case naclbitc::GLOBALVAR_RELOC: {
     // RELOC: [val, [addend]]
-    if (checkRecordSizeInRange(1, 2, "Globals reloc"))
+    if (!isValidRecordSizeInRange(1, 2, "Globals reloc"))
       return;
     Constant *BaseVal = Context->getOrCreateGlobalVarRef(Values[0]);
-    if (BaseVal == 0) {
+    if (BaseVal == NULL) {
       std::string Buffer;
       raw_string_ostream StrBuf(Buffer);
       StrBuf << "Can't find global relocation value: " << Values[0];
       Error(StrBuf.str());
       return;
     }
-    Type *IntPtrType = IntegerType::get(Context->getLLVMContext(), 32);
+    Type *IntPtrType = Context->convertToLLVMType(Context->getIcePointerType());
     Constant *Val = ConstantExpr::getPtrToInt(BaseVal, IntPtrType);
     if (Values.size() == 2) {
       Val = ConstantExpr::getAdd(Val, ConstantInt::get(IntPtrType, Values[1]));
@@ -654,11 +764,11 @@
   switch (Record.GetCode()) {
   case naclbitc::VST_CODE_ENTRY: {
     // VST_ENTRY: [ValueId, namechar x N]
-    if (checkRecordSizeAtLeast(2, "Valuesymtab value entry"))
+    if (!isValidRecordSizeAtLeast(2, "Valuesymtab value entry"))
       return;
     ConvertToString(ConvertedName);
     Value *V = Context->getGlobalValueByID(Values[0]);
-    if (V == 0) {
+    if (V == NULL) {
       std::string Buffer;
       raw_string_ostream StrBuf(Buffer);
       StrBuf << "Invalid global address ID in valuesymtab: " << Values[0];
@@ -685,6 +795,354 @@
   return;
 }
 
+/// Parses function blocks in the bitcode file.
+class FunctionParser : public BlockParserBaseClass {
+  FunctionParser(const FunctionParser &) LLVM_DELETED_FUNCTION;
+  FunctionParser &operator=(const FunctionParser &) LLVM_DELETED_FUNCTION;
+
+public:
+  FunctionParser(unsigned BlockID, BlockParserBaseClass *EnclosingParser)
+      : BlockParserBaseClass(BlockID, EnclosingParser),
+        Func(new Ice::Cfg(getTranslator().getContext())), CurrentBbIndex(0),
+        FcnId(Context->getNextFunctionBlockValueID()),
+        LLVMFunc(cast<Function>(Context->getGlobalValueByID(FcnId))),
+        CachedNumGlobalValueIDs(Context->getNumGlobalValueIDs()),
+        InstIsTerminating(false) {
+    Func->setFunctionName(LLVMFunc->getName());
+    Func->setReturnType(Context->convertToIceType(LLVMFunc->getReturnType()));
+    Func->setInternal(LLVMFunc->hasInternalLinkage());
+    CurrentNode = InstallNextBasicBlock();
+    for (Function::const_arg_iterator ArgI = LLVMFunc->arg_begin(),
+                                      ArgE = LLVMFunc->arg_end();
+         ArgI != ArgE; ++ArgI) {
+      Func->addArg(NextInstVar(Context->convertToIceType(ArgI->getType())));
+    }
+  }
+
+  ~FunctionParser() LLVM_OVERRIDE;
+
+private:
+  // Timer for reading function bitcode and converting to ICE.
+  Ice::Timer TConvert;
+  // The corresponding ICE function defined by the function block.
+  Ice::Cfg *Func;
+  // The index to the current basic block being built.
+  uint32_t CurrentBbIndex;
+  // The basic block being built.
+  Ice::CfgNode *CurrentNode;
+  // The ID for the function.
+  unsigned FcnId;
+  // The corresponding LLVM function.
+  Function *LLVMFunc;
+  // Holds operands local to the function block, based on indices
+  // defined in the bitcode file.
+  std::vector<Ice::Operand *> LocalOperands;
+  // Holds the dividing point between local and global absolute value indices.
+  uint32_t CachedNumGlobalValueIDs;
+  // True if the last processed instruction was a terminating
+  // instruction.
+  bool InstIsTerminating;
+
+  virtual void ProcessRecord() LLVM_OVERRIDE;
+
+  virtual void ExitBlock() LLVM_OVERRIDE;
+
+  // Creates and appends a new basic block to the list of basic blocks.
+  Ice::CfgNode *InstallNextBasicBlock() { return Func->makeNode(); }
+
+  // Returns the Index-th basic block in the list of basic blocks.
+  Ice::CfgNode *GetBasicBlock(uint32_t Index) {
+    const Ice::NodeList &Nodes = Func->getNodes();
+    if (Index >= Nodes.size()) {
+      std::string Buffer;
+      raw_string_ostream StrBuf(Buffer);
+      StrBuf << "Reference to basic block " << Index
+             << " not found. Must be less than " << Nodes.size();
+      Error(StrBuf.str());
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      Index = 0;
+    }
+    return Nodes[Index];
+  }
+
+  // Generates the next available local variable using the given
+  // type.  Note: if Ty is void, this function returns NULL.
+  Ice::Variable *NextInstVar(Ice::Type Ty) {
+    if (Ty == Ice::IceType_void)
+      return NULL;
+    Ice::Variable *Var = Func->makeVariable(Ty, CurrentNode);
+    LocalOperands.push_back(Var);
+    return Var;
+  }
+
+  // Converts a relative index (to the next instruction to be read) to
+  // an absolute value index.
+  uint32_t convertRelativeToAbsIndex(int32_t Id) {
+    uint32_t AbsNextId = CachedNumGlobalValueIDs + LocalOperands.size();
+    if (Id > 0 && AbsNextId < static_cast<uint32_t>(Id)) {
+      std::string Buffer;
+      raw_string_ostream StrBuf(Buffer);
+      StrBuf << "Invalid relative value id: " << Id
+             << " (must be <= " << AbsNextId << ")";
+      Error(StrBuf.str());
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      return 0;
+    }
+    return AbsNextId - Id;
+  }
+
+  // Returns the value referenced by the given value Index.
+  Ice::Operand *getOperand(uint32_t Index) {
+    if (Index < CachedNumGlobalValueIDs) {
+      // TODO(kschimpf): Define implementation.
+      report_fatal_error("getOperand of global addresses not implemented");
+    }
+    uint32_t LocalIndex = Index - CachedNumGlobalValueIDs;
+    if (LocalIndex >= LocalOperands.size()) {
+      std::string Buffer;
+      raw_string_ostream StrBuf(Buffer);
+      StrBuf << "Value index " << Index << " out of range. Must be less than "
+             << (LocalOperands.size() + CachedNumGlobalValueIDs);
+      Error(StrBuf.str());
+      report_fatal_error("Unable to continue");
+    }
+    return LocalOperands[LocalIndex];
+  }
+
+  // Generates type error message for binary operator Op
+  // operating on Type OpTy.
+  void ReportInvalidBinaryOp(Ice::InstArithmetic::OpKind Op, Ice::Type OpTy);
+
+  // Validates if integer logical Op, for type OpTy, is valid.
+  // Returns true if valid. Otherwise generates error message and
+  // returns false.
+  bool isValidIntegerLogicalOp(Ice::InstArithmetic::OpKind Op, Ice::Type OpTy) {
+    if (Ice::isIntegerType(OpTy))
+      return true;
+    ReportInvalidBinaryOp(Op, OpTy);
+    return false;
+  }
+
+  // Validates if integer (or vector of integers) arithmetic Op, for type
+  // OpTy, is valid.  Returns true if valid. Otherwise generates
+  // error message and returns false.
+  bool isValidIntegerArithOp(Ice::InstArithmetic::OpKind Op, Ice::Type OpTy) {
+    if (Ice::isIntegerArithmeticType(OpTy))
+      return true;
+    ReportInvalidBinaryOp(Op, OpTy);
+    return false;
+  }
+
+  // Checks if floating arithmetic Op, for type OpTy, is valid.
+  // Returns true if valid. Otherwise generates an error message and
+  // returns false.
+  bool isValidFloatingArithOp(Ice::InstArithmetic::OpKind Op, Ice::Type OpTy) {
+    if (Ice::isFloatingType(OpTy))
+      return true;
+    ReportInvalidBinaryOp(Op, OpTy);
+    return false;
+  }
+
+  // Reports that the given binary Opcode, for the given type Ty,
+  // is not understood.
+  void ReportInvalidBinopOpcode(unsigned Opcode, Ice::Type Ty);
+
+  // Takes the PNaCl bitcode binary operator Opcode, and the opcode
+  // type Ty, and sets Op to the corresponding ICE binary
+  // opcode. Returns true if able to convert, false otherwise.
+  bool convertBinopOpcode(unsigned Opcode, Ice::Type Ty,
+                          Ice::InstArithmetic::OpKind &Op) {
+    Instruction::BinaryOps LLVMOpcode;
+    if (!naclbitc::DecodeBinaryOpcode(Opcode, Context->convertToLLVMType(Ty),
+                                      LLVMOpcode)) {
+      ReportInvalidBinopOpcode(Opcode, Ty);
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      Op = Ice::InstArithmetic::Add;
+      return false;
+    }
+    switch (LLVMOpcode) {
+    default: {
+      ReportInvalidBinopOpcode(Opcode, Ty);
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      Op = Ice::InstArithmetic::Add;
+      return false;
+    }
+    case Instruction::Add:
+      Op = Ice::InstArithmetic::Add;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::FAdd:
+      Op = Ice::InstArithmetic::Fadd;
+      return isValidFloatingArithOp(Op, Ty);
+    case Instruction::Sub:
+      Op = Ice::InstArithmetic::Sub;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::FSub:
+      Op = Ice::InstArithmetic::Fsub;
+      return isValidFloatingArithOp(Op, Ty);
+    case Instruction::Mul:
+      Op = Ice::InstArithmetic::Mul;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::FMul:
+      Op = Ice::InstArithmetic::Fmul;
+      return isValidFloatingArithOp(Op, Ty);
+    case Instruction::UDiv:
+      Op = Ice::InstArithmetic::Udiv;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::SDiv:
+      Op = Ice::InstArithmetic::Sdiv;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::FDiv:
+      Op = Ice::InstArithmetic::Fdiv;
+      return isValidFloatingArithOp(Op, Ty);
+    case Instruction::URem:
+      Op = Ice::InstArithmetic::Urem;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::SRem:
+      Op = Ice::InstArithmetic::Srem;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::FRem:
+      Op = Ice::InstArithmetic::Frem;
+      return isValidFloatingArithOp(Op, Ty);
+    case Instruction::Shl:
+      Op = Ice::InstArithmetic::Shl;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::LShr:
+      Op = Ice::InstArithmetic::Lshr;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::AShr:
+      Op = Ice::InstArithmetic::Ashr;
+      return isValidIntegerArithOp(Op, Ty);
+    case Instruction::And:
+      Op = Ice::InstArithmetic::And;
+      return isValidIntegerLogicalOp(Op, Ty);
+    case Instruction::Or:
+      Op = Ice::InstArithmetic::Or;
+      return isValidIntegerLogicalOp(Op, Ty);
+    case Instruction::Xor:
+      Op = Ice::InstArithmetic::Xor;
+      return isValidIntegerLogicalOp(Op, Ty);
+    }
+  }
+};
+
+FunctionParser::~FunctionParser() {
+  if (getTranslator().getFlags().SubzeroTimingEnabled) {
+    errs() << "[Subzero timing] Convert function " << Func->getFunctionName()
+           << ": " << TConvert.getElapsedSec() << " sec\n";
+  }
+}
+
+void FunctionParser::ReportInvalidBinopOpcode(unsigned Opcode, Ice::Type Ty) {
+  std::string Buffer;
+  raw_string_ostream StrBuf(Buffer);
+  StrBuf << "Binary opcode " << Opcode << " not understood for type " << Ty;
+  Error(StrBuf.str());
+}
+
+void FunctionParser::ExitBlock() {
+  // Before translating, check for blocks without instructions, and
+  // insert unreachable. This shouldn't happen, but be safe.
+  unsigned Index = 0;
+  const Ice::NodeList &Nodes = Func->getNodes();
+  for (std::vector<Ice::CfgNode *>::const_iterator Iter = Nodes.begin(),
+                                                   IterEnd = Nodes.end();
+       Iter != IterEnd; ++Iter, ++Index) {
+    Ice::CfgNode *Node = *Iter;
+    if (Node->getInsts().size() == 0) {
+      std::string Buffer;
+      raw_string_ostream StrBuf(Buffer);
+      StrBuf << "Basic block " << Index << " contains no instructions";
+      Error(StrBuf.str());
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      Node->appendInst(Ice::InstUnreachable::create(Func));
+    }
+  }
+  getTranslator().translateFcn(Func);
+}
+
+void FunctionParser::ReportInvalidBinaryOp(Ice::InstArithmetic::OpKind Op,
+                                           Ice::Type OpTy) {
+  std::string Buffer;
+  raw_string_ostream StrBuf(Buffer);
+  StrBuf << "Invalid operator type for " << Ice::InstArithmetic::getOpName(Op)
+         << ". Found " << OpTy;
+  Error(StrBuf.str());
+}
+
+void FunctionParser::ProcessRecord() {
+  const NaClBitcodeRecord::RecordVector &Values = Record.GetValues();
+  if (InstIsTerminating) {
+    InstIsTerminating = false;
+    CurrentNode = GetBasicBlock(++CurrentBbIndex);
+  }
+  Ice::Inst *Inst = NULL;
+  switch (Record.GetCode()) {
+  case naclbitc::FUNC_CODE_DECLAREBLOCKS: {
+    // DECLAREBLOCKS: [n]
+    if (!isValidRecordSize(1, "function block count"))
+      break;
+    if (Func->getNodes().size() != 1) {
+      Error("Duplicate function block count record");
+      return;
+    }
+    uint32_t NumBbs = Values[0];
+    if (NumBbs == 0) {
+      Error("Functions must contain at least one basic block.");
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      NumBbs = 1;
+    }
+    // Install the basic blocks, skipping bb0 which was created in the
+    // constructor.
+    for (size_t i = 1; i < NumBbs; ++i)
+      InstallNextBasicBlock();
+    break;
+  }
+  case naclbitc::FUNC_CODE_INST_BINOP: {
+    // BINOP: [opval, opval, opcode]
+    if (!isValidRecordSize(3, "function block binop"))
+      break;
+    Ice::Operand *Op1 = getOperand(convertRelativeToAbsIndex(Values[0]));
+    Ice::Operand *Op2 = getOperand(convertRelativeToAbsIndex(Values[1]));
+    Ice::Type Type1 = Op1->getType();
+    Ice::Type Type2 = Op2->getType();
+    if (Type1 != Type2) {
+      std::string Buffer;
+      raw_string_ostream StrBuf(Buffer);
+      StrBuf << "Binop argument types differ: " << Type1 << " and " << Type2;
+      Error(StrBuf.str());
+      // TODO(kschimpf) Remove error recovery once implementation complete.
+      Op2 = Op1;
+    }
+
+    Ice::InstArithmetic::OpKind Opcode;
+    if (!convertBinopOpcode(Values[2], Type1, Opcode))
+      break;
+    Ice::Variable *Dest = NextInstVar(Type1);
+    Inst = Ice::InstArithmetic::create(Func, Opcode, Dest, Op1, Op2);
+    break;
+  }
+  case naclbitc::FUNC_CODE_INST_RET: {
+    // RET: [opval?]
+    InstIsTerminating = true;
+    if (!isValidRecordSizeInRange(0, 1, "function block ret"))
+      break;
+    if (Values.size() == 0) {
+      Inst = Ice::InstRet::create(Func);
+    } else {
+      Inst = Ice::InstRet::create(
+          Func, getOperand(convertRelativeToAbsIndex(Values[0])));
+    }
+    break;
+  }
+  default:
+    // Generate error message!
+    BlockParserBaseClass::ProcessRecord();
+    break;
+  }
+  if (Inst)
+    CurrentNode->appendInst(Inst);
+}
+
 /// Parses the module block in the bitcode file.
 class ModuleParser : public BlockParserBaseClass {
 public:
@@ -716,9 +1174,8 @@
     return Parser.ParseThisBlock();
   }
   case naclbitc::FUNCTION_BLOCK_ID: {
-    Error("Function block parser not yet implemented, skipping");
-    SkipBlock();
-    return false;
+    FunctionParser Parser(BlockID, this);
+    return Parser.ParseThisBlock();
   }
   default:
     return BlockParserBaseClass::ParseBlock(BlockID);
@@ -730,7 +1187,7 @@
   switch (Record.GetCode()) {
   case naclbitc::MODULE_CODE_VERSION: {
     // VERSION: [version#]
-    if (checkRecordSize(1, "Module version"))
+    if (!isValidRecordSize(1, "Module version"))
       return;
     unsigned Version = Values[0];
     if (Version != 1) {
@@ -743,11 +1200,11 @@
   }
   case naclbitc::MODULE_CODE_FUNCTION: {
     // FUNCTION:  [type, callingconv, isproto, linkage]
-    if (checkRecordSize(4, "Function heading"))
+    if (!isValidRecordSize(4, "Function heading"))
       return;
     Type *Ty = Context->getTypeByID(Values[0]);
     FunctionType *FTy = dyn_cast<FunctionType>(Ty);
-    if (FTy == 0) {
+    if (FTy == NULL) {
       std::string Buffer;
       raw_string_ostream StrBuf(Buffer);
       StrBuf << "Function heading expects function type. Found: " << Ty;
@@ -788,13 +1245,7 @@
 bool TopLevelParser::ParseBlock(unsigned BlockID) {
   if (BlockID == naclbitc::MODULE_BLOCK_ID) {
     ModuleParser Parser(BlockID, this);
-    bool ReturnValue = Parser.ParseThisBlock();
-    // TODO(kschimpf): Remove once translating function blocks.
-    errs() << "Global addresses:\n";
-    for (size_t i = 0; i < ValueIDValues.size(); ++i) {
-      errs() << "[" << i << "]: " << *ValueIDValues[i] << "\n";
-    }
-    return ReturnValue;
+    return Parser.ParseThisBlock();
   }
   // Generate error message by using default block implementation.
   BlockParserBaseClass Parser(BlockID, this);
@@ -836,8 +1287,8 @@
   NaClBitstreamReader InputStreamFile(BufPtr, EndBufPtr);
   NaClBitstreamCursor InputStream(InputStreamFile);
 
-  TopLevelParser Parser(MemBuf->getBufferIdentifier(), Header, InputStream,
-                        ErrorStatus);
+  TopLevelParser Parser(*this, MemBuf->getBufferIdentifier(), Header,
+                        InputStream, ErrorStatus);
   int TopLevelBlocks = 0;
   while (!InputStream.AtEndOfStream()) {
     if (Parser.Parse()) {
diff --git a/src/PNaClTranslator.h b/src/PNaClTranslator.h
index 23f61cc..87284fa 100644
--- a/src/PNaClTranslator.h
+++ b/src/PNaClTranslator.h
@@ -22,7 +22,8 @@
 
 class PNaClTranslator : public Translator {
 public:
-  PNaClTranslator(GlobalContext *Ctx) : Translator(Ctx) {}
+  PNaClTranslator(GlobalContext *Ctx, const ClFlags &Flags)
+      : Translator(Ctx, Flags) {}
   // Reads the PNaCl bitcode file and translates to ICE, which is then
   // converted to machine code. Sets ErrorStatus to true if any
   // errors occurred.
diff --git a/src/llvm2ice.cpp b/src/llvm2ice.cpp
index b1d2d6f..a4a96b9 100644
--- a/src/llvm2ice.cpp
+++ b/src/llvm2ice.cpp
@@ -143,7 +143,7 @@
                          Flags);
 
   if (BuildOnRead) {
-    Ice::PNaClTranslator Translator(&Ctx);
+    Ice::PNaClTranslator Translator(&Ctx, Flags);
     Translator.translate(IRFilename);
     return Translator.getErrorStatus();
   } else {
@@ -163,8 +163,8 @@
       return 1;
     }
 
-    Ice::Converter Converter(&Ctx);
-    Converter.convertToIce(Mod);
+    Ice::Converter Converter(Mod, &Ctx, Flags);
+    Converter.convertToIce();
     return Converter.getErrorStatus();
   }
 }
diff --git a/tests_lit/reader_tests/binops.ll b/tests_lit/reader_tests/binops.ll
new file mode 100644
index 0000000..59056d9
--- /dev/null
+++ b/tests_lit/reader_tests/binops.ll
@@ -0,0 +1,892 @@
+; Tests whether we can read binary operators.
+
+; RUN: llvm-as < %s | pnacl-freeze \
+; RUN:              | %llvm2ice -notranslate -verbose=inst -build-on-read \
+; RUN:                -allow-pnacl-reader-error-recovery \
+; RUN:              | FileCheck %s
+
+; TODO(kschimpf): add i8/i16. Needs bitcasts.
+
+define i32 @AddI32(i32 %a, i32 %b) {
+  %add = add i32 %b, %a
+  ret i32 %add
+}
+
+; CHECK:      define i32 @AddI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = add i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @AddI64(i64 %a, i64 %b) {
+  %add = add i64 %b, %a
+  ret i64 %add
+}
+
+; CHECK-NEXT: define i64 @AddI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = add i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %add = add <16 x i8> %b, %a
+  ret <16 x i8> %add
+}
+
+; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = add <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %add = add <8 x i16> %b, %a
+  ret <8 x i16> %add
+}
+
+; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = add <8 x i16> %__1, %__0
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %add = add <4 x i32> %b, %a
+  ret <4 x i32> %add
+}
+
+; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = add <4 x i32> %__1, %__0
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+define float @AddFloat(float %a, float %b) {
+  %add = fadd float %b, %a
+  ret float %add
+}
+
+; CHECK-NEXT: define float @AddFloat(float %__0, float %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fadd float %__1, %__0
+; CHECK-NEXT:   ret float %__2
+; CHECK-NEXT: }
+
+define double @AddDouble(double %a, double %b) {
+  %add = fadd double %b, %a
+  ret double %add
+}
+
+; CHECK-NEXT: define double @AddDouble(double %__0, double %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fadd double %__1, %__0
+; CHECK-NEXT:   ret double %__2
+; CHECK-NEXT: }
+
+define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
+  %add = fadd <4 x float> %b, %a
+  ret <4 x float> %add
+}
+
+; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %__0, <4 x float> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fadd <4 x float> %__1, %__0
+; CHECK-NEXT:   ret <4 x float> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): sub i8/i16. Needs bitcasts.
+
+define i32 @SubI32(i32 %a, i32 %b) {
+  %sub = sub i32 %a, %b
+  ret i32 %sub
+}
+
+; CHECK-NEXT: define i32 @SubI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sub i32 %__0, %__1
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @SubI64(i64 %a, i64 %b) {
+  %sub = sub i64 %a, %b
+  ret i64 %sub
+}
+
+; CHECK-NEXT: define i64 @SubI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sub i64 %__0, %__1
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %sub = sub <16 x i8> %a, %b
+  ret <16 x i8> %sub
+}
+
+; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sub <16 x i8> %__0, %__1
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %sub = sub <8 x i16> %a, %b
+  ret <8 x i16> %sub
+}
+
+; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sub <8 x i16> %__0, %__1
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %sub = sub <4 x i32> %a, %b
+  ret <4 x i32> %sub
+}
+
+; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sub <4 x i32> %__0, %__1
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+define float @SubFloat(float %a, float %b) {
+  %sub = fsub float %a, %b
+  ret float %sub
+}
+
+; CHECK-NEXT: define float @SubFloat(float %__0, float %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fsub float %__0, %__1
+; CHECK-NEXT:   ret float %__2
+; CHECK-NEXT: }
+
+define double @SubDouble(double %a, double %b) {
+  %sub = fsub double %a, %b
+  ret double %sub
+}
+
+; CHECK-NEXT: define double @SubDouble(double %__0, double %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fsub double %__0, %__1
+; CHECK-NEXT:   ret double %__2
+; CHECK-NEXT: }
+
+define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
+  %sub = fsub <4 x float> %a, %b
+  ret <4 x float> %sub
+}
+
+; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %__0, <4 x float> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fsub <4 x float> %__0, %__1
+; CHECK-NEXT:   ret <4 x float> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): mul i8/i16. Needs bitcasts.
+
+define i32 @MulI32(i32 %a, i32 %b) {
+  %mul = mul i32 %b, %a
+  ret i32 %mul
+}
+
+; CHECK-NEXT: define i32 @MulI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = mul i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @MulI64(i64 %a, i64 %b) {
+  %mul = mul i64 %b, %a
+  ret i64 %mul
+}
+
+; CHECK-NEXT: define i64 @MulI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = mul i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+
+define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %mul = mul <16 x i8> %b, %a
+  ret <16 x i8> %mul
+}
+
+; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = mul <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define float @MulFloat(float %a, float %b) {
+  %mul = fmul float %b, %a
+  ret float %mul
+}
+
+; CHECK-NEXT: define float @MulFloat(float %__0, float %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fmul float %__1, %__0
+; CHECK-NEXT:   ret float %__2
+; CHECK-NEXT: }
+
+define double @MulDouble(double %a, double %b) {
+  %mul = fmul double %b, %a
+  ret double %mul
+}
+
+; CHECK-NEXT: define double @MulDouble(double %__0, double %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fmul double %__1, %__0
+; CHECK-NEXT:   ret double %__2
+; CHECK-NEXT: }
+
+define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
+  %mul = fmul <4 x float> %b, %a
+  ret <4 x float> %mul
+}
+
+; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %__0, <4 x float> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fmul <4 x float> %__1, %__0
+; CHECK-NEXT:   ret <4 x float> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): sdiv i8/i16. Needs bitcasts.
+
+define i32 @SdivI32(i32 %a, i32 %b) {
+  %div = sdiv i32 %a, %b
+  ret i32 %div
+}
+
+; CHECK-NEXT: define i32 @SdivI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sdiv i32 %__0, %__1
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @SdivI64(i64 %a, i64 %b) {
+  %div = sdiv i64 %a, %b
+  ret i64 %div
+}
+
+; CHECK-NEXT: define i64 @SdivI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sdiv i64 %__0, %__1
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %div = sdiv <16 x i8> %a, %b
+  ret <16 x i8> %div
+}
+
+; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sdiv <16 x i8> %__0, %__1
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %div = sdiv <8 x i16> %a, %b
+  ret <8 x i16> %div
+}
+
+; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sdiv <8 x i16> %__0, %__1
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %div = sdiv <4 x i32> %a, %b
+  ret <4 x i32> %div
+}
+
+; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = sdiv <4 x i32> %__0, %__1
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): srem i8/i16. Needs bitcasts.
+
+define i32 @SremI32(i32 %a, i32 %b) {
+  %rem = srem i32 %a, %b
+  ret i32 %rem
+}
+
+; CHECK-NEXT: define i32 @SremI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = srem i32 %__0, %__1
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @SremI64(i64 %a, i64 %b) {
+  %rem = srem i64 %a, %b
+  ret i64 %rem
+}
+
+; CHECK-NEXT: define i64 @SremI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = srem i64 %__0, %__1
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %rem = srem <16 x i8> %a, %b
+  ret <16 x i8> %rem
+}
+
+; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = srem <16 x i8> %__0, %__1
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %rem = srem <8 x i16> %a, %b
+  ret <8 x i16> %rem
+}
+
+; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = srem <8 x i16> %__0, %__1
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %rem = srem <4 x i32> %a, %b
+  ret <4 x i32> %rem
+}
+
+; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = srem <4 x i32> %__0, %__1
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): udiv i8/i16. Needs bitcasts.
+
+define i32 @UdivI32(i32 %a, i32 %b) {
+  %div = udiv i32 %a, %b
+  ret i32 %div
+}
+
+; CHECK-NEXT: define i32 @UdivI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = udiv i32 %__0, %__1
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @UdivI64(i64 %a, i64 %b) {
+  %div = udiv i64 %a, %b
+  ret i64 %div
+}
+
+; CHECK-NEXT: define i64 @UdivI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = udiv i64 %__0, %__1
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %div = udiv <16 x i8> %a, %b
+  ret <16 x i8> %div
+}
+
+; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = udiv <16 x i8> %__0, %__1
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %div = udiv <8 x i16> %a, %b
+  ret <8 x i16> %div
+}
+
+; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = udiv <8 x i16> %__0, %__1
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %div = udiv <4 x i32> %a, %b
+  ret <4 x i32> %div
+}
+
+; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = udiv <4 x i32> %__0, %__1
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): urem i8/i16. Needs bitcasts.
+
+define i32 @UremI32(i32 %a, i32 %b) {
+  %rem = urem i32 %a, %b
+  ret i32 %rem
+}
+
+; CHECK-NEXT: define i32 @UremI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = urem i32 %__0, %__1
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @UremI64(i64 %a, i64 %b) {
+  %rem = urem i64 %a, %b
+  ret i64 %rem
+}
+
+; CHECK-NEXT: define i64 @UremI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = urem i64 %__0, %__1
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %rem = urem <16 x i8> %a, %b
+  ret <16 x i8> %rem
+}
+
+; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = urem <16 x i8> %__0, %__1
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %rem = urem <8 x i16> %a, %b
+  ret <8 x i16> %rem
+}
+
+; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = urem <8 x i16> %__0, %__1
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %rem = urem <4 x i32> %a, %b
+  ret <4 x i32> %rem
+}
+
+; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = urem <4 x i32> %__0, %__1
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+define float @fdivFloat(float %a, float %b) {
+  %div = fdiv float %a, %b
+  ret float %div
+}
+
+; CHECK-NEXT: define float @fdivFloat(float %__0, float %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fdiv float %__0, %__1
+; CHECK-NEXT:   ret float %__2
+; CHECK-NEXT: }
+
+define double @fdivDouble(double %a, double %b) {
+  %div = fdiv double %a, %b
+  ret double %div
+}
+
+; CHECK-NEXT: define double @fdivDouble(double %__0, double %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fdiv double %__0, %__1
+; CHECK-NEXT:   ret double %__2
+; CHECK-NEXT: }
+
+define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
+  %div = fdiv <4 x float> %a, %b
+  ret <4 x float> %div
+}
+
+; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %__0, <4 x float> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = fdiv <4 x float> %__0, %__1
+; CHECK-NEXT:   ret <4 x float> %__2
+; CHECK-NEXT: }
+
+define float @fremFloat(float %a, float %b) {
+  %rem = frem float %a, %b
+  ret float %rem
+}
+
+; CHECK-NEXT: define float @fremFloat(float %__0, float %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = frem float %__0, %__1
+; CHECK-NEXT:   ret float %__2
+; CHECK-NEXT: }
+
+
+define double @fremDouble(double %a, double %b) {
+  %rem = frem double %a, %b
+  ret double %rem
+}
+
+; CHECK-NEXT: define double @fremDouble(double %__0, double %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = frem double %__0, %__1
+; CHECK-NEXT:   ret double %__2
+; CHECK-NEXT: }
+
+define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
+  %rem = frem <4 x float> %a, %b
+  ret <4 x float> %rem
+}
+
+; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %__0, <4 x float> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = frem <4 x float> %__0, %__1
+; CHECK-NEXT:   ret <4 x float> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): and i1/i8/i16. Needs bitcasts.
+
+define i32 @AndI32(i32 %a, i32 %b) {
+  %and = and i32 %b, %a
+  ret i32 %and
+}
+
+; CHECK-NEXT: define i32 @AndI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = and i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @AndI64(i64 %a, i64 %b) {
+  %and = and i64 %b, %a
+  ret i64 %and
+}
+
+; CHECK-NEXT: define i64 @AndI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = and i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %and = and <16 x i8> %b, %a
+  ret <16 x i8> %and
+}
+
+; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = and <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %and = and <8 x i16> %b, %a
+  ret <8 x i16> %and
+}
+
+; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = and <8 x i16> %__1, %__0
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %and = and <4 x i32> %b, %a
+  ret <4 x i32> %and
+}
+
+; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = and <4 x i32> %__1, %__0
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): or i1/i8/i16. Needs bitcasts.
+
+define i32 @OrI32(i32 %a, i32 %b) {
+  %or = or i32 %b, %a
+  ret i32 %or
+}
+
+; CHECK-NEXT: define i32 @OrI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = or i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @OrI64(i64 %a, i64 %b) {
+  %or = or i64 %b, %a
+  ret i64 %or
+}
+
+; CHECK-NEXT: define i64 @OrI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = or i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %or = or <16 x i8> %b, %a
+  ret <16 x i8> %or
+}
+
+; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = or <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %or = or <8 x i16> %b, %a
+  ret <8 x i16> %or
+}
+
+; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = or <8 x i16> %__1, %__0
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %or = or <4 x i32> %b, %a
+  ret <4 x i32> %or
+}
+
+; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = or <4 x i32> %__1, %__0
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts.
+
+define i32 @XorI32(i32 %a, i32 %b) {
+  %xor = xor i32 %b, %a
+  ret i32 %xor
+}
+
+; CHECK-NEXT: define i32 @XorI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = xor i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @XorI64(i64 %a, i64 %b) {
+  %xor = xor i64 %b, %a
+  ret i64 %xor
+}
+
+; CHECK-NEXT: define i64 @XorI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = xor i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %xor = xor <16 x i8> %b, %a
+  ret <16 x i8> %xor
+}
+
+; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = xor <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %xor = xor <8 x i16> %b, %a
+  ret <8 x i16> %xor
+}
+
+; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = xor <8 x i16> %__1, %__0
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %xor = xor <4 x i32> %b, %a
+  ret <4 x i32> %xor
+}
+
+; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = xor <4 x i32> %__1, %__0
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): shl i8/i16. Needs bitcasts.
+
+define i32 @ShlI32(i32 %a, i32 %b) {
+  %shl = shl i32 %b, %a
+  ret i32 %shl
+}
+
+; CHECK-NEXT: define i32 @ShlI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = shl i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @ShlI64(i64 %a, i64 %b) {
+  %shl = shl i64 %b, %a
+  ret i64 %shl
+}
+
+; CHECK-NEXT: define i64 @ShlI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = shl i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %shl = shl <16 x i8> %b, %a
+  ret <16 x i8> %shl
+}
+
+; CHECK-NEXT: define <16 x i8> @ShlV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = shl <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %shl = shl <8 x i16> %b, %a
+  ret <8 x i16> %shl
+}
+
+; CHECK-NEXT: define <8 x i16> @ShlV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = shl <8 x i16> %__1, %__0
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %shl = shl <4 x i32> %b, %a
+  ret <4 x i32> %shl
+}
+
+; CHECK-NEXT: define <4 x i32> @ShlV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = shl <4 x i32> %__1, %__0
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): ashr i8/i16. Needs bitcasts.
+
+define i32 @ashrI32(i32 %a, i32 %b) {
+  %ashr = ashr i32 %b, %a
+  ret i32 %ashr
+}
+
+; CHECK-NEXT: define i32 @ashrI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = ashr i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @AshrI64(i64 %a, i64 %b) {
+  %ashr = ashr i64 %b, %a
+  ret i64 %ashr
+}
+
+; CHECK-NEXT: define i64 @AshrI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = ashr i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %ashr = ashr <16 x i8> %b, %a
+  ret <16 x i8> %ashr
+}
+
+; CHECK-NEXT: define <16 x i8> @AshrV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = ashr <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %ashr = ashr <8 x i16> %b, %a
+  ret <8 x i16> %ashr
+}
+
+; CHECK-NEXT: define <8 x i16> @AshrV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = ashr <8 x i16> %__1, %__0
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %ashr = ashr <4 x i32> %b, %a
+  ret <4 x i32> %ashr
+}
+
+; CHECK-NEXT: define <4 x i32> @AshrV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = ashr <4 x i32> %__1, %__0
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }
+
+; TODO(kschimpf): lshr i8/i16. Needs bitcasts.
+
+define i32 @lshrI32(i32 %a, i32 %b) {
+  %lshr = lshr i32 %b, %a
+  ret i32 %lshr
+}
+
+; CHECK-NEXT: define i32 @lshrI32(i32 %__0, i32 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = lshr i32 %__1, %__0
+; CHECK-NEXT:   ret i32 %__2
+; CHECK-NEXT: }
+
+define i64 @LshrI64(i64 %a, i64 %b) {
+  %lshr = lshr i64 %b, %a
+  ret i64 %lshr
+}
+
+; CHECK-NEXT: define i64 @LshrI64(i64 %__0, i64 %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = lshr i64 %__1, %__0
+; CHECK-NEXT:   ret i64 %__2
+; CHECK-NEXT: }
+
+define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+  %lshr = lshr <16 x i8> %b, %a
+  ret <16 x i8> %lshr
+}
+
+; CHECK-NEXT: define <16 x i8> @LshrV16I8(<16 x i8> %__0, <16 x i8> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = lshr <16 x i8> %__1, %__0
+; CHECK-NEXT:   ret <16 x i8> %__2
+; CHECK-NEXT: }
+
+define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+  %lshr = lshr <8 x i16> %b, %a
+  ret <8 x i16> %lshr
+}
+
+; CHECK-NEXT: define <8 x i16> @LshrV8I16(<8 x i16> %__0, <8 x i16> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = lshr <8 x i16> %__1, %__0
+; CHECK-NEXT:   ret <8 x i16> %__2
+; CHECK-NEXT: }
+
+define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+  %lshr = lshr <4 x i32> %b, %a
+  ret <4 x i32> %lshr
+}
+
+; CHECK-NEXT: define <4 x i32> @LshrV4I32(<4 x i32> %__0, <4 x i32> %__1) {
+; CHECK-NEXT: __0:
+; CHECK-NEXT:   %__2 = lshr <4 x i32> %__1, %__0
+; CHECK-NEXT:   ret <4 x i32> %__2
+; CHECK-NEXT: }