Subzero: Clean up some uses of *_cast<>.

Some casts to size_t used only as array indexes are unnecessary and are removed.
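
For example, a minimal sketch of the pattern (the enum and table here are stand-ins, not any particular Subzero table):

  enum Kind { K_A, K_B, Kind_NUM };
  static const char *Names[Kind_NUM] = {"a", "b"};

  const char *kindName(Kind K) {
    // Before: size_t Index = static_cast<size_t>(K); return Names[Index];
    // An unscoped enum converts to an integral index implicitly, so the
    // cast added nothing.
    return K < Kind_NUM ? Names[K] : "???";
  }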

Some explicit declaration types are changed to "auto" to avoid redundancy with the static_cast type.
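
A simplified sketch of the StackVariable/SpillVariable change (the OperandKind enum is stubbed out here):

  enum OperandKind { kVariable, kVariable_Target };

  struct StackVariable {
    // Before: const static OperandKind StackVariableKind =
    //             static_cast<OperandKind>(kVariable_Target);
    // With auto, the type is spelled once, in the static_cast.
    constexpr static auto StackVariableKind =
        static_cast<OperandKind>(kVariable_Target);
  };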

A few llvm::dyn_cast<> operations are changed to llvm::cast<> where the operand's kind is already known, and vice versa where the result actually needs a null check.
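
The rule of thumb: llvm::cast<> asserts the operand kind and never returns null, while llvm::dyn_cast<> returns nullptr on a mismatch, so cast<> is right once the kind is already established and dyn_cast<> is right when the result is null-checked. A self-contained sketch (Inst/InstIcmp here are stand-in stubs wired into LLVM's casting support, not the Subzero classes):

  #include "llvm/Support/Casting.h"
  #include <cassert>

  struct Inst {
    enum InstKind { IK_Icmp, IK_Other };
    explicit Inst(InstKind K) : Kind(K) {}
    InstKind getKind() const { return Kind; }
  private:
    const InstKind Kind;
  };

  struct InstIcmp : Inst {
    InstIcmp() : Inst(IK_Icmp) {}
    static bool classof(const Inst *I) { return I->getKind() == IK_Icmp; }
  };

  void demo(Inst *Producer) {
    // dyn_cast<>: may return nullptr, so the result is checked.
    if (auto *Icmp = llvm::dyn_cast<InstIcmp>(Producer))
      (void)Icmp;
    // cast<>: the kind is already known (cf. the switch on the producer
    // kind in BoolFolding below), so the cast asserts instead of checking.
    assert(llvm::isa<InstIcmp>(Producer));
    (void)llvm::cast<InstIcmp>(Producer);
  }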

A few explicit declaration types are changed to "auto" when initialized with llvm::cast<> or llvm::dyn_cast<>.  Some of these were missed during an earlier cleanup because the declarations spanned multiple lines.
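
A minimal sketch of that cleanup (Operand/Variable here are stand-in stubs, not the Subzero classes):

  #include "llvm/Support/Casting.h"

  struct Operand {
    enum Kind { kVar, kOther };
    explicit Operand(Kind TheKind) : K(TheKind) {}
    Kind getKind() const { return K; }
  private:
    const Kind K;
  };

  struct Variable : Operand {
    Variable() : Operand(kVar) {}
    static bool classof(const Operand *O) { return O->getKind() == kVar; }
  };

  void demo(Operand *LoOperand) {
    // Before (the repeated type often forced a second line):
    //   Variable *DestLo =
    //       llvm::cast<Variable>(LoOperand);
    auto *DestLo = llvm::cast<Variable>(LoOperand); // type named once
    (void)DestLo;
  }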

There are still a few opportunities related to Variable register numbers, but they are ignored for now because they are being addressed in another CL.

BUG= none
R=jpp@chromium.org

Review URL: https://codereview.chromium.org/1674033002 .
diff --git a/src/IceAssemblerARM32.cpp b/src/IceAssemblerARM32.cpp
index 8291f51..89854b1 100644
--- a/src/IceAssemblerARM32.cpp
+++ b/src/IceAssemblerARM32.cpp
@@ -706,7 +706,7 @@
 // Pull out offset from branch Inst.
 IOffsetT AssemblerARM32::decodeBranchOffset(IValueT Inst) {
   // Sign-extend, left-shift by 2, and adjust to the way ARM CPUs read PC.
-  IOffsetT Offset = static_cast<IOffsetT>((Inst & kBranchOffsetMask) << 8);
+  const IOffsetT Offset = (Inst & kBranchOffsetMask) << 8;
   return (Offset >> 6) + kPCReadOffset;
 }
 
diff --git a/src/IceCfg.cpp b/src/IceCfg.cpp
index fe44ee7..6565667 100644
--- a/src/IceCfg.cpp
+++ b/src/IceCfg.cpp
@@ -916,8 +916,7 @@
           // of the block, because a Phi temporary may be live at the end of
           // the previous block, and if it is also assigned in the first
           // instruction of this block, the adjacent live ranges get merged.
-          if (static_cast<class Inst *>(&Instr) != FirstInst &&
-              !Instr.isDestRedefined() &&
+          if (&Instr != FirstInst && !Instr.isDestRedefined() &&
               Dest->getLiveRange().containsValue(InstNumber - 1, IsDest))
             Invalid = true;
           if (Invalid) {
diff --git a/src/IceInst.cpp b/src/IceInst.cpp
index 27f5349..9827db4 100644
--- a/src/IceInst.cpp
+++ b/src/IceInst.cpp
@@ -280,10 +280,8 @@
 }
 
 const char *InstArithmetic::getOpName(OpKind Op) {
-  size_t OpIndex = static_cast<size_t>(Op);
-  return OpIndex < InstArithmetic::_num
-             ? InstArithmeticAttributes[OpIndex].DisplayString
-             : "???";
+  return Op < InstArithmetic::_num ? InstArithmeticAttributes[Op].DisplayString
+                                   : "???";
 }
 
 bool InstArithmetic::isCommutative() const {
@@ -729,9 +727,8 @@
 }
 
 const char *InstCast::getCastName(InstCast::OpKind Kind) {
-  size_t Index = static_cast<size_t>(Kind);
-  if (Index < InstCast::OpKind::_num)
-    return InstCastAttributes[Index].DisplayString;
+  if (Kind < InstCast::OpKind::_num)
+    return InstCastAttributes[Kind].DisplayString;
   llvm_unreachable("Invalid InstCast::OpKind");
   return "???";
 }
diff --git a/src/IceInstARM32.h b/src/IceInstARM32.h
index bce1aca..2be562a 100644
--- a/src/IceInstARM32.h
+++ b/src/IceInstARM32.h
@@ -348,7 +348,7 @@
   static StackVariable *create(Cfg *Func, Type Ty, SizeT Index) {
     return new (Func->allocate<StackVariable>()) StackVariable(Ty, Index);
   }
-  const static OperandKind StackVariableKind =
+  constexpr static auto StackVariableKind =
       static_cast<OperandKind>(kVariable_Target);
   static bool classof(const Operand *Operand) {
     return Operand->getKind() == StackVariableKind;
diff --git a/src/IceTargetLoweringARM32.cpp b/src/IceTargetLoweringARM32.cpp
index 0322a7e..4b70dad 100644
--- a/src/IceTargetLoweringARM32.cpp
+++ b/src/IceTargetLoweringARM32.cpp
@@ -136,9 +136,8 @@
 };
 
 CondARM32::Cond getIcmp32Mapping(InstIcmp::ICond Cond) {
-  size_t Index = static_cast<size_t>(Cond);
-  assert(Index < llvm::array_lengthof(TableIcmp32));
-  return TableIcmp32[Index].Mapping;
+  assert(Cond < llvm::array_lengthof(TableIcmp32));
+  return TableIcmp32[Cond].Mapping;
 }
 
 // In some cases, there are x-macros tables for both high-level and low-level
@@ -3952,8 +3951,7 @@
 TargetARM32::CondWhenTrue
 TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
                                 Operand *Src1) {
-  size_t Index = static_cast<size_t>(Condition);
-  assert(Index < llvm::array_lengthof(TableIcmp64));
+  assert(Condition < llvm::array_lengthof(TableIcmp64));
 
   Int32Operands SrcsLo(loOperand(Src0), loOperand(Src1));
   Int32Operands SrcsHi(hiOperand(Src0), hiOperand(Src1));
@@ -3971,7 +3969,7 @@
       Variable *Src0HiR = SrcsHi.src0R(this);
       _orrs(T, Src0LoR, Src0HiR);
       Context.insert<InstFakeUse>(T);
-      return CondWhenTrue(TableIcmp64[Index].C1);
+      return CondWhenTrue(TableIcmp64[Condition].C1);
     }
 
     Variable *Src0RLo = SrcsLo.src0R(this);
@@ -3979,10 +3977,11 @@
     Operand *Src1RFLo = SrcsLo.src1RF(this);
     Operand *Src1RFHi = ValueLo == ValueHi ? Src1RFLo : SrcsHi.src1RF(this);
 
-    const bool UseRsb = TableIcmp64[Index].Swapped != SrcsLo.swappedOperands();
+    const bool UseRsb =
+        TableIcmp64[Condition].Swapped != SrcsLo.swappedOperands();
 
     if (UseRsb) {
-      if (TableIcmp64[Index].IsSigned) {
+      if (TableIcmp64[Condition].IsSigned) {
         Variable *T = makeReg(IceType_i32);
         _rsbs(T, Src0RLo, Src1RFLo);
         Context.insert<InstFakeUse>(T);
@@ -4003,7 +4002,7 @@
         Context.insert<InstFakeUse>(T);
       }
     } else {
-      if (TableIcmp64[Index].IsSigned) {
+      if (TableIcmp64[Condition].IsSigned) {
         _cmp(Src0RLo, Src1RFLo);
         Variable *T = makeReg(IceType_i32);
         _sbcs(T, Src0RHi, Src1RFHi);
@@ -4014,12 +4013,12 @@
       }
     }
 
-    return CondWhenTrue(TableIcmp64[Index].C1);
+    return CondWhenTrue(TableIcmp64[Condition].C1);
   }
 
   Variable *Src0RLo, *Src0RHi;
   Operand *Src1RFLo, *Src1RFHi;
-  if (TableIcmp64[Index].Swapped) {
+  if (TableIcmp64[Condition].Swapped) {
     Src0RLo = legalizeToReg(loOperand(Src1));
     Src0RHi = legalizeToReg(hiOperand(Src1));
     Src1RFLo = legalizeToReg(loOperand(Src0));
@@ -4060,7 +4059,7 @@
   //
   // So, we are going with the GCC version since it's usually better (except
   // perhaps for eq/ne). We could revisit special-casing eq/ne later.
-  if (TableIcmp64[Index].IsSigned) {
+  if (TableIcmp64[Condition].IsSigned) {
     Variable *ScratchReg = makeReg(IceType_i32);
     _cmp(Src0RLo, Src1RFLo);
     _sbcs(ScratchReg, Src0RHi, Src1RFHi);
@@ -4071,7 +4070,7 @@
     _cmp(Src0RHi, Src1RFHi);
     _cmp(Src0RLo, Src1RFLo, CondARM32::EQ);
   }
-  return CondWhenTrue(TableIcmp64[Index].C1);
+  return CondWhenTrue(TableIcmp64[Condition].C1);
 }
 
 TargetARM32::CondWhenTrue
diff --git a/src/IceTargetLoweringMIPS32.cpp b/src/IceTargetLoweringMIPS32.cpp
index ff93c4b..40d510d 100644
--- a/src/IceTargetLoweringMIPS32.cpp
+++ b/src/IceTargetLoweringMIPS32.cpp
@@ -1245,7 +1245,7 @@
     Context.insert<InstFakeDef>(Reg);
     return Reg;
   } else if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
-    uint32_t Value = static_cast<uint32_t>(C32->getValue());
+    const uint32_t Value = C32->getValue();
     // Check if the immediate will fit in a Flexible second operand,
     // if a Flexible second operand is allowed. We need to know the exact
     // value, so that rules out relocatable constants.
diff --git a/src/IceTargetLoweringX8632Traits.h b/src/IceTargetLoweringX8632Traits.h
index 897aa3b..53d50fb 100644
--- a/src/IceTargetLoweringX8632Traits.h
+++ b/src/IceTargetLoweringX8632Traits.h
@@ -733,9 +733,7 @@
   /// representation of the vector.
   static Type getInVectorElementType(Type Ty) {
     assert(isVectorType(Ty));
-    size_t Index = static_cast<size_t>(Ty);
-    (void)Index;
-    assert(Index < TableTypeX8632AttributesSize);
+    assert(Ty < TableTypeX8632AttributesSize);
     return TableTypeX8632Attributes[Ty].InVectorElementType;
   }
 
@@ -790,9 +788,8 @@
   /// @}
 
   static Cond::BrCond getIcmp32Mapping(InstIcmp::ICond Cond) {
-    size_t Index = static_cast<size_t>(Cond);
-    assert(Index < TableIcmp32Size);
-    return TableIcmp32[Index].Mapping;
+    assert(Cond < TableIcmp32Size);
+    return TableIcmp32[Cond].Mapping;
   }
 
   static const struct TableTypeX8632AttributesType {
@@ -959,7 +956,7 @@
     static SpillVariable *create(Cfg *Func, Type Ty, SizeT Index) {
       return new (Func->allocate<SpillVariable>()) SpillVariable(Ty, Index);
     }
-    const static OperandKind SpillVariableKind =
+    constexpr static auto SpillVariableKind =
         static_cast<OperandKind>(kVariable_Target);
     static bool classof(const Operand *Operand) {
       return Operand->getKind() == SpillVariableKind;
diff --git a/src/IceTargetLoweringX8664.cpp b/src/IceTargetLoweringX8664.cpp
index 3befd98..9b99e81 100644
--- a/src/IceTargetLoweringX8664.cpp
+++ b/src/IceTargetLoweringX8664.cpp
@@ -412,7 +412,7 @@
   if (Offset != nullptr) {
     if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Offset)) {
       NeedsLea = CR->getName() != "" || CR->getOffset() < 0;
-    } else if (const auto *Imm = llvm::cast<ConstantInteger32>(Offset)) {
+    } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Offset)) {
       NeedsLea = Imm->getValue() < 0;
     } else {
       llvm::report_fatal_error("Unexpected Offset type.");
diff --git a/src/IceTargetLoweringX8664Traits.h b/src/IceTargetLoweringX8664Traits.h
index 50a1544..336e3df 100644
--- a/src/IceTargetLoweringX8664Traits.h
+++ b/src/IceTargetLoweringX8664Traits.h
@@ -784,9 +784,7 @@
   /// representation of the vector.
   static Type getInVectorElementType(Type Ty) {
     assert(isVectorType(Ty));
-    size_t Index = static_cast<size_t>(Ty);
-    (void)Index;
-    assert(Index < TableTypeX8664AttributesSize);
+    assert(Ty < TableTypeX8664AttributesSize);
     return TableTypeX8664Attributes[Ty].InVectorElementType;
   }
 
@@ -841,9 +839,8 @@
   /// @}
 
   static Cond::BrCond getIcmp32Mapping(InstIcmp::ICond Cond) {
-    size_t Index = static_cast<size_t>(Cond);
-    assert(Index < TableIcmp32Size);
-    return TableIcmp32[Index].Mapping;
+    assert(Cond < TableIcmp32Size);
+    return TableIcmp32[Cond].Mapping;
   }
 
   static const struct TableTypeX8664AttributesType {
@@ -1004,7 +1001,7 @@
     static SpillVariable *create(Cfg *Func, Type Ty, SizeT Index) {
       return new (Func->allocate<SpillVariable>()) SpillVariable(Ty, Index);
     }
-    const static OperandKind SpillVariableKind =
+    constexpr static auto SpillVariableKind =
         static_cast<OperandKind>(kVariable_Target);
     static bool classof(const Operand *Operand) {
       return Operand->getKind() == SpillVariableKind;
diff --git a/src/IceTargetLoweringX86BaseImpl.h b/src/IceTargetLoweringX86BaseImpl.h
index 9b59329..8d838d4 100644
--- a/src/IceTargetLoweringX86BaseImpl.h
+++ b/src/IceTargetLoweringX86BaseImpl.h
@@ -2228,8 +2228,8 @@
       // Optimize division by constant power of 2, but not for Om1 or O0, just
       // to keep things simple there.
       if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) {
-        int32_t Divisor = C->getValue();
-        uint32_t UDivisor = static_cast<uint32_t>(Divisor);
+        const int32_t Divisor = C->getValue();
+        const uint32_t UDivisor = Divisor;
         if (Divisor > 0 && llvm::isPowerOf2_32(UDivisor)) {
           uint32_t LogDiv = llvm::Log2_32(UDivisor);
           // LLVM does the following for dest=src/(1<<log):
@@ -2318,8 +2318,8 @@
       // Optimize mod by constant power of 2, but not for Om1 or O0, just to
       // keep things simple there.
       if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) {
-        int32_t Divisor = C->getValue();
-        uint32_t UDivisor = static_cast<uint32_t>(Divisor);
+        const int32_t Divisor = C->getValue();
+        const uint32_t UDivisor = Divisor;
         if (Divisor > 0 && llvm::isPowerOf2_32(UDivisor)) {
           uint32_t LogDiv = llvm::Log2_32(UDivisor);
           // LLVM does the following for dest=src%(1<<log):
@@ -2434,15 +2434,15 @@
       break;
     case BoolFolding<Traits>::PK_Icmp32:
     case BoolFolding<Traits>::PK_Icmp64: {
-      lowerIcmpAndConsumer(llvm::dyn_cast<InstIcmp>(Producer), Br);
+      lowerIcmpAndConsumer(llvm::cast<InstIcmp>(Producer), Br);
       return;
     }
     case BoolFolding<Traits>::PK_Fcmp: {
-      lowerFcmpAndConsumer(llvm::dyn_cast<InstFcmp>(Producer), Br);
+      lowerFcmpAndConsumer(llvm::cast<InstFcmp>(Producer), Br);
       return;
     }
     case BoolFolding<Traits>::PK_Arith: {
-      lowerArithAndConsumer(llvm::dyn_cast<InstArithmetic>(Producer), Br);
+      lowerArithAndConsumer(llvm::cast<InstArithmetic>(Producer), Br);
       return;
     }
     }
@@ -3097,8 +3097,7 @@
 void TargetX86Base<TraitsType>::lowerExtractElement(
     const InstExtractElement *Instr) {
   Operand *SourceVectNotLegalized = Instr->getSrc(0);
-  ConstantInteger32 *ElementIndex =
-      llvm::dyn_cast<ConstantInteger32>(Instr->getSrc(1));
+  auto *ElementIndex = llvm::dyn_cast<ConstantInteger32>(Instr->getSrc(1));
   // Only constant indices are allowed in PNaCl IR.
   assert(ElementIndex);
 
@@ -3218,12 +3217,11 @@
   //   ucomiss b, c       /* but swap b,c order if SwapOperands==true */
   //   setcc a, C1
   InstFcmp::FCond Condition = Fcmp->getCondition();
-  size_t Index = static_cast<size_t>(Condition);
-  assert(Index < Traits::TableFcmpSize);
-  if (Traits::TableFcmp[Index].SwapScalarOperands)
+  assert(Condition < Traits::TableFcmpSize);
+  if (Traits::TableFcmp[Condition].SwapScalarOperands)
     std::swap(Src0, Src1);
-  bool HasC1 = (Traits::TableFcmp[Index].C1 != Traits::Cond::Br_None);
-  bool HasC2 = (Traits::TableFcmp[Index].C2 != Traits::Cond::Br_None);
+  const bool HasC1 = (Traits::TableFcmp[Condition].C1 != Traits::Cond::Br_None);
+  const bool HasC2 = (Traits::TableFcmp[Condition].C2 != Traits::Cond::Br_None);
   if (HasC1) {
     Src0 = legalize(Src0);
     Operand *Src1RM = legalize(Src1, Legal_Reg | Legal_Mem);
@@ -3231,20 +3229,20 @@
     _mov(T, Src0);
     _ucomiss(T, Src1RM);
     if (!HasC2) {
-      assert(Traits::TableFcmp[Index].Default);
-      setccOrConsumer(Traits::TableFcmp[Index].C1, Dest, Consumer);
+      assert(Traits::TableFcmp[Condition].Default);
+      setccOrConsumer(Traits::TableFcmp[Condition].C1, Dest, Consumer);
       return;
     }
   }
-  int32_t IntDefault = Traits::TableFcmp[Index].Default;
+  int32_t IntDefault = Traits::TableFcmp[Condition].Default;
   if (Consumer == nullptr) {
     Constant *Default = Ctx->getConstantInt(Dest->getType(), IntDefault);
     _mov(Dest, Default);
     if (HasC1) {
       InstX86Label *Label = InstX86Label::create(Func, this);
-      _br(Traits::TableFcmp[Index].C1, Label);
+      _br(Traits::TableFcmp[Condition].C1, Label);
       if (HasC2) {
-        _br(Traits::TableFcmp[Index].C2, Label);
+        _br(Traits::TableFcmp[Condition].C2, Label);
       }
       Constant *NonDefault = Ctx->getConstantInt(Dest->getType(), !IntDefault);
       _redefined(_mov(Dest, NonDefault));
@@ -3258,9 +3256,9 @@
     if (IntDefault != 0)
       std::swap(TrueSucc, FalseSucc);
     if (HasC1) {
-      _br(Traits::TableFcmp[Index].C1, FalseSucc);
+      _br(Traits::TableFcmp[Condition].C1, FalseSucc);
       if (HasC2) {
-        _br(Traits::TableFcmp[Index].C2, FalseSucc);
+        _br(Traits::TableFcmp[Condition].C2, FalseSucc);
       }
       _br(TrueSucc);
       return;
@@ -3277,9 +3275,9 @@
     lowerMove(SelectDest, SrcF, false);
     if (HasC1) {
       InstX86Label *Label = InstX86Label::create(Func, this);
-      _br(Traits::TableFcmp[Index].C1, Label);
+      _br(Traits::TableFcmp[Condition].C1, Label);
       if (HasC2) {
-        _br(Traits::TableFcmp[Index].C2, Label);
+        _br(Traits::TableFcmp[Condition].C2, Label);
       }
       static constexpr bool IsRedefinition = true;
       lowerMove(SelectDest, SrcT, IsRedefinition);
@@ -3300,10 +3298,9 @@
     llvm::report_fatal_error("Expected vector compare");
 
   InstFcmp::FCond Condition = Fcmp->getCondition();
-  size_t Index = static_cast<size_t>(Condition);
-  assert(Index < Traits::TableFcmpSize);
+  assert(Condition < Traits::TableFcmpSize);
 
-  if (Traits::TableFcmp[Index].SwapVectorOperands)
+  if (Traits::TableFcmp[Condition].SwapVectorOperands)
     std::swap(Src0, Src1);
 
   Variable *T = nullptr;
@@ -3321,7 +3318,7 @@
 
     switch (Condition) {
     default: {
-      CmppsCond Predicate = Traits::TableFcmp[Index].Predicate;
+      const CmppsCond Predicate = Traits::TableFcmp[Condition].Predicate;
       assert(Predicate != Traits::Cond::Cmpps_Invalid);
       T = makeReg(Src0RM->getType());
       _movp(T, Src0RM);
@@ -3523,8 +3520,7 @@
   Operand *Src1 = legalize(Icmp->getSrc(1));
   Variable *Dest = Icmp->getDest();
   InstIcmp::ICond Condition = Icmp->getCondition();
-  size_t Index = static_cast<size_t>(Condition);
-  assert(Index < Traits::TableIcmp64Size);
+  assert(Condition < Traits::TableIcmp64Size);
   Operand *Src0LoRM = nullptr;
   Operand *Src0HiRM = nullptr;
   // Legalize the portions of Src0 that are going to be needed.
@@ -3616,12 +3612,12 @@
     InstX86Label *LabelTrue = InstX86Label::create(Func, this);
     _mov(Dest, One);
     _cmp(Src0HiRM, Src1HiRI);
-    if (Traits::TableIcmp64[Index].C1 != Traits::Cond::Br_None)
-      _br(Traits::TableIcmp64[Index].C1, LabelTrue);
-    if (Traits::TableIcmp64[Index].C2 != Traits::Cond::Br_None)
-      _br(Traits::TableIcmp64[Index].C2, LabelFalse);
+    if (Traits::TableIcmp64[Condition].C1 != Traits::Cond::Br_None)
+      _br(Traits::TableIcmp64[Condition].C1, LabelTrue);
+    if (Traits::TableIcmp64[Condition].C2 != Traits::Cond::Br_None)
+      _br(Traits::TableIcmp64[Condition].C2, LabelFalse);
     _cmp(Src0LoRM, Src1LoRI);
-    _br(Traits::TableIcmp64[Index].C3, LabelTrue);
+    _br(Traits::TableIcmp64[Condition].C3, LabelTrue);
     Context.insert(LabelFalse);
     _redefined(_mov(Dest, Zero));
     Context.insert(LabelTrue);
@@ -3629,12 +3625,12 @@
   }
   if (const auto *Br = llvm::dyn_cast<InstBr>(Consumer)) {
     _cmp(Src0HiRM, Src1HiRI);
-    if (Traits::TableIcmp64[Index].C1 != Traits::Cond::Br_None)
-      _br(Traits::TableIcmp64[Index].C1, Br->getTargetTrue());
-    if (Traits::TableIcmp64[Index].C2 != Traits::Cond::Br_None)
-      _br(Traits::TableIcmp64[Index].C2, Br->getTargetFalse());
+    if (Traits::TableIcmp64[Condition].C1 != Traits::Cond::Br_None)
+      _br(Traits::TableIcmp64[Condition].C1, Br->getTargetTrue());
+    if (Traits::TableIcmp64[Condition].C2 != Traits::Cond::Br_None)
+      _br(Traits::TableIcmp64[Condition].C2, Br->getTargetFalse());
     _cmp(Src0LoRM, Src1LoRI);
-    _br(Traits::TableIcmp64[Index].C3, Br->getTargetTrue(),
+    _br(Traits::TableIcmp64[Condition].C3, Br->getTargetTrue(),
         Br->getTargetFalse());
     return;
   }
@@ -3646,12 +3642,12 @@
     InstX86Label *LabelTrue = InstX86Label::create(Func, this);
     lowerMove(SelectDest, SrcT, false);
     _cmp(Src0HiRM, Src1HiRI);
-    if (Traits::TableIcmp64[Index].C1 != Traits::Cond::Br_None)
-      _br(Traits::TableIcmp64[Index].C1, LabelTrue);
-    if (Traits::TableIcmp64[Index].C2 != Traits::Cond::Br_None)
-      _br(Traits::TableIcmp64[Index].C2, LabelFalse);
+    if (Traits::TableIcmp64[Condition].C1 != Traits::Cond::Br_None)
+      _br(Traits::TableIcmp64[Condition].C1, LabelTrue);
+    if (Traits::TableIcmp64[Condition].C2 != Traits::Cond::Br_None)
+      _br(Traits::TableIcmp64[Condition].C2, LabelFalse);
     _cmp(Src0LoRM, Src1LoRI);
-    _br(Traits::TableIcmp64[Index].C3, LabelTrue);
+    _br(Traits::TableIcmp64[Condition].C3, LabelTrue);
     Context.insert(LabelFalse);
     static constexpr bool IsRedefinition = true;
     lowerMove(SelectDest, SrcF, IsRedefinition);
@@ -3757,8 +3753,7 @@
     const InstInsertElement *Instr) {
   Operand *SourceVectNotLegalized = Instr->getSrc(0);
   Operand *ElementToInsertNotLegalized = Instr->getSrc(1);
-  ConstantInteger32 *ElementIndex =
-      llvm::dyn_cast<ConstantInteger32>(Instr->getSrc(2));
+  auto *ElementIndex = llvm::dyn_cast<ConstantInteger32>(Instr->getSrc(2));
   // Only constant indices are allowed in PNaCl IR.
   assert(ElementIndex);
   unsigned Index = ElementIndex->getValue();
@@ -5576,11 +5571,11 @@
       break;
     case BoolFolding<Traits>::PK_Icmp32:
     case BoolFolding<Traits>::PK_Icmp64: {
-      lowerIcmpAndConsumer(llvm::dyn_cast<InstIcmp>(Producer), Select);
+      lowerIcmpAndConsumer(llvm::cast<InstIcmp>(Producer), Select);
       return;
     }
     case BoolFolding<Traits>::PK_Fcmp: {
-      lowerFcmpAndConsumer(llvm::dyn_cast<InstFcmp>(Producer), Select);
+      lowerFcmpAndConsumer(llvm::cast<InstFcmp>(Producer), Select);
       return;
     }
     }
@@ -5624,10 +5619,10 @@
     SrcT = legalizeUndef(SrcT);
     SrcF = legalizeUndef(SrcF);
     // Set the low portion.
-    Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
+    auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
     lowerSelectIntMove(DestLo, Cond, loOperand(SrcT), loOperand(SrcF));
     // Set the high portion.
-    Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
+    auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
     lowerSelectIntMove(DestHi, Cond, hiOperand(SrcT), hiOperand(SrcF));
     return;
   }
@@ -5658,8 +5653,8 @@
     Src = legalize(Src);
     Operand *SrcLo = loOperand(Src);
     Operand *SrcHi = hiOperand(Src);
-    Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
-    Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
+    auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
+    auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
     Variable *T_Lo = nullptr, *T_Hi = nullptr;
     _mov(T_Lo, SrcLo);
     _redefined(_mov(DestLo, T_Lo), IsRedefinition);
@@ -6151,8 +6146,8 @@
     Src = legalizeUndef(Src);
     Operand *SrcLo = legalize(loOperand(Src), Legal_Reg | Legal_Imm);
     Operand *SrcHi = legalize(hiOperand(Src), Legal_Reg | Legal_Imm);
-    X86OperandMem *AddrLo = llvm::cast<X86OperandMem>(loOperand(Addr));
-    X86OperandMem *AddrHi = llvm::cast<X86OperandMem>(hiOperand(Addr));
+    auto *AddrLo = llvm::cast<X86OperandMem>(loOperand(Addr));
+    auto *AddrHi = llvm::cast<X86OperandMem>(hiOperand(Addr));
     switch (RMW->getOp()) {
     default:
       // TODO(stichnot): Implement other arithmetic operators.
diff --git a/src/IceTypes.cpp b/src/IceTypes.cpp
index 39eff2d..d1f270e 100644
--- a/src/IceTypes.cpp
+++ b/src/IceTypes.cpp
@@ -133,9 +133,8 @@
 } // end anonymous namespace
 
 const char *targetArchString(const TargetArch Arch) {
-  size_t Index = static_cast<size_t>(Arch);
-  if (Index < TargetArch_NUM)
-    return TargetArchName[Index];
+  if (Arch < TargetArch_NUM)
+    return TargetArchName[Arch];
   llvm_unreachable("Invalid target arch for targetArchString");
   return "???";
 }
@@ -146,121 +145,106 @@
 }
 
 int8_t typeWidthInBytesLog2(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypeAttributes[Index].TypeWidthInBytesLog2;
+  if (Ty < IceType_NUM)
+    return TypeAttributes[Ty].TypeWidthInBytesLog2;
   llvm_unreachable("Invalid type for typeWidthInBytesLog2()");
   return 0;
 }
 
 size_t typeAlignInBytes(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypeAttributes[Index].TypeAlignInBytes;
+  if (Ty < IceType_NUM)
+    return TypeAttributes[Ty].TypeAlignInBytes;
   llvm_unreachable("Invalid type for typeAlignInBytes()");
   return 1;
 }
 
 size_t typeNumElements(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypeAttributes[Index].TypeNumElements;
+  if (Ty < IceType_NUM)
+    return TypeAttributes[Ty].TypeNumElements;
   llvm_unreachable("Invalid type for typeNumElements()");
   return 1;
 }
 
 Type typeElementType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypeAttributes[Index].TypeElementType;
+  if (Ty < IceType_NUM)
+    return TypeAttributes[Ty].TypeElementType;
   llvm_unreachable("Invalid type for typeElementType()");
   return IceType_void;
 }
 
 bool isVectorType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsVectorType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsVectorType;
   llvm_unreachable("Invalid type for isVectorType()");
   return false;
 }
 
 bool isIntegerType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsIntegerType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsIntegerType;
   llvm_unreachable("Invalid type for isIntegerType()");
   return false;
 }
 
 bool isScalarIntegerType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsScalarIntegerType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsScalarIntegerType;
   llvm_unreachable("Invalid type for isScalIntegerType()");
   return false;
 }
 
 bool isVectorIntegerType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsVectorIntegerType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsVectorIntegerType;
   llvm_unreachable("Invalid type for isVectorIntegerType()");
   return false;
 }
 
 bool isIntegerArithmeticType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsIntegerArithmeticType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsIntegerArithmeticType;
   llvm_unreachable("Invalid type for isIntegerArithmeticType()");
   return false;
 }
 
 bool isFloatingType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsFloatingType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsFloatingType;
   llvm_unreachable("Invalid type for isFloatingType()");
   return false;
 }
 
 bool isScalarFloatingType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsScalarFloatingType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsScalarFloatingType;
   llvm_unreachable("Invalid type for isScalarFloatingType()");
   return false;
 }
 
 bool isVectorFloatingType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsVectorFloatingType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsVectorFloatingType;
   llvm_unreachable("Invalid type for isVectorFloatingType()");
   return false;
 }
 
 bool isLoadStoreType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsLoadStoreType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsLoadStoreType;
   llvm_unreachable("Invalid type for isLoadStoreType()");
   return false;
 }
 
 bool isCallParameterType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].TypeIsCallParameterType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].TypeIsCallParameterType;
   llvm_unreachable("Invalid type for isCallParameterType()");
   return false;
 }
 
 Type getCompareResultType(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypePropertiesTable[Index].CompareResultType;
+  if (Ty < IceType_NUM)
+    return TypePropertiesTable[Ty].CompareResultType;
   llvm_unreachable("Invalid type for getCompareResultType");
   return IceType_void;
 }
@@ -275,9 +259,8 @@
 // ======================== Dump routines ======================== //
 
 const char *typeString(Type Ty) {
-  size_t Index = static_cast<size_t>(Ty);
-  if (Index < IceType_NUM)
-    return TypeAttributes[Index].DisplayString;
+  if (Ty < IceType_NUM)
+    return TypeAttributes[Ty].DisplayString;
   llvm_unreachable("Invalid type for typeString");
   return "???";
 }
diff --git a/src/PNaClTranslator.cpp b/src/PNaClTranslator.cpp
index ed2a907..ed19f61 100644
--- a/src/PNaClTranslator.cpp
+++ b/src/PNaClTranslator.cpp
@@ -2737,7 +2737,7 @@
       appendErrorInstruction(ReturnType);
       return;
     }
-    bool IsTailCall = static_cast<bool>(CCInfo & 1);
+    const bool IsTailCall = (CCInfo & 1);
 
     // Create the call instruction.
     Ice::Variable *Dest = (ReturnType == Ice::IceType_void)