Specialize Subzero X86 backends for 32- or 64-bit

The Is64Bit trait is propagated, and dead code has been eliminated where
possible.
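
Before this change, both backends carried std::enable_if dispatch on
Traits::Is64Bit, as in the cqo() declarations removed from
IceAssemblerX8632.h below. A minimal, self-contained sketch of that
dispatch pattern (simplified names, not the actual SwiftShader classes):

    #include <cstdio>
    #include <type_traits>

    struct Traits32 { static constexpr bool Is64Bit = false; };
    struct Traits64 { static constexpr bool Is64Bit = true; };

    template <typename Traits> struct Assembler {
      // Selected only when the backend is 64-bit.
      template <typename T = Traits>
      typename std::enable_if<T::Is64Bit, void>::type cqo() {
        std::puts("emit 0x48 0x99"); // CQO is REX.W + 0x99
      }
      // 32-bit backends get a fatal-error stub instead.
      template <typename T = Traits>
      typename std::enable_if<!T::Is64Bit, void>::type cqo() {
        std::puts("fatal: CQO is only available in 64-bit x86 backends.");
      }
    };

    int main() {
      Assembler<Traits64>{}.cqo(); // resolves to the emitting overload
      Assembler<Traits32>{}.cqo(); // resolves to the stub
    }

After specialization, AssemblerX8664 keeps cqo() as an ordinary member
and AssemblerX8632 drops it entirely, so the enable_if machinery
disappears.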

Most notably, x86-64 doesn't require splitting 64-bit variables and
operations into two 32-bit ones.
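
For reference, the 32-bit lowering keeps each i64 value as a lo/hi pair
(Variable64On32, loOperand()/hiOperand() in IceTargetLoweringX8632.cpp).
A hedged sketch of what that pairing implies for a 64-bit add, using
made-up names (Subzero performs this split at lowering time, not at
runtime):

    #include <cstdint>

    struct U64On32 { uint32_t Lo, Hi; }; // analogue of Variable64On32

    // 64-bit add on a 32-bit target: add the low halves, then add the
    // high halves plus the carry out of the low add (the add/adc pattern).
    U64On32 add64(U64On32 A, U64On32 B) {
      U64On32 R;
      R.Lo = A.Lo + B.Lo;
      R.Hi = A.Hi + B.Hi + (R.Lo < A.Lo ? 1 : 0);
      return R;
    }

On x86-64 the same add is a single instruction on one GPR, which is why
this change can drop the splitting logic from the x86-64 backend while
the x86-32 lowering keeps loOperand()/hiOperand() as plain, non-template
members.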

Bug: b/192890685
Change-Id: I2276d12f838e49f63f2c1899062ef1137d688684
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/55529
Presubmit-Ready: Nicolas Capens <nicolascapens@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Tested-by: Nicolas Capens <nicolascapens@google.com>
Reviewed-by: Sean Risser <srisser@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
diff --git a/third_party/subzero/src/IceAssemblerX8632.cpp b/third_party/subzero/src/IceAssemblerX8632.cpp
index edd155d..492a48b 100644
--- a/third_party/subzero/src/IceAssemblerX8632.cpp
+++ b/third_party/subzero/src/IceAssemblerX8632.cpp
@@ -260,13 +260,6 @@
 }
 
 void AssemblerX8632::movzx(Type SrcTy, GPRRegister dst, GPRRegister src) {
-  if (Traits::Is64Bit && SrcTy == IceType_i32) {
-    // 32-bit mov clears the upper 32 bits, hence zero-extending the 32-bit
-    // operand to 64-bit.
-    mov(IceType_i32, dst, src);
-    return;
-  }
-
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
   bool ByteSized = isByteSizedType(SrcTy);
   assert(ByteSized || SrcTy == IceType_i16);
@@ -276,13 +269,6 @@
 }
 
 void AssemblerX8632::movzx(Type SrcTy, GPRRegister dst, const Address &src) {
-  if (Traits::Is64Bit && SrcTy == IceType_i32) {
-    // 32-bit mov clears the upper 32 bits, hence zero-extending the 32-bit
-    // operand to 64-bit.
-    mov(IceType_i32, dst, src);
-    return;
-  }
-
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
   bool ByteSized = isByteSizedType(SrcTy);
   assert(ByteSized || SrcTy == IceType_i16);
@@ -294,33 +280,24 @@
 void AssemblerX8632::movsx(Type SrcTy, GPRRegister dst, GPRRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
   bool ByteSized = isByteSizedType(SrcTy);
-  if (ByteSized || SrcTy == IceType_i16) {
-    emitUint8(0x0F);
-    emitUint8(ByteSized ? 0xBE : 0xBF);
-  } else {
-    assert(Traits::Is64Bit && SrcTy == IceType_i32);
-    emitUint8(0x63);
-  }
+  assert(ByteSized || SrcTy == IceType_i16);
+  emitUint8(0x0F);
+  emitUint8(ByteSized ? 0xBE : 0xBF);
   emitRegisterOperand(gprEncoding(dst), gprEncoding(src));
 }
 
 void AssemblerX8632::movsx(Type SrcTy, GPRRegister dst, const Address &src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
   bool ByteSized = isByteSizedType(SrcTy);
-  if (ByteSized || SrcTy == IceType_i16) {
-    emitUint8(0x0F);
-    emitUint8(ByteSized ? 0xBE : 0xBF);
-  } else {
-    assert(Traits::Is64Bit && SrcTy == IceType_i32);
-    emitUint8(0x63);
-  }
+  assert(ByteSized || SrcTy == IceType_i16);
+  emitUint8(0x0F);
+  emitUint8(ByteSized ? 0xBE : 0xBF);
   emitOperand(gprEncoding(dst), src);
 }
 
 void AssemblerX8632::lea(Type Ty, GPRRegister dst, const Address &src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitUint8(0x8D);
@@ -333,7 +310,7 @@
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   else
-    assert(Ty == IceType_i32 || (Traits::Is64Bit && Ty == IceType_i64));
+    assert(Ty == IceType_i32);
   emitUint8(0x0F);
   emitUint8(0x40 + cond);
   emitRegisterOperand(gprEncoding(dst), gprEncoding(src));
@@ -345,7 +322,7 @@
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   else
-    assert(Ty == IceType_i32 || (Traits::Is64Bit && Ty == IceType_i64));
+    assert(Ty == IceType_i32);
   emitUint8(0x0F);
   emitUint8(0x40 + cond);
   emitOperand(gprEncoding(dst), src);
@@ -2289,12 +2266,6 @@
   emitUint8(0x99);
 }
 
-template <typename T>
-typename std::enable_if<T::Is64Bit, void>::type AssemblerX8632::cqo() {
-  AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  emitUint8(0x99);
-}
-
 void AssemblerX8632::div(Type Ty, GPRRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
   if (Ty == IceType_i16)
@@ -2341,8 +2312,7 @@
 
 void AssemblerX8632::imul(Type Ty, GPRRegister dst, GPRRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitUint8(0x0F);
@@ -2352,8 +2322,7 @@
 
 void AssemblerX8632::imul(Type Ty, GPRRegister reg, const Address &address) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitUint8(0x0F);
@@ -2457,7 +2426,7 @@
   emitOperand(4, address);
 }
 
-template <typename, typename> void AssemblerX8632::incl(GPRRegister reg) {
+void AssemblerX8632::incl(GPRRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
   emitUint8(0x40 + reg);
 }
@@ -2468,7 +2437,7 @@
   emitOperand(0, address);
 }
 
-template <typename, typename> void AssemblerX8632::decl(GPRRegister reg) {
+void AssemblerX8632::decl(GPRRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
   emitUint8(0x48 + reg);
 }
@@ -2623,15 +2592,14 @@
 
 void AssemblerX8632::bswap(Type Ty, GPRRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i32 || (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i32);
   emitUint8(0x0F);
   emitUint8(0xC8 | gprEncoding(reg));
 }
 
 void AssemblerX8632::bsf(Type Ty, GPRRegister dst, GPRRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitUint8(0x0F);
@@ -2641,8 +2609,7 @@
 
 void AssemblerX8632::bsf(Type Ty, GPRRegister dst, const Address &src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitUint8(0x0F);
@@ -2652,8 +2619,7 @@
 
 void AssemblerX8632::bsr(Type Ty, GPRRegister dst, GPRRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitUint8(0x0F);
@@ -2663,8 +2629,7 @@
 
 void AssemblerX8632::bsr(Type Ty, GPRRegister dst, const Address &src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitUint8(0x0F);
diff --git a/third_party/subzero/src/IceAssemblerX8632.h b/third_party/subzero/src/IceAssemblerX8632.h
index 81a3c89..1f78bf9 100644
--- a/third_party/subzero/src/IceAssemblerX8632.h
+++ b/third_party/subzero/src/IceAssemblerX8632.h
@@ -577,12 +577,6 @@
   void cbw();
   void cwd();
   void cdq();
-  template <typename T = Traits>
-  typename std::enable_if<T::Is64Bit, void>::type cqo();
-  template <typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, void>::type cqo() {
-    llvm::report_fatal_error("CQO is only available in 64-bit x86 backends.");
-  }
 
   void div(Type Ty, GPRRegister reg);
   void div(Type Ty, const Address &address);
@@ -604,13 +598,9 @@
   void mul(Type Ty, GPRRegister reg);
   void mul(Type Ty, const Address &address);
 
-  template <class T = Traits,
-            typename = typename std::enable_if<!T::Is64Bit>::type>
   void incl(GPRRegister reg);
   void incl(const Address &address);
 
-  template <class T = Traits,
-            typename = typename std::enable_if<!T::Is64Bit>::type>
   void decl(GPRRegister reg);
   void decl(const Address &address);
 
@@ -749,18 +739,9 @@
   template <uint32_t Tag>
   void arith_int(Type Ty, const Address &address, const Immediate &imm);
 
-  // gprEncoding returns Reg encoding for operand emission. For x86-64 we mask
-  // out the 4th bit as it is encoded in the REX.[RXB] bits. No other bits are
-  // touched because we don't want to mask errors.
+  // gprEncoding returns Reg encoding for operand emission.
   template <typename RegType, typename T = Traits>
-  typename std::enable_if<T::Is64Bit, typename T::GPRRegister>::type
-  gprEncoding(const RegType Reg) {
-    return static_cast<GPRRegister>(static_cast<uint8_t>(Reg) & ~0x08);
-  }
-
-  template <typename RegType, typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, typename T::GPRRegister>::type
-  gprEncoding(const RegType Reg) {
+  typename T::GPRRegister gprEncoding(const RegType Reg) {
     return static_cast<typename T::GPRRegister>(Reg);
   }
 };
diff --git a/third_party/subzero/src/IceAssemblerX8664.cpp b/third_party/subzero/src/IceAssemblerX8664.cpp
index f493a45..4182654 100644
--- a/third_party/subzero/src/IceAssemblerX8664.cpp
+++ b/third_party/subzero/src/IceAssemblerX8664.cpp
@@ -276,7 +276,7 @@
 }
 
 void AssemblerX8664::movzx(Type SrcTy, GPRRegister dst, GPRRegister src) {
-  if (Traits::Is64Bit && SrcTy == IceType_i32) {
+  if (SrcTy == IceType_i32) {
     // 32-bit mov clears the upper 32 bits, hence zero-extending the 32-bit
     // operand to 64-bit.
     mov(IceType_i32, dst, src);
@@ -293,7 +293,7 @@
 }
 
 void AssemblerX8664::movzx(Type SrcTy, GPRRegister dst, const Address &src) {
-  if (Traits::Is64Bit && SrcTy == IceType_i32) {
+  if (SrcTy == IceType_i32) {
     // 32-bit mov clears the upper 32 bits, hence zero-extending the 32-bit
     // operand to 64-bit.
     mov(IceType_i32, dst, src);
@@ -317,7 +317,7 @@
     emitUint8(0x0F);
     emitUint8(ByteSized ? 0xBE : 0xBF);
   } else {
-    assert(Traits::Is64Bit && SrcTy == IceType_i32);
+    assert(SrcTy == IceType_i32);
     emitUint8(0x63);
   }
   emitRegisterOperand(gprEncoding(dst), gprEncoding(src));
@@ -331,7 +331,7 @@
     emitUint8(0x0F);
     emitUint8(ByteSized ? 0xBE : 0xBF);
   } else {
-    assert(Traits::Is64Bit && SrcTy == IceType_i32);
+    assert(SrcTy == IceType_i32);
     emitUint8(0x63);
   }
   emitOperand(gprEncoding(dst), src);
@@ -339,8 +339,7 @@
 
 void AssemblerX8664::lea(Type Ty, GPRRegister dst, const Address &src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32 || Ty == IceType_i64);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitRex(Ty, src, dst);
@@ -354,7 +353,7 @@
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   else
-    assert(Ty == IceType_i32 || (Traits::Is64Bit && Ty == IceType_i64));
+    assert(Ty == IceType_i32 || Ty == IceType_i64);
   emitRexRB(Ty, dst, src);
   emitUint8(0x0F);
   emitUint8(0x40 + cond);
@@ -367,7 +366,7 @@
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   else
-    assert(Ty == IceType_i32 || (Traits::Is64Bit && Ty == IceType_i64));
+    assert(Ty == IceType_i32 || Ty == IceType_i64);
   emitRex(Ty, src, dst);
   emitUint8(0x0F);
   emitUint8(0x40 + cond);
@@ -2466,8 +2465,7 @@
 
 void AssemblerX8664::imul(Type Ty, GPRRegister dst, GPRRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32 || Ty == IceType_i64);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitRexRB(Ty, dst, src);
@@ -2478,8 +2476,7 @@
 
 void AssemblerX8664::imul(Type Ty, GPRRegister reg, const Address &address) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32 || Ty == IceType_i64);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitRex(Ty, address, reg);
@@ -2758,7 +2755,7 @@
 
 void AssemblerX8664::bswap(Type Ty, GPRRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i32 || (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i32 || Ty == IceType_i64);
   emitRexB(Ty, reg);
   emitUint8(0x0F);
   emitUint8(0xC8 | gprEncoding(reg));
@@ -2766,8 +2763,7 @@
 
 void AssemblerX8664::bsf(Type Ty, GPRRegister dst, GPRRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32 || Ty == IceType_i64);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitRexRB(Ty, dst, src);
@@ -2778,8 +2774,7 @@
 
 void AssemblerX8664::bsf(Type Ty, GPRRegister dst, const Address &src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32 || Ty == IceType_i64);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitRex(Ty, src, dst);
@@ -2790,8 +2785,7 @@
 
 void AssemblerX8664::bsr(Type Ty, GPRRegister dst, GPRRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32 || Ty == IceType_i64);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitRexRB(Ty, dst, src);
@@ -2802,8 +2796,7 @@
 
 void AssemblerX8664::bsr(Type Ty, GPRRegister dst, const Address &src) {
   AssemblerBuffer::EnsureCapacity ensured(&Buffer);
-  assert(Ty == IceType_i16 || Ty == IceType_i32 ||
-         (Traits::Is64Bit && Ty == IceType_i64));
+  assert(Ty == IceType_i16 || Ty == IceType_i32 || Ty == IceType_i64);
   if (Ty == IceType_i16)
     emitOperandSizeOverride();
   emitRex(Ty, src, dst);
diff --git a/third_party/subzero/src/IceAssemblerX8664.h b/third_party/subzero/src/IceAssemblerX8664.h
index 5d1219d..1bc0d17 100644
--- a/third_party/subzero/src/IceAssemblerX8664.h
+++ b/third_party/subzero/src/IceAssemblerX8664.h
@@ -718,18 +718,10 @@
   // gprEncoding returns Reg encoding for operand emission. For x86-64 we mask
   // out the 4th bit as it is encoded in the REX.[RXB] bits. No other bits are
   // touched because we don't want to mask errors.
-  template <typename RegType, typename T = Traits>
-  typename std::enable_if<T::Is64Bit, typename T::GPRRegister>::type
-  gprEncoding(const RegType Reg) {
+  template <typename RegType> GPRRegister gprEncoding(const RegType Reg) {
     return static_cast<GPRRegister>(static_cast<uint8_t>(Reg) & ~0x08);
   }
 
-  template <typename RegType, typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, typename T::GPRRegister>::type
-  gprEncoding(const RegType Reg) {
-    return static_cast<typename T::GPRRegister>(Reg);
-  }
-
   template <typename RegType>
   bool is8BitRegisterRequiringRex(const Type Ty, const RegType Reg) {
     static constexpr bool IsGPR =
@@ -755,36 +747,30 @@
   // mov[sz]x instructions.) If Addr is not nullptr, then Rm is ignored, and
   // Rex.B is determined by Addr instead. TyRm is still used to determine
   // Addr's size.
-  template <typename RegType, typename RmType, typename T = Traits>
-  typename std::enable_if<T::Is64Bit, void>::type
-  assembleAndEmitRex(const Type TyReg, const RegType Reg, const Type TyRm,
-                     const RmType Rm,
-                     const typename T::Address *Addr = nullptr) {
+  template <typename RegType, typename RmType>
+  void assembleAndEmitRex(const Type TyReg, const RegType Reg, const Type TyRm,
+                          const RmType Rm,
+                          const Address *Addr = nullptr) {
     const uint8_t W = (TyReg == IceType_i64 || TyRm == IceType_i64)
-                          ? T::Operand::RexW
-                          : T::Operand::RexNone;
-    const uint8_t R = (Reg & 0x08) ? T::Operand::RexR : T::Operand::RexNone;
+                          ? Operand::RexW
+                          : Operand::RexNone;
+    const uint8_t R = (Reg & 0x08) ? Operand::RexR : Operand::RexNone;
     const uint8_t X = (Addr != nullptr)
-                          ? (typename T::Operand::RexBits)Addr->rexX()
-                          : T::Operand::RexNone;
+                          ? (typename Operand::RexBits)Addr->rexX()
+                          : Operand::RexNone;
     const uint8_t B = (Addr != nullptr)
-                          ? (typename T::Operand::RexBits)Addr->rexB()
-                      : (Rm & 0x08) ? T::Operand::RexB
-                                    : T::Operand::RexNone;
+                          ? (typename Operand::RexBits)Addr->rexB()
+                      : (Rm & 0x08) ? Operand::RexB
+                                    : Operand::RexNone;
     const uint8_t Prefix = W | R | X | B;
-    if (Prefix != T::Operand::RexNone) {
+    if (Prefix != Operand::RexNone) {
       emitUint8(Prefix);
     } else if (is8BitRegisterRequiringRex(TyReg, Reg) ||
                (Addr == nullptr && is8BitRegisterRequiringRex(TyRm, Rm))) {
-      emitUint8(T::Operand::RexBase);
+      emitUint8(Operand::RexBase);
     }
   }
 
-  template <typename RegType, typename RmType, typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, void>::type
-  assembleAndEmitRex(const Type, const RegType, const Type, const RmType,
-                     const typename T::Address * = nullptr) {}
-
   // emitRexRB is used for emitting a Rex prefix instructions with two
   // explicit register operands in its mod-rm byte.
   template <typename RegType, typename RmType>
diff --git a/third_party/subzero/src/IceInstX8632.cpp b/third_party/subzero/src/IceInstX8632.cpp
index 3b8fb22..5c88269 100644
--- a/third_party/subzero/src/IceInstX8632.cpp
+++ b/third_party/subzero/src/IceInstX8632.cpp
@@ -453,15 +453,6 @@
   Ostream &Str = Func->getContext()->getStrEmit();
   assert(this->getSrcSize() == 1);
   const Operand *Src = this->getSrc(0);
-  if (Traits::Is64Bit) {
-    if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src)) {
-      Str << "\t"
-             "jmp"
-             "\t"
-          << CR->getName();
-      return;
-    }
-  }
   Str << "\t"
          "jmp"
          "\t*";
@@ -630,10 +621,6 @@
                               Mem->toAsmAddress(Asm, Target, IsLea));
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
     (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
-  } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger64>(Src)) {
-    assert(Traits::Is64Bit);
-    assert(Utils::IsInt(32, Imm->getValue()));
-    (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
   } else if (const auto *Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) {
     const auto FixupKind = (Reloc->getName().hasStdString() &&
                             Reloc->getName().toString() == GlobalOffsetTable)
@@ -658,10 +645,6 @@
     (Asm->*(Emitter.AddrGPR))(Ty, Addr, SrcReg);
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
     (Asm->*(Emitter.AddrImm))(Ty, Addr, AssemblerImmediate(Imm->getValue()));
-  } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger64>(Src)) {
-    assert(Traits::Is64Bit);
-    assert(Utils::IsInt(32, Imm->getValue()));
-    (Asm->*(Emitter.AddrImm))(Ty, Addr, AssemblerImmediate(Imm->getValue()));
   } else if (const auto *Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) {
     const auto FixupKind = (Reloc->getName().hasStdString() &&
                             Reloc->getName().toString() == GlobalOffsetTable)
@@ -708,10 +691,6 @@
     (Asm->*(Emitter.GPRGPR))(Ty, VarReg, SrcReg);
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
     (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
-  } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger64>(Src)) {
-    assert(Traits::Is64Bit);
-    assert(Utils::IsInt(32, Imm->getValue()));
-    (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
   } else {
     llvm_unreachable("Unexpected operand type");
   }
@@ -919,11 +898,7 @@
   const Type SrcTy = Src->getType();
   assert(isVectorType(SrcTy));
   assert(isScalarIntegerType(DestTy));
-  if (Traits::Is64Bit) {
-    assert(DestTy == IceType_i32 || DestTy == IceType_i64);
-  } else {
-    assert(typeWidthInBytes(DestTy) <= 4);
-  }
+  assert(typeWidthInBytes(DestTy) <= 4);
   XmmRegister SrcReg = Traits::getEncodedXmm(Src->getRegNum());
   GPRRegister DestReg = Traits::getEncodedGPR(Dest->getRegNum());
   Asm->movmsk(SrcTy, DestReg, SrcReg);
@@ -1156,13 +1131,6 @@
     Str << "\t"
            "cltd";
     break;
-  case IceType_i64:
-    assert(Traits::Is64Bit);
-    assert(SrcReg == Traits::getRaxOrDie());
-    assert(DestReg == Traits::getRdxOrDie());
-    Str << "\t"
-           "cqo";
-    break;
   }
 }
 
@@ -1193,12 +1161,6 @@
     assert(DestReg == RegisterSet::Reg_edx);
     Asm->cdq();
     break;
-  case IceType_i64:
-    assert(Traits::Is64Bit);
-    assert(SrcReg == Traits::getRaxOrDie());
-    assert(DestReg == Traits::getRdxOrDie());
-    Asm->cqo();
-    break;
   }
 }
 
@@ -1335,7 +1297,7 @@
   assert(this->getSrcSize() == 2);
   Operand *Src = this->getSrc(1);
   Type SrcTy = Src->getType();
-  assert(SrcTy == IceType_i16 || SrcTy == IceType_i32 || (Traits::Is64Bit));
+  assert(SrcTy == IceType_i16 || SrcTy == IceType_i32);
   Assembler *Asm = Func->getAssembler<Assembler>();
   auto *Target = InstX86Base::getTarget(Func);
   if (const auto *SrcVar = llvm::dyn_cast<Variable>(Src)) {
@@ -1520,11 +1482,7 @@
   switch (Variant) {
   case Si2ss: {
     assert(isScalarIntegerType(SrcTy));
-    if (!Traits::Is64Bit) {
-      assert(typeWidthInBytes(SrcTy) <= 4);
-    } else {
-      assert(SrcTy == IceType_i32 || SrcTy == IceType_i64);
-    }
+    assert(typeWidthInBytes(SrcTy) <= 4);
     assert(isScalarFloatingType(DestTy));
     static const CastEmitterRegOp<XmmRegister, GPRRegister> Emitter = {
         &Assembler::cvtsi2ss, &Assembler::cvtsi2ss};
@@ -1536,11 +1494,7 @@
   case Tss2si: {
     assert(isScalarFloatingType(SrcTy));
     assert(isScalarIntegerType(DestTy));
-    if (Traits::Is64Bit) {
-      assert(DestTy == IceType_i32 || DestTy == IceType_i64);
-    } else {
-      assert(typeWidthInBytes(DestTy) <= 4);
-    }
+    assert(typeWidthInBytes(DestTy) <= 4);
     static const CastEmitterRegOp<GPRRegister, XmmRegister> Emitter = {
         &Assembler::cvttss2si, &Assembler::cvttss2si};
     emitIASCastRegOp<GPRRegister, XmmRegister, Traits::getEncodedGPR,
@@ -1551,11 +1505,7 @@
   case Ss2si: {
     assert(isScalarFloatingType(SrcTy));
     assert(isScalarIntegerType(DestTy));
-    if (Traits::Is64Bit) {
-      assert(DestTy == IceType_i32 || DestTy == IceType_i64);
-    } else {
-      assert(typeWidthInBytes(DestTy) <= 4);
-    }
+    assert(typeWidthInBytes(DestTy) <= 4);
     static const CastEmitterRegOp<GPRRegister, XmmRegister> Emitter = {
         &Assembler::cvtss2si, &Assembler::cvtss2si};
     emitIASCastRegOp<GPRRegister, XmmRegister, Traits::getEncodedGPR,
@@ -2047,20 +1997,12 @@
   Operand *Src = this->getSrc(0);
   Type SrcTy = Src->getType();
   Type DestTy = this->getDest()->getType();
-  if (Traits::Is64Bit && DestTy == IceType_i64 &&
-      llvm::isa<ConstantInteger64>(Src) &&
-      !Utils::IsInt(32, llvm::cast<ConstantInteger64>(Src)->getValue())) {
-    Str << "\t"
-           "movabs"
-           "\t";
-  } else {
-    Str << "\t"
-           "mov"
-        << (!isScalarFloatingType(DestTy)
-                ? this->getWidthString(DestTy)
-                : Traits::TypeAttributes[DestTy].SdSsString)
-        << "\t";
-  }
+  Str << "\t"
+         "mov"
+      << (!isScalarFloatingType(DestTy)
+              ? this->getWidthString(DestTy)
+              : Traits::TypeAttributes[DestTy].SdSsString)
+      << "\t";
   // For an integer truncation operation, src is wider than dest. In this case,
   // we use a mov instruction whose data width matches the narrower dest.
   // TODO: This assert disallows usages such as copying a floating
@@ -2180,8 +2122,7 @@
   // For insert/extract element (one of Src/Dest is an Xmm vector and the other
   // is an int type).
   if (const auto *SrcVar = llvm::dyn_cast<Variable>(this->getSrc(0))) {
-    if (SrcVar->getType() == IceType_i32 ||
-        (Traits::Is64Bit && SrcVar->getType() == IceType_i64)) {
+    if (SrcVar->getType() == IceType_i32) {
       assert(isVectorType(Dest->getType()) ||
              (isScalarFloatingType(Dest->getType()) &&
               typeWidthInBytes(SrcVar->getType()) ==
@@ -2201,8 +2142,7 @@
               typeWidthInBytes(SrcVar->getType()) ==
                   typeWidthInBytes(Dest->getType())));
       assert(SrcVar->hasReg());
-      assert(Dest->getType() == IceType_i32 ||
-             (Traits::Is64Bit && Dest->getType() == IceType_i64));
+      assert(Dest->getType() == IceType_i32);
       XmmRegister SrcReg = Traits::getEncodedXmm(SrcVar->getRegNum());
       if (Dest->hasReg()) {
         Asm->movd(Dest->getType(), Traits::getEncodedGPR(Dest->getRegNum()),
@@ -2299,56 +2239,9 @@
   emitIASRegOpTyGPR<false, true>(Func, NotLea, SrcTy, Dest, Src, this->Emitter);
 }
 
-bool InstX86Movzx::mayBeElided(const Variable *Dest,
-                               const Operand *SrcOpnd) const {
-  assert(Traits::Is64Bit);
-  const auto *Src = llvm::dyn_cast<Variable>(SrcOpnd);
-
-  // Src is not a Variable, so it does not have a register. Movzx can't be
-  // elided.
-  if (Src == nullptr)
-    return false;
-
-  // Movzx to/from memory can't be elided.
-  if (!Src->hasReg() || !Dest->hasReg())
-    return false;
-
-  // Reg/reg move with different source and dest can't be elided.
-  if (Traits::getEncodedGPR(Src->getRegNum()) !=
-      Traits::getEncodedGPR(Dest->getRegNum()))
-    return false;
-
-  // A must-keep movzx 32- to 64-bit is sometimes needed in x86-64 sandboxing.
-  return !MustKeep;
-}
-
 void InstX86Movzx::emit(const Cfg *Func) const {
   if (!BuildDefs::dump())
     return;
-  if (Traits::Is64Bit) {
-    // There's no movzx %eXX, %rXX. To zero extend 32- to 64-bits, we emit a
-    // mov %eXX, %eXX. The processor will still do a movzx[bw]q.
-    assert(this->getSrcSize() == 1);
-    const Operand *Src = this->getSrc(0);
-    const Variable *Dest = this->Dest;
-    if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64) {
-      Ostream &Str = Func->getContext()->getStrEmit();
-      if (mayBeElided(Dest, Src)) {
-        Str << "\t/* elided movzx */";
-      } else {
-        Str << "\t"
-               "mov"
-               "\t";
-        Src->emit(Func);
-        Str << ", ";
-        Dest->asType(Func, IceType_i32,
-                     Traits::getGprForType(IceType_i32, Dest->getRegNum()))
-            ->emit(Func);
-        Str << " /* movzx */";
-      }
-      return;
-    }
-  }
   InstX86BaseUnaryopGPR<InstX86Base::Movzx>::emit(Func);
 }
 
@@ -2359,12 +2252,6 @@
   Type SrcTy = Src->getType();
   assert(typeWidthInBytes(Dest->getType()) > 1);
   assert(typeWidthInBytes(Dest->getType()) > typeWidthInBytes(SrcTy));
-  if (Traits::Is64Bit) {
-    if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64 &&
-        mayBeElided(Dest, Src)) {
-      return;
-    }
-  }
   constexpr bool NotLea = false;
   emitIASRegOpTyGPR<false, true>(Func, NotLea, SrcTy, Dest, Src, this->Emitter);
 }
diff --git a/third_party/subzero/src/IceInstX8632.h b/third_party/subzero/src/IceInstX8632.h
index f0117c9..96e13cd 100644
--- a/third_party/subzero/src/IceInstX8632.h
+++ b/third_party/subzero/src/IceInstX8632.h
@@ -28,7 +28,6 @@
 #include "IceTargetLoweringX86.h"
 
 namespace Ice {
-
 namespace X8632 {
 
 using Traits = TargetX8632Traits;
@@ -1183,8 +1182,6 @@
 
   InstX86Movzx(Cfg *Func, Variable *Dest, Operand *Src)
       : InstX86BaseUnaryopGPR<InstX86Base::Movzx>(Func, Dest, Src) {}
-
-  bool mayBeElided(const Variable *Dest, const Operand *Src) const;
 };
 
 class InstX86Movd : public InstX86BaseUnaryopXmm<InstX86Base::Movd> {
diff --git a/third_party/subzero/src/IceInstX8664.cpp b/third_party/subzero/src/IceInstX8664.cpp
index 36fbb25..e519dbf 100644
--- a/third_party/subzero/src/IceInstX8664.cpp
+++ b/third_party/subzero/src/IceInstX8664.cpp
@@ -446,14 +446,12 @@
   Ostream &Str = Func->getContext()->getStrEmit();
   assert(this->getSrcSize() == 1);
   const Operand *Src = this->getSrc(0);
-  if (Traits::Is64Bit) {
-    if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src)) {
-      Str << "\t"
-             "jmp"
-             "\t"
-          << CR->getName();
-      return;
-    }
+  if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src)) {
+    Str << "\t"
+           "jmp"
+           "\t"
+        << CR->getName();
+    return;
   }
   Str << "\t"
          "jmp"
@@ -624,7 +622,6 @@
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
     (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger64>(Src)) {
-    assert(Traits::Is64Bit);
     assert(Utils::IsInt(32, Imm->getValue()));
     (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
   } else if (const auto *Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) {
@@ -652,7 +649,6 @@
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
     (Asm->*(Emitter.AddrImm))(Ty, Addr, AssemblerImmediate(Imm->getValue()));
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger64>(Src)) {
-    assert(Traits::Is64Bit);
     assert(Utils::IsInt(32, Imm->getValue()));
     (Asm->*(Emitter.AddrImm))(Ty, Addr, AssemblerImmediate(Imm->getValue()));
   } else if (const auto *Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) {
@@ -702,7 +698,6 @@
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
     (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
   } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger64>(Src)) {
-    assert(Traits::Is64Bit);
     assert(Utils::IsInt(32, Imm->getValue()));
     (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
   } else {
@@ -912,11 +907,7 @@
   const Type SrcTy = Src->getType();
   assert(isVectorType(SrcTy));
   assert(isScalarIntegerType(DestTy));
-  if (Traits::Is64Bit) {
-    assert(DestTy == IceType_i32 || DestTy == IceType_i64);
-  } else {
-    assert(typeWidthInBytes(DestTy) <= 4);
-  }
+  assert(DestTy == IceType_i32 || DestTy == IceType_i64);
   XmmRegister SrcReg = Traits::getEncodedXmm(Src->getRegNum());
   GPRRegister DestReg = Traits::getEncodedGPR(Dest->getRegNum());
   Asm->movmsk(SrcTy, DestReg, SrcReg);
@@ -1150,7 +1141,6 @@
            "cltd";
     break;
   case IceType_i64:
-    assert(Traits::Is64Bit);
     assert(SrcReg == Traits::getRaxOrDie());
     assert(DestReg == Traits::getRdxOrDie());
     Str << "\t"
@@ -1187,7 +1177,6 @@
     Asm->cdq();
     break;
   case IceType_i64:
-    assert(Traits::Is64Bit);
     assert(SrcReg == Traits::getRaxOrDie());
     assert(DestReg == Traits::getRdxOrDie());
     Asm->cqo();
@@ -1328,7 +1317,6 @@
   assert(this->getSrcSize() == 2);
   Operand *Src = this->getSrc(1);
   Type SrcTy = Src->getType();
-  assert(SrcTy == IceType_i16 || SrcTy == IceType_i32 || (Traits::Is64Bit));
   Assembler *Asm = Func->getAssembler<Assembler>();
   auto *Target = InstX86Base::getTarget(Func);
   if (const auto *SrcVar = llvm::dyn_cast<Variable>(Src)) {
@@ -1513,11 +1501,7 @@
   switch (Variant) {
   case Si2ss: {
     assert(isScalarIntegerType(SrcTy));
-    if (!Traits::Is64Bit) {
-      assert(typeWidthInBytes(SrcTy) <= 4);
-    } else {
-      assert(SrcTy == IceType_i32 || SrcTy == IceType_i64);
-    }
+    assert(SrcTy == IceType_i32 || SrcTy == IceType_i64);
     assert(isScalarFloatingType(DestTy));
     static const CastEmitterRegOp<XmmRegister, GPRRegister> Emitter = {
         &Assembler::cvtsi2ss, &Assembler::cvtsi2ss};
@@ -1529,11 +1513,7 @@
   case Tss2si: {
     assert(isScalarFloatingType(SrcTy));
     assert(isScalarIntegerType(DestTy));
-    if (Traits::Is64Bit) {
-      assert(DestTy == IceType_i32 || DestTy == IceType_i64);
-    } else {
-      assert(typeWidthInBytes(DestTy) <= 4);
-    }
+    assert(DestTy == IceType_i32 || DestTy == IceType_i64);
     static const CastEmitterRegOp<GPRRegister, XmmRegister> Emitter = {
         &Assembler::cvttss2si, &Assembler::cvttss2si};
     emitIASCastRegOp<GPRRegister, XmmRegister, Traits::getEncodedGPR,
@@ -1544,11 +1524,7 @@
   case Ss2si: {
     assert(isScalarFloatingType(SrcTy));
     assert(isScalarIntegerType(DestTy));
-    if (Traits::Is64Bit) {
-      assert(DestTy == IceType_i32 || DestTy == IceType_i64);
-    } else {
-      assert(typeWidthInBytes(DestTy) <= 4);
-    }
+    assert(DestTy == IceType_i32 || DestTy == IceType_i64);
     static const CastEmitterRegOp<GPRRegister, XmmRegister> Emitter = {
         &Assembler::cvtss2si, &Assembler::cvtss2si};
     emitIASCastRegOp<GPRRegister, XmmRegister, Traits::getEncodedGPR,
@@ -2040,8 +2016,7 @@
   Operand *Src = this->getSrc(0);
   Type SrcTy = Src->getType();
   Type DestTy = this->getDest()->getType();
-  if (Traits::Is64Bit && DestTy == IceType_i64 &&
-      llvm::isa<ConstantInteger64>(Src) &&
+  if (DestTy == IceType_i64 && llvm::isa<ConstantInteger64>(Src) &&
       !Utils::IsInt(32, llvm::cast<ConstantInteger64>(Src)->getValue())) {
     Str << "\t"
            "movabs"
@@ -2108,7 +2083,7 @@
       assert(isScalarIntegerType(DestTy));
       // Widen DestTy for truncation (see above note). We should only do this
       // when both Src and Dest are integer types.
-      if (Traits::Is64Bit && DestTy == IceType_i64) {
+      if (DestTy == IceType_i64) {
         if (const auto *C64 = llvm::dyn_cast<ConstantInteger64>(Src)) {
           Func->getAssembler<Assembler>()->movabs(
               Traits::getEncodedGPR(Dest->getRegNum()), C64->getValue());
@@ -2180,8 +2155,7 @@
   // For insert/extract element (one of Src/Dest is an Xmm vector and the other
   // is an int type).
   if (const auto *SrcVar = llvm::dyn_cast<Variable>(this->getSrc(0))) {
-    if (SrcVar->getType() == IceType_i32 ||
-        (Traits::Is64Bit && SrcVar->getType() == IceType_i64)) {
+    if (SrcVar->getType() == IceType_i32 || SrcVar->getType() == IceType_i64) {
       assert(isVectorType(Dest->getType()) ||
              (isScalarFloatingType(Dest->getType()) &&
               typeWidthInBytes(SrcVar->getType()) ==
@@ -2201,8 +2175,7 @@
               typeWidthInBytes(SrcVar->getType()) ==
                   typeWidthInBytes(Dest->getType())));
       assert(SrcVar->hasReg());
-      assert(Dest->getType() == IceType_i32 ||
-             (Traits::Is64Bit && Dest->getType() == IceType_i64));
+      assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i64);
       XmmRegister SrcReg = Traits::getEncodedXmm(SrcVar->getRegNum());
       if (Dest->hasReg()) {
         Asm->movd(Dest->getType(), Traits::getEncodedGPR(Dest->getRegNum()),
@@ -2301,7 +2274,6 @@
 
 bool InstX86Movzx::mayBeElided(const Variable *Dest,
                                const Operand *SrcOpnd) const {
-  assert(Traits::Is64Bit);
   const auto *Src = llvm::dyn_cast<Variable>(SrcOpnd);
 
   // Src is not a Variable, so it does not have a register. Movzx can't be
@@ -2325,29 +2297,27 @@
 void InstX86Movzx::emit(const Cfg *Func) const {
   if (!BuildDefs::dump())
     return;
-  if (Traits::Is64Bit) {
-    // There's no movzx %eXX, %rXX. To zero extend 32- to 64-bits, we emit a
-    // mov %eXX, %eXX. The processor will still do a movzx[bw]q.
-    assert(this->getSrcSize() == 1);
-    const Operand *Src = this->getSrc(0);
-    const Variable *Dest = this->Dest;
-    if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64) {
-      Ostream &Str = Func->getContext()->getStrEmit();
-      if (mayBeElided(Dest, Src)) {
-        Str << "\t/* elided movzx */";
-      } else {
-        Str << "\t"
-               "mov"
-               "\t";
-        Src->emit(Func);
-        Str << ", ";
-        Dest->asType(Func, IceType_i32,
-                     Traits::getGprForType(IceType_i32, Dest->getRegNum()))
-            ->emit(Func);
-        Str << " /* movzx */";
-      }
-      return;
+  // There's no movzx %eXX, %rXX. To zero extend 32- to 64-bits, we emit a
+  // mov %eXX, %eXX. The processor will still do a movzx[bw]q.
+  assert(this->getSrcSize() == 1);
+  const Operand *Src = this->getSrc(0);
+  const Variable *Dest = this->Dest;
+  if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64) {
+    Ostream &Str = Func->getContext()->getStrEmit();
+    if (mayBeElided(Dest, Src)) {
+      Str << "\t/* elided movzx */";
+    } else {
+      Str << "\t"
+             "mov"
+             "\t";
+      Src->emit(Func);
+      Str << ", ";
+      Dest->asType(Func, IceType_i32,
+                   Traits::getGprForType(IceType_i32, Dest->getRegNum()))
+          ->emit(Func);
+      Str << " /* movzx */";
     }
+    return;
   }
   InstX86BaseUnaryopGPR<InstX86Base::Movzx>::emit(Func);
 }
@@ -2359,11 +2329,9 @@
   Type SrcTy = Src->getType();
   assert(typeWidthInBytes(Dest->getType()) > 1);
   assert(typeWidthInBytes(Dest->getType()) > typeWidthInBytes(SrcTy));
-  if (Traits::Is64Bit) {
-    if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64 &&
-        mayBeElided(Dest, Src)) {
-      return;
-    }
+  if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64 &&
+      mayBeElided(Dest, Src)) {
+    return;
   }
   constexpr bool NotLea = false;
   emitIASRegOpTyGPR<false, true>(Func, NotLea, SrcTy, Dest, Src, this->Emitter);
diff --git a/third_party/subzero/src/IceInstX8664.h b/third_party/subzero/src/IceInstX8664.h
index 5cbb0e9c..40094cc 100644
--- a/third_party/subzero/src/IceInstX8664.h
+++ b/third_party/subzero/src/IceInstX8664.h
@@ -2081,8 +2081,7 @@
                                  InstX86Base::SseSuffix::Integral> {
 public:
   static InstX86Pcmpgt *create(Cfg *Func, Variable *Dest, Operand *Source) {
-    assert(Dest->getType() != IceType_f64 ||
-           getInstructionSet(Func) >= SSE4_1);
+    assert(Dest->getType() != IceType_f64 || getInstructionSet(Func) >= SSE4_1);
     return new (Func->allocate<InstX86Pcmpgt>())
         InstX86Pcmpgt(Func, Dest, Source);
   }
diff --git a/third_party/subzero/src/IceTargetLoweringX8632.cpp b/third_party/subzero/src/IceTargetLoweringX8632.cpp
index 5092a70..9245df8 100644
--- a/third_party/subzero/src/IceTargetLoweringX8632.cpp
+++ b/third_party/subzero/src/IceTargetLoweringX8632.cpp
@@ -62,9 +62,7 @@
   return ::Ice::X8632::TargetX8632::shouldBePooled(C);
 }
 
-::Ice::Type getPointerType() {
-  return ::Ice::X8632::TargetX8632::getPointerType();
-}
+::Ice::Type getPointerType() { return ::Ice::Type::IceType_i32; }
 
 } // end of namespace X8632
 
@@ -141,33 +139,20 @@
 const char *PoolTypeConverter<uint8_t>::AsmTag = ".byte";
 const char *PoolTypeConverter<uint8_t>::PrintfString = "0x%x";
 
-// The Microsoft x64 ABI requires the caller to allocate a minimum 32 byte
-// "shadow store" (aka "home space") so that the callee may copy the 4
-// register args to it.
-SizeT getShadowStoreSize() {
-#if defined(_WIN64)
-  static const SizeT ShadowStoreSize =
-      Traits::Is64Bit ? 4 * typeWidthInBytes(Traits::WordType) : 0;
-  return ShadowStoreSize;
-#else
-  return 0;
-#endif
-}
-
 BoolFoldingEntry::BoolFoldingEntry(Inst *I)
     : Instr(I), IsComplex(BoolFolding::hasComplexLowering(I)) {}
 
 typename BoolFolding::BoolFoldingProducerKind
 BoolFolding::getProducerKind(const Inst *Instr) {
   if (llvm::isa<InstIcmp>(Instr)) {
-    if (Traits::Is64Bit || Instr->getSrc(0)->getType() != IceType_i64)
+    if (Instr->getSrc(0)->getType() != IceType_i64)
       return PK_Icmp32;
     return PK_Icmp64;
   }
   if (llvm::isa<InstFcmp>(Instr))
     return PK_Fcmp;
   if (auto *Arith = llvm::dyn_cast<InstArithmetic>(Instr)) {
-    if (Traits::Is64Bit || Arith->getSrc(0)->getType() != IceType_i64) {
+    if (Arith->getSrc(0)->getType() != IceType_i64) {
       switch (Arith->getOp()) {
       default:
         return PK_None;
@@ -222,7 +207,7 @@
   default:
     return false;
   case PK_Icmp64:
-    return !Traits::Is64Bit;
+    return true;
   case PK_Fcmp:
     return Traits::TableFcmp[llvm::cast<InstFcmp>(Instr)->getCondition()].C2 !=
            CondX86::Br_None;
@@ -399,12 +384,7 @@
   return false;
 }
 
-::Ice::Type TargetX8632::getPointerType() {
-  if (!Traits::Is64Bit) {
-    return ::Ice::IceType_i32;
-  }
-  return ::Ice::IceType_i64;
-}
+Type TargetX8632::getPointerType() { return IceType_i32; }
 
 void TargetX8632::translateO2() {
   TimerMarker T(TimerStack::TT_O2, Func);
@@ -753,19 +733,17 @@
         // An InstLoad qualifies unless it uses a 64-bit absolute address,
         // which requires legalization to insert a copy to register.
         // TODO(b/148272103): Fold these after legalization.
-        if (!Traits::Is64Bit || !llvm::isa<Constant>(Load->getLoadAddress())) {
-          LoadDest = Load->getDest();
-          constexpr bool DoLegalize = false;
-          LoadSrc = formMemoryOperand(Load->getLoadAddress(),
-                                      LoadDest->getType(), DoLegalize);
-        }
+        LoadDest = Load->getDest();
+        constexpr bool DoLegalize = false;
+        LoadSrc = formMemoryOperand(Load->getLoadAddress(), LoadDest->getType(),
+                                    DoLegalize);
       } else if (auto *Intrin = llvm::dyn_cast<InstIntrinsic>(CurInst)) {
         // An AtomicLoad intrinsic qualifies as long as it has a valid memory
         // ordering, and can be implemented in a single instruction (i.e., not
         // i64 on x86-32).
         Intrinsics::IntrinsicID ID = Intrin->getIntrinsicID();
         if (ID == Intrinsics::AtomicLoad &&
-            (Traits::Is64Bit || Intrin->getDest()->getType() != IceType_i64) &&
+            (Intrin->getDest()->getType() != IceType_i64) &&
             Intrinsics::isMemoryOrderValid(
                 ID, getConstantMemoryOrder(Intrin->getArg(1)))) {
           LoadDest = Intrin->getDest();
@@ -968,8 +946,6 @@
   // space on the frame for globals (variables with multi-block lifetime), and
   // one block to share for locals (single-block lifetime).
 
-  const SizeT ShadowStoreSize = getShadowStoreSize();
-
   // StackPointer: points just past return address of calling function
 
   Context.init(Node);
@@ -1087,16 +1063,13 @@
   if (PrologEmitsFixedAllocas)
     SpillAreaSizeBytes += FixedAllocaSizeBytes;
 
-  // Win64 ABI: add space for shadow store (aka home space)
-  SpillAreaSizeBytes += ShadowStoreSize;
-
   // Entering the function has made the stack pointer unaligned. Re-align it by
   // adjusting the stack size.
   // Note that StackOffset does not include spill area. It's the offset from the
   // base stack pointer (epb), whether we set it or not, to the the first stack
   // arg (if any). StackSize, on the other hand, does include the spill area.
   const uint32_t StackOffset =
-      ShadowStoreSize + Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes;
+      Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes;
   uint32_t StackSize = Utils::applyAlignment(StackOffset + SpillAreaSizeBytes,
                                              RequiredStackAlignment);
   StackSize = Utils::applyAlignment(StackSize + maxOutArgsSizeBytes(),
@@ -1245,16 +1218,14 @@
                                          size_t BasicFrameOffset,
                                          size_t StackAdjBytes,
                                          size_t &InArgsSizeBytes) {
-  if (!Traits::Is64Bit) {
-    if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) {
-      Variable *Lo = Arg64On32->getLo();
-      Variable *Hi = Arg64On32->getHi();
-      finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, StackAdjBytes,
-                             InArgsSizeBytes);
-      finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, StackAdjBytes,
-                             InArgsSizeBytes);
-      return;
-    }
+  if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) {
+    Variable *Lo = Arg64On32->getLo();
+    Variable *Hi = Arg64On32->getHi();
+    finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, StackAdjBytes,
+                           InArgsSizeBytes);
+    finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, StackAdjBytes,
+                           InArgsSizeBytes);
+    return;
   }
   Type Ty = Arg->getType();
   if (isVectorType(Ty)) {
@@ -1263,7 +1234,7 @@
   Arg->setStackOffset(BasicFrameOffset + InArgsSizeBytes);
   InArgsSizeBytes += typeWidthInBytesOnStack(Ty);
   if (Arg->hasReg()) {
-    assert(Ty != IceType_i64 || Traits::Is64Bit);
+    assert(Ty != IceType_i64);
     auto *Mem = X86OperandMem::create(
         Func, Ty, FramePtr,
         Ctx->getConstantInt32(Arg->getStackOffset() + StackAdjBytes));
@@ -1328,9 +1299,7 @@
 
 Type TargetX8632::stackSlotType() { return Traits::WordType; }
 
-template <typename T>
-typename std::enable_if<!T::Is64Bit, Operand>::type *
-TargetX8632::loOperand(Operand *Operand) {
+Operand *TargetX8632::loOperand(Operand *Operand) {
   assert(Operand->getType() == IceType_i64 ||
          Operand->getType() == IceType_f64);
   if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
@@ -1356,9 +1325,7 @@
   return nullptr;
 }
 
-template <typename T>
-typename std::enable_if<!T::Is64Bit, Operand>::type *
-TargetX8632::hiOperand(Operand *Operand) {
+Operand *TargetX8632::hiOperand(Operand *Operand) {
   assert(Operand->getType() == IceType_i64 ||
          Operand->getType() == IceType_f64);
   if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
@@ -1452,14 +1419,8 @@
   } else {
     // Non-constant sizes need to be adjusted to the next highest multiple of
     // the required alignment at runtime.
-    Variable *T = nullptr;
-    if (Traits::Is64Bit && TotalSize->getType() != IceType_i64) {
-      T = makeReg(IceType_i64);
-      _movzx(T, TotalSize);
-    } else {
-      T = makeReg(IceType_i32);
-      _mov(T, TotalSize);
-    }
+    Variable *T = makeReg(IceType_i32);
+    _mov(T, TotalSize);
     _add(T, Ctx->getConstantInt32(Alignment - 1));
     _and(T, Ctx->getConstantInt32(-Alignment));
     _sub_sp(T);
@@ -1617,8 +1578,7 @@
     }
   }
   // Lea optimization only works for i16 and i32 types, not i8.
-  if (Ty != IceType_i32 && !(Traits::Is64Bit && Ty == IceType_i64) &&
-      (Count3 || Count5 || Count9))
+  if (Ty != IceType_i32 && (Count3 || Count5 || Count9))
     return false;
   // Limit the number of lea/shl operations for a single multiply, to a
   // somewhat arbitrary choice of 3.
@@ -1893,7 +1853,7 @@
     assert(SwapCount <= 1);
     (void)SwapCount;
   }
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
+  if (Ty == IceType_i64) {
     // These x86-32 helper-call-involved instructions are lowered in this
     // separate switch. This is because loOperand() and hiOperand() may insert
     // redundant instructions for constant blinding and pooling. Such redundant
@@ -2172,8 +2132,7 @@
     llvm_unreachable("Unknown arithmetic operator");
     break;
   case InstArithmetic::Add: {
-    const bool ValidType =
-        Ty == IceType_i32 || (Ty == IceType_i64 && Traits::Is64Bit);
+    const bool ValidType = Ty == IceType_i32;
     auto *Const = llvm::dyn_cast<Constant>(Instr->getSrc(1));
     const bool ValidKind =
         Const != nullptr && (llvm::isa<ConstantInteger32>(Const) ||
@@ -2573,8 +2532,6 @@
   OperandList StackArgs, StackArgLocations;
   uint32_t ParameterAreaSizeBytes = 0;
 
-  ParameterAreaSizeBytes += getShadowStoreSize();
-
   // Classify each argument operand according to the location where the argument
   // is passed.
   for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) {
@@ -2679,12 +2636,8 @@
       ReturnReg = makeReg(DestTy, Traits::RegisterSet::Reg_eax);
       break;
     case IceType_i64:
-      if (Traits::Is64Bit) {
-        ReturnReg = makeReg(IceType_i64, Traits::getRaxOrDie());
-      } else {
-        ReturnReg = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
-        ReturnRegHi = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
-      }
+      ReturnReg = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
+      ReturnRegHi = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
       break;
     case IceType_f32:
     case IceType_f64:
@@ -2751,7 +2704,7 @@
   } else {
     assert(isScalarIntegerType(DestTy));
     assert(ReturnReg && "Integer type requires a return register");
-    if (DestTy == IceType_i64 && !Traits::Is64Bit) {
+    if (DestTy == IceType_i64) {
       assert(ReturnRegHi && "64-bit type requires two return registers");
       auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
       Variable *DestLo = Dest64On32->getLo();
@@ -2807,7 +2760,7 @@
         _psra(T, ShiftConstant);
         _movp(Dest, T);
       }
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
+    } else if (DestTy == IceType_i64) {
       // t1=movsx src; t2=t1; t2=sar t2, 31; dst.lo=t1; dst.hi=t2
       Constant *Shift = Ctx->getConstantInt32(31);
       auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
@@ -2864,7 +2817,7 @@
       _movp(T, Src0RM);
       _pand(T, OneMask);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
+    } else if (DestTy == IceType_i64) {
       // t1=movzx src; dst.lo=t1; dst.hi=0
       Constant *Zero = Ctx->getConstantZero(IceType_i32);
       auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
@@ -2884,7 +2837,7 @@
         _mov(T, Src0RM);
       } else {
         assert(DestTy != IceType_i1);
-        assert(Traits::Is64Bit || DestTy != IceType_i64);
+        assert(DestTy != IceType_i64);
         // Use 32-bit for both 16-bit and 32-bit, since 32-bit ops are shorter.
         // In x86-64 we need to widen T to 64-bits to ensure that T -- if
         // written to the stack (i.e., in -Om1) will be fully zero-extended.
@@ -2913,7 +2866,7 @@
     } else if (DestTy == IceType_i1 || DestTy == IceType_i8) {
       // Make sure we truncate from and into valid registers.
       Operand *Src0 = legalizeUndef(Instr->getSrc(0));
-      if (!Traits::Is64Bit && Src0->getType() == IceType_i64)
+      if (Src0->getType() == IceType_i64)
         Src0 = loOperand(Src0);
       Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
       Variable *T = copyToReg8(Src0RM);
@@ -2922,7 +2875,7 @@
       _mov(Dest, T);
     } else {
       Operand *Src0 = legalizeUndef(Instr->getSrc(0));
-      if (!Traits::Is64Bit && Src0->getType() == IceType_i64)
+      if (Src0->getType() == IceType_i64)
         Src0 = loOperand(Src0);
       Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
       // t1 = trunc Src0RM; Dest = t1
@@ -2949,18 +2902,14 @@
       Variable *T = makeReg(DestTy);
       _cvt(T, Src0R, Insts::Cvt::Tps2dq);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
+    } else if (DestTy == IceType_i64) {
       llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem);
       // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && DestTy == IceType_i64) {
-        T_1 = makeReg(IceType_i64);
-      } else {
-        assert(DestTy != IceType_i64);
-        T_1 = makeReg(IceType_i32);
-      }
+      assert(DestTy != IceType_i64);
+      T_1 = makeReg(IceType_i32);
       // cvt() requires its integer argument to be a GPR.
       Variable *T_2 = makeReg(DestTy);
       if (isByteSizedType(DestTy)) {
@@ -2978,20 +2927,15 @@
   case InstCast::Fptoui:
     if (isVectorType(DestTy)) {
       llvm::report_fatal_error("Helper call was expected");
-    } else if (DestTy == IceType_i64 ||
-               (!Traits::Is64Bit && DestTy == IceType_i32)) {
+    } else if (DestTy == IceType_i64 || DestTy == IceType_i32) {
       llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem);
       // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
       assert(DestTy != IceType_i64);
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && DestTy == IceType_i32) {
-        T_1 = makeReg(IceType_i64);
-      } else {
-        assert(DestTy != IceType_i32);
-        T_1 = makeReg(IceType_i32);
-      }
+      assert(DestTy != IceType_i32);
+      T_1 = makeReg(IceType_i32);
       Variable *T_2 = makeReg(DestTy);
       if (isByteSizedType(DestTy)) {
         assert(T_1->getType() == IceType_i32);
@@ -3013,19 +2957,15 @@
       Variable *T = makeReg(DestTy);
       _cvt(T, Src0R, Insts::Cvt::Dq2ps);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && Instr->getSrc(0)->getType() == IceType_i64) {
+    } else if (Instr->getSrc(0)->getType() == IceType_i64) {
       llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem);
       // Sign-extend the operand.
       // t1.i32 = movsx Src0RM; t2 = Cvt t1.i32; Dest = t2
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && Src0RM->getType() == IceType_i64) {
-        T_1 = makeReg(IceType_i64);
-      } else {
-        assert(Src0RM->getType() != IceType_i64);
-        T_1 = makeReg(IceType_i32);
-      }
+      assert(Src0RM->getType() != IceType_i64);
+      T_1 = makeReg(IceType_i32);
       Variable *T_2 = makeReg(DestTy);
       if (Src0RM->getType() == T_1->getType())
         _mov(T_1, Src0RM);
@@ -3040,20 +2980,16 @@
     if (isVectorType(Src0->getType())) {
       llvm::report_fatal_error("Helper call was expected");
     } else if (Src0->getType() == IceType_i64 ||
-               (!Traits::Is64Bit && Src0->getType() == IceType_i32)) {
+               Src0->getType() == IceType_i32) {
       llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
       // Zero-extend the operand.
       // t1.i32 = movzx Src0RM; t2 = Cvt t1.i32; Dest = t2
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && Src0RM->getType() == IceType_i32) {
-        T_1 = makeReg(IceType_i64);
-      } else {
-        assert(Src0RM->getType() != IceType_i64);
-        assert(Traits::Is64Bit || Src0RM->getType() != IceType_i32);
-        T_1 = makeReg(IceType_i32);
-      }
+      assert(Src0RM->getType() != IceType_i64);
+      assert(Src0RM->getType() != IceType_i32);
+      T_1 = makeReg(IceType_i32);
       Variable *T_2 = makeReg(DestTy);
       if (Src0RM->getType() == T_1->getType())
         _mov(T_1, Src0RM);
@@ -3089,86 +3025,72 @@
     } break;
     case IceType_i64: {
       assert(Src0->getType() == IceType_f64);
-      if (Traits::Is64Bit) {
-        Variable *Src0R = legalizeToReg(Src0);
-        Variable *T = makeReg(IceType_i64);
-        _movd(T, Src0R);
-        _mov(Dest, T);
+      Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
+      // a.i64 = bitcast b.f64 ==>
+      //   s.f64 = spill b.f64
+      //   t_lo.i32 = lo(s.f64)
+      //   a_lo.i32 = t_lo.i32
+      //   t_hi.i32 = hi(s.f64)
+      //   a_hi.i32 = t_hi.i32
+      Operand *SpillLo, *SpillHi;
+      if (auto *Src0Var = llvm::dyn_cast<Variable>(Src0RM)) {
+        Variable *Spill = Func->makeVariable(IceType_f64);
+        Spill->setLinkedTo(Src0Var);
+        Spill->setMustNotHaveReg();
+        _movq(Spill, Src0RM);
+        SpillLo = Traits::VariableSplit::create(Func, Spill,
+                                                Traits::VariableSplit::Low);
+        SpillHi = Traits::VariableSplit::create(Func, Spill,
+                                                Traits::VariableSplit::High);
       } else {
-        Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
-        // a.i64 = bitcast b.f64 ==>
-        //   s.f64 = spill b.f64
-        //   t_lo.i32 = lo(s.f64)
-        //   a_lo.i32 = t_lo.i32
-        //   t_hi.i32 = hi(s.f64)
-        //   a_hi.i32 = t_hi.i32
-        Operand *SpillLo, *SpillHi;
-        if (auto *Src0Var = llvm::dyn_cast<Variable>(Src0RM)) {
-          Variable *Spill = Func->makeVariable(IceType_f64);
-          Spill->setLinkedTo(Src0Var);
-          Spill->setMustNotHaveReg();
-          _movq(Spill, Src0RM);
-          SpillLo = Traits::VariableSplit::create(Func, Spill,
-                                                  Traits::VariableSplit::Low);
-          SpillHi = Traits::VariableSplit::create(Func, Spill,
-                                                  Traits::VariableSplit::High);
-        } else {
-          SpillLo = loOperand(Src0RM);
-          SpillHi = hiOperand(Src0RM);
-        }
-
-        auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-        auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-        Variable *T_Lo = makeReg(IceType_i32);
-        Variable *T_Hi = makeReg(IceType_i32);
-
-        _mov(T_Lo, SpillLo);
-        _mov(DestLo, T_Lo);
-        _mov(T_Hi, SpillHi);
-        _mov(DestHi, T_Hi);
+        SpillLo = loOperand(Src0RM);
+        SpillHi = hiOperand(Src0RM);
       }
+
+      auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
+      auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
+      Variable *T_Lo = makeReg(IceType_i32);
+      Variable *T_Hi = makeReg(IceType_i32);
+
+      _mov(T_Lo, SpillLo);
+      _mov(DestLo, T_Lo);
+      _mov(T_Hi, SpillHi);
+      _mov(DestHi, T_Hi);
     } break;
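
A hedged C++ sketch of what the spill-and-split sequence above computes;
std::memcpy stands in for the bit-level spill, and the helper name is
invented:

  #include <cstdint>
  #include <cstring>

  void bitcastF64ToI64(double B, uint32_t &ALo, uint32_t &AHi) {
    uint64_t S;
    std::memcpy(&S, &B, sizeof(S));       // s.f64 = spill b.f64
    ALo = static_cast<uint32_t>(S);       // a_lo.i32 = lo(s.f64)
    AHi = static_cast<uint32_t>(S >> 32); // a_hi.i32 = hi(s.f64)
  }
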
     case IceType_f64: {
       assert(Src0->getType() == IceType_i64);
-      if (Traits::Is64Bit) {
-        Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
-        Variable *T = makeReg(IceType_f64);
-        _movd(T, Src0RM);
-        _mov(Dest, T);
-      } else {
-        Src0 = legalize(Src0);
-        if (llvm::isa<X86OperandMem>(Src0)) {
-          Variable *T = makeReg(DestTy);
-          _movq(T, Src0);
-          _movq(Dest, T);
-          break;
-        }
-        // a.f64 = bitcast b.i64 ==>
-        //   t_lo.i32 = b_lo.i32
-        //   FakeDef(s.f64)
-        //   lo(s.f64) = t_lo.i32
-        //   t_hi.i32 = b_hi.i32
-        //   hi(s.f64) = t_hi.i32
-        //   a.f64 = s.f64
-        Variable *Spill = Func->makeVariable(IceType_f64);
-        Spill->setLinkedTo(Dest);
-        Spill->setMustNotHaveReg();
-
-        Variable *T_Lo = nullptr, *T_Hi = nullptr;
-        auto *SpillLo = Traits::VariableSplit::create(
-            Func, Spill, Traits::VariableSplit::Low);
-        auto *SpillHi = Traits::VariableSplit::create(
-            Func, Spill, Traits::VariableSplit::High);
-        _mov(T_Lo, loOperand(Src0));
-        // Technically, the Spill is defined after the _store happens, but
-        // SpillLo is considered a "use" of Spill so define Spill before it is
-        // used.
-        Context.insert<InstFakeDef>(Spill);
-        _store(T_Lo, SpillLo);
-        _mov(T_Hi, hiOperand(Src0));
-        _store(T_Hi, SpillHi);
-        _movq(Dest, Spill);
+      Src0 = legalize(Src0);
+      if (llvm::isa<X86OperandMem>(Src0)) {
+        Variable *T = makeReg(DestTy);
+        _movq(T, Src0);
+        _movq(Dest, T);
+        break;
       }
+      // a.f64 = bitcast b.i64 ==>
+      //   t_lo.i32 = b_lo.i32
+      //   FakeDef(s.f64)
+      //   lo(s.f64) = t_lo.i32
+      //   t_hi.i32 = b_hi.i32
+      //   hi(s.f64) = t_hi.i32
+      //   a.f64 = s.f64
+      Variable *Spill = Func->makeVariable(IceType_f64);
+      Spill->setLinkedTo(Dest);
+      Spill->setMustNotHaveReg();
+
+      Variable *T_Lo = nullptr, *T_Hi = nullptr;
+      auto *SpillLo = Traits::VariableSplit::create(Func, Spill,
+                                                    Traits::VariableSplit::Low);
+      auto *SpillHi = Traits::VariableSplit::create(
+          Func, Spill, Traits::VariableSplit::High);
+      _mov(T_Lo, loOperand(Src0));
+      // Technically, the Spill is defined after the _store happens, but
+      // SpillLo is considered a "use" of Spill so define Spill before it is
+      // used.
+      Context.insert<InstFakeDef>(Spill);
+      _store(T_Lo, SpillLo);
+      _mov(T_Hi, hiOperand(Src0));
+      _store(T_Hi, SpillHi);
+      _movq(Dest, Spill);
     } break;
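
The reverse direction, under the same caveats: the two i32 halves are
assembled in the spill slot and read back as one f64; the single std::memcpy
corresponds to the final movq.

  #include <cstdint>
  #include <cstring>

  double bitcastI64ToF64(uint32_t BLo, uint32_t BHi) {
    uint64_t S = (static_cast<uint64_t>(BHi) << 32) | BLo; // hi/lo(s.f64)
    double A;
    std::memcpy(&A, &S, sizeof(A)); // a.f64 = s.f64 (movq)
    return A;
  }
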
     case IceType_v8i1: {
       llvm::report_fatal_error("Helper call was expected");
@@ -3477,7 +3399,7 @@
     return;
   }
 
-  if (!Traits::Is64Bit && Src0->getType() == IceType_i64) {
+  if (Src0->getType() == IceType_i64) {
     lowerIcmp64(Icmp, Consumer);
     return;
   }
@@ -3616,9 +3538,7 @@
   eliminateNextVectorSextInstruction(Dest);
 }
 
-template <typename T>
-typename std::enable_if<!T::Is64Bit, void>::type
-TargetX8632::lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer) {
+void TargetX8632::lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer) {
   // a=icmp cond, b, c ==> cmp b,c; a=1; br cond,L1; FakeUse(a); a=0; L1:
   Operand *Src0 = legalize(Icmp->getSrc(0));
   Operand *Src1 = legalize(Icmp->getSrc(1));
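
As background for the lowering below, a 64-bit compare on 32-bit halves is
decided by the high words, with the low words breaking ties. A scalar sketch
(unsigned less-than, helper name invented):

  #include <cstdint>

  bool icmpUlt64(uint32_t ALo, uint32_t AHi, uint32_t BLo, uint32_t BHi) {
    if (AHi != BHi)
      return AHi < BHi; // high halves decide
    return ALo < BLo;   // tie: low halves decide
  }
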
@@ -4051,24 +3971,22 @@
       return;
     }
     Variable *Dest = Instr->getDest();
-    if (!Traits::Is64Bit) {
-      if (auto *Dest64On32 = llvm::dyn_cast<Variable64On32>(Dest)) {
-        // Follow what GCC does and use a movq instead of what lowerLoad()
-        // normally does (split the load into two). Thus, this skips
-        // load/arithmetic op folding. Load/arithmetic folding can't happen
-        // anyway, since this is x86-32 and integer arithmetic only happens on
-        // 32-bit quantities.
-        Variable *T = makeReg(IceType_f64);
-        X86OperandMem *Addr = formMemoryOperand(Instr->getArg(0), IceType_f64);
-        _movq(T, Addr);
-        // Then cast the bits back out of the XMM register to the i64 Dest.
-        auto *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T);
-        lowerCast(Cast);
-        // Make sure that the atomic load isn't elided when unused.
-        Context.insert<InstFakeUse>(Dest64On32->getLo());
-        Context.insert<InstFakeUse>(Dest64On32->getHi());
-        return;
-      }
+    if (auto *Dest64On32 = llvm::dyn_cast<Variable64On32>(Dest)) {
+      // Follow what GCC does and use a movq instead of what lowerLoad()
+      // normally does (split the load into two). Thus, this skips
+      // load/arithmetic op folding. Load/arithmetic folding can't happen
+      // anyway, since this is x86-32 and integer arithmetic only happens on
+      // 32-bit quantities.
+      Variable *T = makeReg(IceType_f64);
+      X86OperandMem *Addr = formMemoryOperand(Instr->getArg(0), IceType_f64);
+      _movq(T, Addr);
+      // Then cast the bits back out of the XMM register to the i64 Dest.
+      auto *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T);
+      lowerCast(Cast);
+      // Make sure that the atomic load isn't elided when unused.
+      Context.insert<InstFakeUse>(Dest64On32->getLo());
+      Context.insert<InstFakeUse>(Dest64On32->getHi());
+      return;
     }
     auto *Load = InstLoad::create(Func, Dest, Instr->getArg(0));
     lowerLoad(Load);
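
A sketch of the movq idea above (names invented): one 8-byte SSE load is a
single memory access, unlike two 4-byte integer loads, which is why the
64-bit atomic load is routed through an XMM register; atomicity assumes a
naturally aligned address.

  #include <cstdint>
  #include <emmintrin.h>

  uint64_t atomicLoad64(const uint64_t *Addr) {
    __m128d T = _mm_load_sd(reinterpret_cast<const double *>(Addr)); // movq
    uint64_t Dest;
    _mm_storel_pd(reinterpret_cast<double *>(&Dest), T); // bits back out
    return Dest;
  }
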
@@ -4101,7 +4019,7 @@
     // it visible.
     Operand *Value = Instr->getArg(0);
     Operand *Ptr = Instr->getArg(1);
-    if (!Traits::Is64Bit && Value->getType() == IceType_i64) {
+    if (Value->getType() == IceType_i64) {
       // Use a movq instead of what lowerStore() normally does (split the store
       // into two), following what GCC does. Cast the bits from int -> to an
       // xmm register first.
@@ -4124,7 +4042,7 @@
     Operand *Val = Instr->getArg(0);
     // In 32-bit mode, bswap only works on 32-bit arguments, and the argument
     // must be a register. Use rotate left for 16-bit bswap.
-    if (!Traits::Is64Bit && Val->getType() == IceType_i64) {
+    if (Val->getType() == IceType_i64) {
       Val = legalizeUndef(Val);
       Variable *T_Lo = legalizeToReg(loOperand(Val));
       Variable *T_Hi = legalizeToReg(hiOperand(Val));
@@ -4134,8 +4052,7 @@
       _bswap(T_Hi);
       _mov(DestLo, T_Hi);
       _mov(DestHi, T_Lo);
-    } else if ((Traits::Is64Bit && Val->getType() == IceType_i64) ||
-               Val->getType() == IceType_i32) {
+    } else if (Val->getType() == IceType_i32) {
       Variable *T = legalizeToReg(Val);
       _bswap(T);
       _mov(Dest, T);
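
The 64-bit byte-swap strategy above, as a scalar sketch: bswap each half,
then exchange the halves. __builtin_bswap32 is the GCC/Clang stand-in for the
bswap instruction.

  #include <cstdint>

  uint64_t bswap64(uint64_t Val) {
    uint32_t TLo = __builtin_bswap32(static_cast<uint32_t>(Val));       // _bswap(T_Lo)
    uint32_t THi = __builtin_bswap32(static_cast<uint32_t>(Val >> 32)); // _bswap(T_Hi)
    return (static_cast<uint64_t>(TLo) << 32) | THi; // halves exchanged
  }
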
@@ -4152,31 +4069,14 @@
   }
   case Intrinsics::Ctpop: {
     Variable *Dest = Instr->getDest();
-    Variable *T = nullptr;
     Operand *Val = Instr->getArg(0);
     Type ValTy = Val->getType();
     assert(ValTy == IceType_i32 || ValTy == IceType_i64);
 
-    if (!Traits::Is64Bit) {
-      T = Dest;
-    } else {
-      T = makeReg(IceType_i64);
-      if (ValTy == IceType_i32) {
-        // in x86-64, __popcountsi2 is not defined, so we cheat a bit by
-        // converting it to a 64-bit value, and using ctpop_i64. _movzx should
-        // ensure we will not have any bits set on Val's upper 32 bits.
-        Variable *V = makeReg(IceType_i64);
-        Operand *ValRM = legalize(Val, Legal_Reg | Legal_Mem);
-        _movzx(V, ValRM);
-        Val = V;
-      }
-      ValTy = IceType_i64;
-    }
-
     InstCall *Call =
         makeHelperCall(ValTy == IceType_i32 ? RuntimeHelper::H_call_ctpop_i32
                                             : RuntimeHelper::H_call_ctpop_i64,
-                       T, 1);
+                       Dest, 1);
     Call->addArg(Val);
     lowerCall(Call);
     // The popcount helpers always return 32-bit values, while the intrinsic's
@@ -4184,33 +4084,10 @@
     // (in 64-bit mode). Thus, clear the upper bits of the dest just in case
     // the user doesn't do that in the IR. If the user does that in the IR,
     // then this zero'ing instruction is dead and gets optimized out.
-    if (!Traits::Is64Bit) {
-      assert(T == Dest);
-      if (Val->getType() == IceType_i64) {
-        auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-        Constant *Zero = Ctx->getConstantZero(IceType_i32);
-        _mov(DestHi, Zero);
-      }
-    } else {
-      assert(Val->getType() == IceType_i64);
-      // T is 64 bit. It needs to be copied to dest. We need to:
-      //
-      // T_1.32 = trunc T.64 to i32
-      // T_2.64 = zext T_1.32 to i64
-      // Dest.<<right_size>> = T_2.<<right_size>>
-      //
-      // which ensures the upper 32 bits will always be cleared. Just doing a
-      //
-      // mov Dest.32 = trunc T.32 to i32
-      //
-      // is dangerous because there's a chance the compiler will optimize this
-      // copy out. To use _movzx we need two new registers (one 32-, and
-      // another 64-bit wide.)
-      Variable *T_1 = makeReg(IceType_i32);
-      _mov(T_1, T);
-      Variable *T_2 = makeReg(IceType_i64);
-      _movzx(T_2, T_1);
-      _mov(Dest, T_2);
+    if (Val->getType() == IceType_i64) {
+      auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
+      Constant *Zero = Ctx->getConstantZero(IceType_i32);
+      _mov(DestHi, Zero);
     }
     return;
   }
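
A sketch of the contract in the comment above, assuming the ctpop helper
hands back a 32-bit count: an i64 destination must have its upper half
cleared explicitly, since the helper defines only the low 32 bits.

  #include <cstdint>

  uint64_t ctpop64ViaHelper(uint64_t Val) {
    uint32_t Count = static_cast<uint32_t>(__builtin_popcountll(Val)); // helper call
    return static_cast<uint64_t>(Count); // DestHi = 0: upper bits cleared
  }
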
@@ -4220,7 +4097,7 @@
     Operand *Val = legalize(Instr->getArg(0));
     Operand *FirstVal;
     Operand *SecondVal = nullptr;
-    if (!Traits::Is64Bit && Val->getType() == IceType_i64) {
+    if (Val->getType() == IceType_i64) {
       FirstVal = loOperand(Val);
       SecondVal = hiOperand(Val);
     } else {
@@ -4237,7 +4114,7 @@
     Operand *Val = legalize(Instr->getArg(0));
     Operand *FirstVal;
     Operand *SecondVal = nullptr;
-    if (!Traits::Is64Bit && Val->getType() == IceType_i64) {
+    if (Val->getType() == IceType_i64) {
       FirstVal = hiOperand(Val);
       SecondVal = loOperand(Val);
     } else {
@@ -4510,18 +4387,13 @@
       Variable *T = makeReg(DestTy);
       _cvt(T, Src0R, Insts::Cvt::Ps2dq);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
+    } else if (DestTy == IceType_i64) {
       llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Src, Legal_Reg | Legal_Mem);
       // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
-      Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && DestTy == IceType_i64) {
-        T_1 = makeReg(IceType_i64);
-      } else {
-        assert(DestTy != IceType_i64);
-        T_1 = makeReg(IceType_i32);
-      }
+      assert(DestTy != IceType_i64);
+      Variable *T_1 = makeReg(IceType_i32);
       // cvt() requires its integer argument to be a GPR.
       Variable *T_2 = makeReg(DestTy);
       if (isByteSizedType(DestTy)) {
@@ -4563,7 +4435,7 @@
 void TargetX8632::lowerAtomicCmpxchg(Variable *DestPrev, Operand *Ptr,
                                      Operand *Expected, Operand *Desired) {
   Type Ty = Expected->getType();
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
+  if (Ty == IceType_i64) {
     // Reserve the pre-colored registers first, before adding any more
     // infinite-weight variables from formMemoryOperand's legalization.
     Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
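
For orientation, the cmpxchg8b contract this path targets, expressed with a
GCC/Clang builtin (a sketch, not the lowering itself): edx:eax carries the
expected value in and the previous contents out, ecx:ebx the desired value.

  #include <cstdint>

  bool cmpxchg64(uint64_t *Ptr, uint64_t &Expected, uint64_t Desired) {
    // Compiles to lock cmpxchg8b on x86-32; Expected is updated on failure.
    return __atomic_compare_exchange_n(Ptr, &Expected, Desired,
                                       /*weak=*/false, __ATOMIC_SEQ_CST,
                                       __ATOMIC_SEQ_CST);
  }
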
@@ -4635,9 +4507,9 @@
   Inst *NextInst = Context.getNextInst(I);
   if (!NextInst)
     return false;
-  // There might be phi assignments right before the compare+branch, since this
-  // could be a backward branch for a loop. This placement of assignments is
-  // determined by placePhiStores().
+  // There might be phi assignments right before the compare+branch, since
+  // this could be a backward branch for a loop. This placement of assignments
+  // is determined by placePhiStores().
   CfgVector<InstAssign *> PhiAssigns;
   while (auto *PhiAssign = llvm::dyn_cast<InstAssign>(NextInst)) {
     if (PhiAssign->getDest() == Dest)
@@ -4692,7 +4564,7 @@
     Func->setError("Unknown AtomicRMW operation");
     return;
   case Intrinsics::AtomicAdd: {
-    if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
+    if (Dest->getType() == IceType_i64) {
       // All the fall-through paths must set this to true, but use this
       // for asserting.
       NeedsCmpxchg = true;
@@ -4709,7 +4581,7 @@
     return;
   }
   case Intrinsics::AtomicSub: {
-    if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
+    if (Dest->getType() == IceType_i64) {
       NeedsCmpxchg = true;
       Op_Lo = &TargetX8632::_sub;
       Op_Hi = &TargetX8632::_sbb;
@@ -4745,7 +4617,7 @@
     Op_Hi = &TargetX8632::_xor;
     break;
   case Intrinsics::AtomicExchange:
-    if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
+    if (Dest->getType() == IceType_i64) {
       NeedsCmpxchg = true;
       // NeedsCmpxchg, but no real Op_Lo/Op_Hi need to be done. The values
       // just need to be moved to the ecx and ebx registers.
@@ -4795,7 +4667,7 @@
   // If Op_{Lo,Hi} are nullptr, then just copy the value.
   Val = legalize(Val);
   Type Ty = Val->getType();
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
+  if (Ty == IceType_i64) {
     Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
     Variable *T_eax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
     X86OperandMem *Addr = formMemoryOperand(Ptr, Ty);
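
Sketch of the retry loop the 64-bit RMW cases build (names invented): compute
the new value with the two-halves op (add/adc for AtomicAdd), then publish it
with the 8-byte exchange, repeating until no other thread intervened.

  #include <cstdint>

  uint64_t atomicAdd64(uint64_t *Ptr, uint64_t Val) {
    uint64_t Old = __atomic_load_n(Ptr, __ATOMIC_RELAXED);
    while (!__atomic_compare_exchange_n(Ptr, &Old, Old + Val, /*weak=*/false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
      ; // Old was refreshed with the current contents; retry.
    return Old; // AtomicAdd returns the previous value.
  }
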
@@ -4912,8 +4784,8 @@
   // like (M - N) for N <= M, and converts 63 to 32, and 127 to 64 (for the
   // all-zeros case).
   //
-  // X8632 only: Similar for 64-bit, but start w/ speculating that the upper 32
-  // bits are all zero, and compute the result for that case (checking the
+  // X8632 only: Similar for 64-bit, but start w/ speculating that the upper
+  // 32 bits are all zero, and compute the result for that case (checking the
   // lower 32 bits). Then actually compute the result for the upper bits and
   // cmov in the result from the lower computation if the earlier speculation
   // was correct.
@@ -4923,7 +4795,7 @@
 
   // TODO(jpp): refactor this method.
   assert(Ty == IceType_i32 || Ty == IceType_i64);
-  const Type DestTy = Traits::Is64Bit ? Dest->getType() : IceType_i32;
+  const Type DestTy = IceType_i32;
   Variable *T = makeReg(DestTy);
   Operand *FirstValRM = legalize(FirstVal, Legal_Mem | Legal_Reg);
   if (Cttz) {
@@ -4953,15 +4825,15 @@
   _cmov(T_Dest, T, CondX86::Br_ne);
   if (!Cttz) {
     if (DestTy == IceType_i64) {
-      // Even though there's a _63 available at this point, that constant might
-      // not be an i32, which will cause the xor emission to fail.
+      // Even though there's a _63 available at this point, that constant
+      // might not be an i32, which will cause the xor emission to fail.
       Constant *_63 = Ctx->getConstantInt32(63);
       _xor(T_Dest, _63);
     } else {
       _xor(T_Dest, _31);
     }
   }
-  if (Traits::Is64Bit || Ty == IceType_i32) {
+  if (Ty == IceType_i32) {
     _mov(Dest, T_Dest);
     return;
   }
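
The speculation described above, in scalar form (a sketch; __builtin_clz
stands in for bsr/lzcnt, and the zero guards cover its undefined-input case):
compute the answer assuming the upper 32 bits are zero, then select the
upper-half answer when they are not.

  #include <cstdint>

  uint32_t ctlz64(uint32_t Lo, uint32_t Hi) {
    uint32_t FromLo = (Lo == 0) ? 32 : __builtin_clz(Lo); // speculative result
    uint32_t FromHi = (Hi == 0) ? 32 : __builtin_clz(Hi);
    return (Hi != 0) ? FromHi : 32 + FromLo; // cmov picks the right case
  }
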
@@ -5045,10 +4917,10 @@
     Variable *SrcBase = legalizeToReg(Src);
     Variable *DestBase = legalizeToReg(Dest);
 
-    // Find the largest type that can be used and use it as much as possible in
-    // reverse order. Then handle any remainder with overlapping copies. Since
-    // the remainder will be at the end, there will be reduced pressure on the
-    // memory unit as the accesses to the same memory are far apart.
+    // Find the largest type that can be used and use it as much as possible
+    // in reverse order. Then handle any remainder with overlapping copies.
+    // Since the remainder will be at the end, there will be reduced pressure
+    // on the memory unit as the accesses to the same memory are far apart.
     Type Ty = largestTypeInSize(CountValue);
     uint32_t TyWidth = typeWidthInBytes(Ty);
 
@@ -5102,9 +4974,10 @@
     Constant *Offset;
     Variable *Reg;
 
-    // Copy the data into registers as the source and destination could overlap
-    // so make sure not to clobber the memory. This also means overlapping
-    // moves can be used as we are taking a safe snapshot of the memory.
+    // Copy the data into registers as the source and destination could
+    // overlap so make sure not to clobber the memory. This also means
+    // overlapping moves can be used as we are taking a safe snapshot of the
+    // memory.
     Type Ty = largestTypeInSize(CountValue);
     uint32_t TyWidth = typeWidthInBytes(Ty);
 
@@ -5122,8 +4995,8 @@
     }
 
     if (RemainingBytes != 0) {
-      // Lower the remaining bytes. Adjust to larger types in order to make use
-      // of overlaps in the copies.
+      // Lower the remaining bytes. Adjust to larger types in order to make
+      // use of overlaps in the copies.
       assert(N <= Traits::MEMMOVE_UNROLL_LIMIT);
       Ty = firstTypeThatFitsSize(RemainingBytes);
       Offset = Ctx->getConstantInt32(CountValue - typeWidthInBytes(Ty));
@@ -5167,9 +5040,9 @@
   if (IsCountConst && CountValue == 0)
     return;
 
-  // TODO(ascull): if the count is constant but val is not it would be possible
-  // to inline by spreading the value across 4 bytes and accessing subregs e.g.
-  // eax, ax and al.
+  // TODO(ascull): if the count is constant but val is not, it would be
+  // possible to inline by spreading the value across 4 bytes and accessing
+  // subregs, e.g. eax, ax and al.
   if (shouldOptimizeMemIntrins() && IsCountConst && IsValConst) {
     Variable *Base = nullptr;
     Variable *VecReg = nullptr;
@@ -5196,15 +5069,15 @@
       }
     };
 
-    // Find the largest type that can be used and use it as much as possible in
-    // reverse order. Then handle any remainder with overlapping copies. Since
-    // the remainder will be at the end, there will be reduces pressure on the
-    // memory unit as the access to the same memory are far apart.
+    // Find the largest type that can be used and use it as much as possible
+    // in reverse order. Then handle any remainder with overlapping copies.
+    // Since the remainder will be at the end, there will be reduced pressure
+    // on the memory unit as the accesses to the same memory are far apart.
     Type Ty = IceType_void;
     if (ValValue == 0 && CountValue >= BytesPerStoreq &&
         CountValue <= BytesPerStorep * Traits::MEMSET_UNROLL_LIMIT) {
-      // When the value is zero it can be loaded into a vector register cheaply
-      // using the xor trick.
+      // When the value is zero it can be loaded into a vector register
+      // cheaply using the xor trick.
       Base = legalizeToReg(Dest);
       VecReg = makeVectorOfZeros(IceType_v16i8);
       Ty = largestTypeInSize(CountValue);
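
The xor trick referenced above, for completeness: pxor of a register with
itself produces all zeros without touching memory, so a zero-valued memset
can stream stores from that register. A minimal intrinsic sketch:

  #include <emmintrin.h>

  __m128i zeroedXmm() {
    return _mm_setzero_si128(); // emitted as pxor xmm, xmm
  }
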
@@ -5230,8 +5103,8 @@
       if (RemainingBytes == 0)
         return;
 
-      // Lower the remaining bytes. Adjust to larger types in order to make use
-      // of overlaps in the copies.
+      // Lower the remaining bytes. Adjust to larger types in order to make
+      // use of overlaps in the copies.
       Type LeftOverTy = firstTypeThatFitsSize(RemainingBytes);
       Offset = CountValue - typeWidthInBytes(LeftOverTy);
       lowerSet(LeftOverTy, Offset);
@@ -5351,8 +5224,8 @@
       } else if (auto *AddReloc = llvm::dyn_cast<ConstantRelocatable>(SrcOp)) {
         if (*Relocatable == nullptr) {
           // It is always safe to fold a relocatable through assignment -- the
-          // assignment frees a slot in the address operand that can be used to
-          // hold the Sandbox Pointer -- if any.
+          // assignment frees a slot in the address operand that can be used
+          // to hold the Sandbox Pointer -- if any.
           *Var = nullptr;
           *Relocatable = AddReloc;
           return VarAssign;
@@ -5656,8 +5529,8 @@
   bool AddressWasOptimized = false;
   // The following unnamed struct identifies the address mode formation steps
   // that could potentially create an invalid memory operand (i.e., no free
-  // slots for RebasePtr.) We add all those variables to this struct so that we
-  // can use memset() to reset all members to false.
+  // slots for RebasePtr.) We add all those variables to this struct so that
+  // we can use memset() to reset all members to false.
   struct {
     bool AssignBase = false;
     bool AssignIndex = false;
@@ -5826,9 +5699,9 @@
   auto *Var = llvm::dyn_cast_or_null<Variable>(Opnd);
   if (Var == nullptr)
     return;
-  // We use lowerStore() to copy out-args onto the stack.  This creates a memory
-  // operand with the stack pointer as the base register.  Don't do bounds
-  // checks on that.
+  // We use lowerStore() to copy out-args onto the stack.  This creates a
+  // memory operand with the stack pointer as the base register.  Don't do
+  // bounds checks on that.
   if (Var->getRegNum() == getStackReg())
     return;
 
@@ -5841,10 +5714,10 @@
 }
 
 void TargetX8632::lowerLoad(const InstLoad *Load) {
-  // A Load instruction can be treated the same as an Assign instruction, after
-  // the source operand is transformed into an X86OperandMem operand.  Note that
-  // the address mode optimization already creates an X86OperandMem operand, so
-  // it doesn't need another level of transformation.
+  // A Load instruction can be treated the same as an Assign instruction,
+  // after the source operand is transformed into an X86OperandMem operand.
+  // Note that the address mode optimization already creates an X86OperandMem
+  // operand, so it doesn't need another level of transformation.
   Variable *DestLoad = Load->getDest();
   Type Ty = DestLoad->getType();
   Operand *Src0 = formMemoryOperand(Load->getLoadAddress(), Ty);
@@ -5924,8 +5797,8 @@
     Reg = moveReturnValueToRegister(RetValue, ReturnType);
   }
   // Add a ret instruction even if sandboxing is enabled, because addEpilog
-  // explicitly looks for a ret instruction as a marker for where to insert the
-  // frame removal instructions.
+  // explicitly looks for a ret instruction as a marker for where to insert
+  // the frame removal instructions.
   _ret(Reg);
   // Add a fake use of esp to make sure esp stays alive for the entire
   // function. Otherwise post-call esp adjustments get dead-code eliminated.
@@ -6518,7 +6391,7 @@
     std::swap(SrcT, SrcF);
     Cond = InstX86Base::getOppositeCondition(Cond);
   }
-  if (!Traits::Is64Bit && DestTy == IceType_i64) {
+  if (DestTy == IceType_i64) {
     SrcT = legalizeUndef(SrcT);
     SrcF = legalizeUndef(SrcF);
     // Set the low portion.
@@ -6530,8 +6403,7 @@
     return;
   }
 
-  assert(DestTy == IceType_i16 || DestTy == IceType_i32 ||
-         (Traits::Is64Bit && DestTy == IceType_i64));
+  assert(DestTy == IceType_i16 || DestTy == IceType_i32);
   lowerSelectIntMove(Dest, Cond, SrcT, SrcF);
 }
 
@@ -6548,7 +6420,7 @@
 void TargetX8632::lowerMove(Variable *Dest, Operand *Src, bool IsRedefinition) {
   assert(Dest->getType() == Src->getType());
   assert(!Dest->isRematerializable());
-  if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
+  if (Dest->getType() == IceType_i64) {
     Src = legalize(Src);
     Operand *SrcLo = loOperand(Src);
     Operand *SrcHi = hiOperand(Src);
@@ -6715,7 +6587,7 @@
   doMockBoundsCheck(NewAddr);
   Type Ty = NewAddr->getType();
 
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
+  if (Ty == IceType_i64) {
     Value = legalizeUndef(Value);
     Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm);
     _store(ValueHi, llvm::cast<X86OperandMem>(hiOperand(NewAddr)));
@@ -6761,7 +6633,7 @@
                                     uint64_t Max) {
   // TODO(ascull): 64-bit should not reach here but only because it is not
   // implemented yet. This should be able to handle the 64-bit case.
-  assert(Traits::Is64Bit || Comparison->getType() != IceType_i64);
+  assert(Comparison->getType() != IceType_i64);
   // Subtracting 0 is a nop so don't do it
   if (Min != 0) {
     // Avoid clobbering the comparison by copying it
@@ -6797,16 +6669,12 @@
 
     // Make sure the index is a register of the same width as the base
     Variable *Index;
-    const Type PointerType = getPointerType();
+    const Type PointerType = IceType_i32;
     if (RangeIndex->getType() != PointerType) {
       Index = makeReg(PointerType);
-      if (RangeIndex->getType() == IceType_i64) {
-        assert(Traits::Is64Bit);
-        _mov(Index, RangeIndex); // trunc
-      } else {
-        Operand *RangeIndexRM = legalize(RangeIndex, Legal_Reg | Legal_Mem);
-        _movzx(Index, RangeIndexRM);
-      }
+      assert(RangeIndex->getType() != IceType_i64);
+      Operand *RangeIndexRM = legalize(RangeIndex, Legal_Reg | Legal_Mem);
+      _movzx(Index, RangeIndexRM);
     } else {
       Index = legalizeToReg(RangeIndex);
     }
@@ -6870,7 +6738,7 @@
 
   assert(CaseClusters.size() != 0); // Should always be at least one
 
-  if (!Traits::Is64Bit && Src0->getType() == IceType_i64) {
+  if (Src0->getType() == IceType_i64) {
     Src0 = legalize(Src0); // get Base/Index into physical registers
     Operand *Src0Lo = loOperand(Src0);
     Operand *Src0Hi = hiOperand(Src0);
@@ -7003,8 +6871,8 @@
 ///   %cmp.ext = sext <n x i1> %cmp to <n x ty>
 ///
 /// We can eliminate the sext operation by copying the result of pcmpeqd,
-/// pcmpgtd, or cmpps (which produce sign extended results) to the result of the
-/// sext operation.
+/// pcmpgtd, or cmpps (which produce sign extended results) to the result of
+/// the sext operation.
 
 void TargetX8632::eliminateNextVectorSextInstruction(
     Variable *SignExtendedResult) {
@@ -7030,19 +6898,19 @@
 void TargetX8632::lowerBreakpoint(const InstBreakpoint * /*Instr*/) { _int3(); }
 
 void TargetX8632::lowerRMW(const InstX86FakeRMW *RMW) {
-  // If the beacon variable's live range does not end in this instruction, then
-  // it must end in the modified Store instruction that follows. This means
-  // that the original Store instruction is still there, either because the
-  // value being stored is used beyond the Store instruction, or because dead
-  // code elimination did not happen. In either case, we cancel RMW lowering
-  // (and the caller deletes the RMW instruction).
+  // If the beacon variable's live range does not end in this instruction,
+  // then it must end in the modified Store instruction that follows. This
+  // means that the original Store instruction is still there, either because
+  // the value being stored is used beyond the Store instruction, or because
+  // dead code elimination did not happen. In either case, we cancel RMW
+  // lowering (and the caller deletes the RMW instruction).
   if (!RMW->isLastUse(RMW->getBeacon()))
     return;
   Operand *Src = RMW->getData();
   Type Ty = Src->getType();
   X86OperandMem *Addr = formMemoryOperand(RMW->getAddr(), Ty);
   doMockBoundsCheck(Addr);
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
+  if (Ty == IceType_i64) {
     Src = legalizeUndef(Src);
     Operand *SrcLo = legalize(loOperand(Src), Legal_Reg | Legal_Imm);
     Operand *SrcHi = legalize(hiOperand(Src), Legal_Reg | Legal_Imm);
@@ -7113,16 +6981,10 @@
   }
 }
 
-/// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve
-/// integrity of liveness analysis. Undef values are also turned into zeroes,
-/// since loOperand() and hiOperand() don't expect Undef input.
+/// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to
+/// preserve integrity of liveness analysis. Undef values are also turned into
+/// zeroes, since loOperand() and hiOperand() don't expect Undef input.
 void TargetX8632::prelowerPhis() {
-  if (Traits::Is64Bit) {
-    // On x86-64 we don't need to prelower phis -- the architecture can handle
-    // 64-bit integer natively.
-    return;
-  }
-
   PhiLowering::prelowerPhis32Bit<TargetX8632>(this, Context.getNode(), Func);
 }
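
To make the rewrite concrete, its shape in the same pseudo-IR the comments
above use (operand names invented):

  a.i64 = phi [b.i64, %pred1], [c.i64, %pred2]   ==>
    a_lo.i32 = phi [b_lo.i32, %pred1], [c_lo.i32, %pred2]
    a_hi.i32 = phi [b_hi.i32, %pred1], [c_hi.i32, %pred2]
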
 
@@ -7132,7 +6994,7 @@
     RuntimeHelper HelperID = RuntimeHelper::H_Num;
     Variable *Dest = Arith->getDest();
     Type DestTy = Dest->getType();
-    if (!Traits::Is64Bit && DestTy == IceType_i64) {
+    if (DestTy == IceType_i64) {
       switch (Arith->getOp()) {
       default:
         return;
@@ -7207,7 +7069,7 @@
     default:
       return;
     case InstCast::Fptosi:
-      if (!Traits::Is64Bit && DestTy == IceType_i64) {
+      if (DestTy == IceType_i64) {
         HelperID = isFloat32Asserting32Or64(SrcType)
                        ? RuntimeHelper::H_fptosi_f32_i64
                        : RuntimeHelper::H_fptosi_f64_i64;
@@ -7220,13 +7082,8 @@
         assert(DestTy == IceType_v4i32);
         assert(SrcType == IceType_v4f32);
         HelperID = RuntimeHelper::H_fptoui_4xi32_f32;
-      } else if (DestTy == IceType_i64 ||
-                 (!Traits::Is64Bit && DestTy == IceType_i32)) {
-        if (Traits::Is64Bit) {
-          HelperID = isFloat32Asserting32Or64(SrcType)
-                         ? RuntimeHelper::H_fptoui_f32_i64
-                         : RuntimeHelper::H_fptoui_f64_i64;
-        } else if (isInt32Asserting32Or64(DestTy)) {
+      } else if (DestTy == IceType_i64 || DestTy == IceType_i32) {
+        if (isInt32Asserting32Or64(DestTy)) {
           HelperID = isFloat32Asserting32Or64(SrcType)
                          ? RuntimeHelper::H_fptoui_f32_i32
                          : RuntimeHelper::H_fptoui_f64_i32;
@@ -7240,7 +7097,7 @@
       }
       break;
     case InstCast::Sitofp:
-      if (!Traits::Is64Bit && SrcType == IceType_i64) {
+      if (SrcType == IceType_i64) {
         HelperID = isFloat32Asserting32Or64(DestTy)
                        ? RuntimeHelper::H_sitofp_i64_f32
                        : RuntimeHelper::H_sitofp_i64_f64;
@@ -7253,8 +7110,7 @@
         assert(DestTy == IceType_v4f32);
         assert(SrcType == IceType_v4i32);
         HelperID = RuntimeHelper::H_uitofp_4xi32_4xf32;
-      } else if (SrcType == IceType_i64 ||
-                 (!Traits::Is64Bit && SrcType == IceType_i32)) {
+      } else if (SrcType == IceType_i64 || SrcType == IceType_i32) {
         if (isInt32Asserting32Or64(SrcType)) {
           HelperID = isFloat32Asserting32Or64(DestTy)
                          ? RuntimeHelper::H_uitofp_i32_f32
@@ -7308,8 +7164,8 @@
     Call->addArg(Src0);
     StackArgumentsSize = getCallStackArgumentsSizeBytes(Call);
     Context.insert(Call);
-    // The PNaCl ABI disallows i8/i16 return types, so truncate the helper call
-    // result to the appropriate type as necessary.
+    // The PNaCl ABI disallows i8/i16 return types, so truncate the helper
+    // call result to the appropriate type as necessary.
     if (CallDest->getType() != Dest->getType())
       Context.insert<InstCast>(InstCast::Trunc, Dest, CallDest);
     Cast->setDeleted();
@@ -7400,10 +7256,9 @@
       OutArgumentsSizeBytes += typeWidthInBytesOnStack(Ty);
     }
   }
-  if (Traits::Is64Bit)
-    return OutArgumentsSizeBytes;
-  // The 32 bit ABI requires floating point values to be returned on the x87 FP
-  // stack. Ensure there is enough space for the fstp/movs for floating returns.
+  // The 32 bit ABI requires floating point values to be returned on the x87
+  // FP stack. Ensure there is enough space for the fstp/movs for floating
+  // returns.
   if (isScalarFloatingType(ReturnType)) {
     OutArgumentsSizeBytes =
         std::max(OutArgumentsSizeBytes,
@@ -7426,8 +7281,7 @@
   Variable *Dest = Instr->getDest();
   if (Dest != nullptr)
     ReturnType = Dest->getType();
-  return getShadowStoreSize() +
-         getCallStackArgumentsSizeBytes(ArgTypes, ReturnType);
+  return getCallStackArgumentsSizeBytes(ArgTypes, ReturnType);
 }
 
 Variable *TargetX8632::makeZeroedRegister(Type Ty, RegNumT RegNum) {
@@ -7472,10 +7326,10 @@
   // Insert a FakeDef so the live range of MinusOnes is not overestimated.
   Context.insert<InstFakeDef>(MinusOnes);
   if (Ty == IceType_f64)
-    // Making a vector of minus ones of type f64 is currently only used for the
-    // fabs intrinsic.  To use the f64 type to create this mask with pcmpeqq
-    // requires SSE 4.1.  Since we're just creating a mask, pcmpeqd does the
-    // same job and only requires SSE2.
+    // Making a vector of minus ones of type f64 is currently only used for
+    // the fabs intrinsic.  To use the f64 type to create this mask with
+    // pcmpeqq requires SSE 4.1.  Since we're just creating a mask, pcmpeqd
+    // does the same job and only requires SSE2.
     _pcmpeq(MinusOnes, MinusOnes, IceType_f32);
   else
     _pcmpeq(MinusOnes, MinusOnes);
@@ -7532,21 +7386,21 @@
   // TODO(wala,stichnot): lea should not
   // be required. The address of the stack slot is known at compile time
   // (although not until after addProlog()).
-  const Type PointerType = getPointerType();
+  const Type PointerType = IceType_i32;
   Variable *Loc = makeReg(PointerType);
   _lea(Loc, Slot);
   Constant *ConstantOffset = Ctx->getConstantInt32(Offset);
   return X86OperandMem::create(Func, Ty, Loc, ConstantOffset);
 }
 
-/// Lowering helper to copy a scalar integer source operand into some 8-bit GPR.
-/// Src is assumed to already be legalized.  If the source operand is known to
-/// be a memory or immediate operand, a simple mov will suffice.  But if the
-/// source operand can be a physical register, then it must first be copied into
-/// a physical register that is truncable to 8-bit, then truncated into a
-/// physical register that can receive a truncation, and finally copied into the
-/// result 8-bit register (which in general can be any 8-bit register).  For
-/// example, moving %ebp into %ah may be accomplished as:
+/// Lowering helper to copy a scalar integer source operand into some 8-bit
+/// GPR. Src is assumed to already be legalized.  If the source operand is
+/// known to be a memory or immediate operand, a simple mov will suffice.  But
+/// if the source operand can be a physical register, then it must first be
+/// copied into a physical register that is truncable to 8-bit, then truncated
+/// into a physical register that can receive a truncation, and finally copied
+/// into the result 8-bit register (which in general can be any 8-bit
+/// register).  For example, moving %ebp into %ah may be accomplished as:
 ///   movl %ebp, %edx
 ///   mov_trunc %edx, %dl  // this redundant assignment is ultimately elided
 ///   movb %dl, %ah
@@ -7565,8 +7419,8 @@
 ///
 /// Note #3.  If Src is a Variable, the result will be an infinite-weight i8
 /// Variable with the RCX86_IsTrunc8Rcvr register class.  As such, this helper
-/// is a convenient way to prevent ah/bh/ch/dh from being an (invalid) argument
-/// to the pinsrb instruction.
+/// is a convenient way to prevent ah/bh/ch/dh from being an (invalid)
+/// argument to the pinsrb instruction.
 
 Variable *TargetX8632::copyToReg8(Operand *Src, RegNumT RegNum) {
   Type Ty = Src->getType();
@@ -7627,8 +7481,8 @@
   // the shl shift amount to be either an immediate or in ecx.)
   assert(RegNum.hasNoValue() || Allowed == Legal_Reg);
 
-  // Substitute with an available infinite-weight variable if possible.  Only do
-  // this when we are not asking for a specific register, and when the
+  // Substitute with an available infinite-weight variable if possible.  Only
+  // do this when we are not asking for a specific register, and when the
   // substitution is not locked to a specific register, and when the types
   // match, in order to capture the vast majority of opportunities and avoid
   // corner cases in the lowering.
@@ -7688,19 +7542,6 @@
     // There should be no constants of vector type (other than undef).
     assert(!isVectorType(Ty));
 
-    // If the operand is a 64 bit constant integer we need to legalize it to a
-    // register in x86-64.
-    if (Traits::Is64Bit) {
-      if (auto *C64 = llvm::dyn_cast<ConstantInteger64>(Const)) {
-        if (!Utils::IsInt(32, C64->getValue())) {
-          if (RegNum.hasValue()) {
-            assert(Traits::getGprForType(IceType_i64, RegNum) == RegNum);
-          }
-          return copyToReg(Const, RegNum);
-        }
-      }
-    }
-
     if (!llvm::dyn_cast<ConstantRelocatable>(Const)) {
       if (isScalarFloatingType(Ty)) {
         // Convert a scalar floating point constant into an explicit memory
@@ -7735,21 +7576,22 @@
   }
 
   if (auto *Var = llvm::dyn_cast<Variable>(From)) {
-    // Check if the variable is guaranteed a physical register. This can happen
-    // either when the variable is pre-colored or when it is assigned infinite
-    // weight.
+    // Check if the variable is guaranteed a physical register. This can
+    // happen either when the variable is pre-colored or when it is assigned
+    // infinite weight.
     bool MustHaveRegister = (Var->hasReg() || Var->mustHaveReg());
     bool MustRematerialize =
         (Var->isRematerializable() && !(Allowed & Legal_Rematerializable));
     // We need a new physical register for the operand if:
     // - Mem is not allowed and Var isn't guaranteed a physical register, or
     // - RegNum is required and Var->getRegNum() doesn't match, or
-    // - Var is a rematerializable variable and rematerializable pass-through is
+    // - Var is a rematerializable variable and rematerializable
+    //   pass-through is
     //   not allowed (in which case we need a lea instruction).
     if (MustRematerialize) {
       Variable *NewVar = makeReg(Ty, RegNum);
-      // Since Var is rematerializable, the offset will be added when the lea is
-      // emitted.
+      // Since Var is rematerializable, the offset will be added when the lea
+      // is emitted.
       constexpr Constant *NoOffset = nullptr;
       auto *Mem = X86OperandMem::create(Func, Ty, Var, NoOffset);
       _lea(NewVar, Mem);
@@ -7777,8 +7619,8 @@
   Type Ty = From->getType();
   if (llvm::isa<ConstantUndef>(From)) {
     // Lower undefs to zero.  Another option is to lower undefs to an
-    // uninitialized register; however, using an uninitialized register results
-    // in less predictable code.
+    // uninitialized register; however, using an uninitialized register
+    // results in less predictable code.
     //
     // If in the future the implementation is changed to lower undef values to
     // uninitialized registers, a FakeDef will be needed:
@@ -7794,11 +7636,11 @@
   return From;
 }
 
-/// For the cmp instruction, if Src1 is an immediate, or known to be a physical
-/// register, we can allow Src0 to be a memory operand. Otherwise, Src0 must be
-/// copied into a physical register. (Actually, either Src0 or Src1 can be
-/// chosen for the physical register, but unfortunately we have to commit to one
-/// or the other before register allocation.)
+/// For the cmp instruction, if Src1 is an immediate, or known to be a
+/// physical register, we can allow Src0 to be a memory operand. Otherwise,
+/// Src0 must be copied into a physical register. (Actually, either Src0 or
+/// Src1 can be chosen for the physical register, but unfortunately we have to
+/// commit to one or the other before register allocation.)
 
 Operand *TargetX8632::legalizeSrc0ForCmp(Operand *Src0, Operand *Src1) {
   bool IsSrc1ImmOrReg = false;
@@ -7843,7 +7685,7 @@
 
 Variable *TargetX8632::makeReg(Type Type, RegNumT RegNum) {
   // There aren't any 64-bit integer registers for x86-32.
-  assert(Traits::Is64Bit || Type != IceType_i64);
+  assert(Type != IceType_i64);
   Variable *Reg = Func->makeVariable(Type);
   if (RegNum.hasValue())
     Reg->setRegNum(RegNum);
@@ -7890,14 +7732,7 @@
 }
 
 void TargetX8632::emit(const ConstantInteger64 *C) const {
-  if (!Traits::Is64Bit) {
-    llvm::report_fatal_error("Not expecting to emit 64-bit integers");
-  } else {
-    if (!BuildDefs::dump())
-      return;
-    Ostream &Str = Ctx->getStrEmit();
-    Str << "$" << C->getValue();
-  }
+  llvm::report_fatal_error("Not expecting to emit 64-bit integers");
 }
 
 void TargetX8632::emit(const ConstantFloat *C) const {
@@ -7934,7 +7769,7 @@
   Str << "\t.section\t.rodata." << JumpTable->getSectionName()
       << ",\"a\",@progbits\n"
          "\t.align\t"
-      << typeWidthInBytes(getPointerType()) << "\n"
+      << typeWidthInBytes(IceType_i32) << "\n"
       << JumpTable->getName() << ":";
 
   for (SizeT I = 0; I < JumpTable->getNumTargets(); ++I)
@@ -8009,9 +7844,7 @@
   switch (getFlags().getOutFileType()) {
   case FT_Elf: {
     ELFObjectWriter *Writer = Ctx->getObjectWriter();
-    constexpr FixupKind FK_Abs64 = llvm::ELF::R_X86_64_64;
-    const FixupKind RelocationKind =
-        (getPointerType() == IceType_i32) ? Traits::FK_Abs : FK_Abs64;
+    const FixupKind RelocationKind = Traits::FK_Abs;
     for (const JumpTableData &JT : Ctx->getJumpTables())
       Writer->writeJumpTable(JT, RelocationKind, IsPIC);
   } break;
@@ -8027,7 +7860,7 @@
       Str << "\t.section\t" << Prefix << JT.getSectionName()
           << ",\"a\",@progbits\n"
              "\t.align\t"
-          << typeWidthInBytes(getPointerType()) << "\n"
+          << typeWidthInBytes(IceType_i32) << "\n"
           << JT.getName().toString() << ":";
 
       // On X8664 ILP32 pointers are 32-bit hence the use of .long
@@ -8143,8 +7976,9 @@
 void TargetX8632::_sub_sp(Operand *Adjustment) {
   Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
   _sub(esp, Adjustment);
-  // Add a fake use of the stack pointer, to prevent the stack pointer adustment
-  // from being dead-code eliminated in a function that doesn't return.
+  // Add a fake use of the stack pointer, to prevent the stack pointer
+  // adjustment from being dead-code eliminated in a function that doesn't
+  // return.
   Context.insert<InstFakeUse>(esp);
 }
 
@@ -8242,9 +8076,9 @@
 // In some cases, there are x-macros tables for both high-level and low-level
 // instructions/operands that use the same enum key value. The tables are kept
 // separate to maintain a proper separation between abstraction layers. There
-// is a risk that the tables could get out of sync if enum values are reordered
-// or if entries are added or deleted. The following dummy namespaces use
-// static_asserts to ensure everything is kept in sync.
+// is a risk that the tables could get out of sync if enum values are
+// reordered or if entries are added or deleted. The following dummy
+// namespaces use static_asserts to ensure everything is kept in sync.
 
 namespace {
 // Validate the enum values in FCMPX8632_TABLE.
diff --git a/third_party/subzero/src/IceTargetLoweringX8632.h b/third_party/subzero/src/IceTargetLoweringX8632.h
index de77c1b..7b0075d 100644
--- a/third_party/subzero/src/IceTargetLoweringX8632.h
+++ b/third_party/subzero/src/IceTargetLoweringX8632.h
@@ -248,7 +248,7 @@
   }
 
   bool shouldSplitToVariable64On32(Type Ty) const override {
-    return Traits::Is64Bit ? false : Ty == IceType_i64;
+    return Ty == IceType_i64;
   }
 
   SizeT getMinJumpTableSize() const override { return 4; }
@@ -264,23 +264,8 @@
 
   void initNodeForLowering(CfgNode *Node) override;
 
-  template <typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, Operand>::type *
-  loOperand(Operand *Operand);
-  template <typename T = Traits>
-  typename std::enable_if<T::Is64Bit, Operand>::type *loOperand(Operand *) {
-    llvm::report_fatal_error(
-        "Hey, yo! This is x86-64. Watcha doin'? (loOperand)");
-  }
-
-  template <typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, Operand>::type *
-  hiOperand(Operand *Operand);
-  template <typename T = Traits>
-  typename std::enable_if<T::Is64Bit, Operand>::type *hiOperand(Operand *) {
-    llvm::report_fatal_error(
-        "Hey, yo! This is x86-64. Watcha doin'? (hiOperand)");
-  }
+  Operand *loOperand(Operand *Operand);
+  Operand *hiOperand(Operand *Operand);
 
   void addProlog(CfgNode *Node) override;
   void finishArgumentLowering(Variable *Arg, Variable *FramePtr,
@@ -530,9 +515,7 @@
   void _bsr(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Bsr>(Dest, Src0);
   }
-  void _bswap(Variable *SrcDest) {
-    Context.insert<Insts::Bswap>(SrcDest);
-  }
+  void _bswap(Variable *SrcDest) { Context.insert<Insts::Bswap>(SrcDest); }
   void _cbwdq(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Cbwdq>(Dest, Src0);
   }
@@ -547,8 +530,7 @@
   }
   void _cmpxchg(Operand *DestOrAddr, Variable *Eax, Variable *Desired,
                 bool Locked) {
-    Context.insert<Insts::Cmpxchg>(DestOrAddr, Eax, Desired,
-                                                    Locked);
+    Context.insert<Insts::Cmpxchg>(DestOrAddr, Eax, Desired, Locked);
     // Mark eax as possibly modified by cmpxchg.
     Context.insert<InstFakeDef>(Eax, llvm::dyn_cast<Variable>(DestOrAddr));
     _set_dest_redefined();
@@ -556,8 +538,7 @@
   }
   void _cmpxchg8b(X86OperandMem *Addr, Variable *Edx, Variable *Eax,
                   Variable *Ecx, Variable *Ebx, bool Locked) {
-    Context.insert<Insts::Cmpxchg8b>(Addr, Edx, Eax, Ecx, Ebx,
-                                                      Locked);
+    Context.insert<Insts::Cmpxchg8b>(Addr, Edx, Eax, Ecx, Ebx, Locked);
     // Mark edx, and eax as possibly modified by cmpxchg8b.
     Context.insert<InstFakeDef>(Edx);
     _set_dest_redefined();
@@ -566,8 +547,7 @@
     _set_dest_redefined();
     Context.insert<InstFakeUse>(Eax);
   }
-  void _cvt(Variable *Dest, Operand *Src0,
-            Insts::Cvt::CvtVariant Variant) {
+  void _cvt(Variable *Dest, Operand *Src0, Insts::Cvt::CvtVariant Variant) {
     Context.insert<Insts::Cvt>(Dest, Src0, Variant);
   }
   void _round(Variable *Dest, Operand *Src0, Operand *Imm) {
@@ -582,12 +562,8 @@
   void _divss(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Divss>(Dest, Src0);
   }
-  void _fld(Operand *Src0) {
-    Context.insert<Insts::Fld>(Src0);
-  }
-  void _fstp(Variable *Dest) {
-    Context.insert<Insts::Fstp>(Dest);
-  }
+  void _fld(Operand *Src0) { Context.insert<Insts::Fld>(Src0); }
+  void _fstp(Variable *Dest) { Context.insert<Insts::Fstp>(Dest); }
   void _idiv(Variable *Dest, Operand *Src0, Operand *Src1) {
     Context.insert<Insts::Idiv>(Dest, Src0, Src1);
   }
@@ -601,9 +577,7 @@
     Context.insert<Insts::Insertps>(Dest, Src0, Src1);
   }
   void _int3() { Context.insert<Insts::Int3>(); }
-  void _jmp(Operand *Target) {
-    Context.insert<Insts::Jmp>(Target);
-  }
+  void _jmp(Operand *Target) { Context.insert<Insts::Jmp>(Target); }
   void _lea(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Lea>(Dest, Src0);
   }
@@ -620,8 +594,7 @@
   /// If Dest=nullptr is passed in, then a new variable is created, marked as
   /// infinite register allocation weight, and returned through the in/out Dest
   /// argument.
-  Insts::Mov *_mov(Variable *&Dest, Operand *Src0,
-                                    RegNumT RegNum = RegNumT()) {
+  Insts::Mov *_mov(Variable *&Dest, Operand *Src0, RegNumT RegNum = RegNumT()) {
     if (Dest == nullptr)
       Dest = makeReg(Src0->getType(), RegNum);
     return Context.insert<Insts::Mov>(Dest, Src0);
@@ -666,12 +639,8 @@
   void _mulss(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Mulss>(Dest, Src0);
   }
-  void _neg(Variable *SrcDest) {
-    Context.insert<Insts::Neg>(SrcDest);
-  }
-  void _nop(SizeT Variant) {
-    Context.insert<Insts::Nop>(Variant);
-  }
+  void _neg(Variable *SrcDest) { Context.insert<Insts::Neg>(SrcDest); }
+  void _nop(SizeT Variant) { Context.insert<Insts::Nop>(Variant); }
   void _or(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Or>(Dest, Src0);
   }
@@ -701,8 +670,7 @@
   }
   void _pcmpeq(Variable *Dest, Operand *Src0,
                Type ArithmeticTypeOverride = IceType_void) {
-    Context.insert<Insts::Pcmpeq>(Dest, Src0,
-                                                   ArithmeticTypeOverride);
+    Context.insert<Insts::Pcmpeq>(Dest, Src0, ArithmeticTypeOverride);
   }
   void _pcmpgt(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Pcmpgt>(Dest, Src0);
@@ -728,9 +696,7 @@
   void _pmuludq(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Pmuludq>(Dest, Src0);
   }
-  void _pop(Variable *Dest) {
-    Context.insert<Insts::Pop>(Dest);
-  }
+  void _pop(Variable *Dest) { Context.insert<Insts::Pop>(Dest); }
   void _por(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Por>(Dest, Src0);
   }
@@ -770,15 +736,11 @@
   void _psubus(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Psubus>(Dest, Src0);
   }
-  void _push(Operand *Src0) {
-    Context.insert<Insts::Push>(Src0);
-  }
+  void _push(Operand *Src0) { Context.insert<Insts::Push>(Src0); }
   void _pxor(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Pxor>(Dest, Src0);
   }
-  void _ret(Variable *Src0 = nullptr) {
-    Context.insert<Insts::Ret>(Src0);
-  }
+  void _ret(Variable *Src0 = nullptr) { Context.insert<Insts::Ret>(Src0); }
   void _rol(Variable *Dest, Operand *Src0) {
     Context.insert<Insts::Rol>(Dest, Src0);
   }
@@ -962,18 +924,8 @@
   /// Optimizations for idiom recognition.
   bool lowerOptimizeFcmpSelect(const InstFcmp *Fcmp, const InstSelect *Select);
 
-  /// Complains loudly if invoked because the cpu can handle 64-bit types
-  /// natively.
-  template <typename T = Traits>
-  typename std::enable_if<T::Is64Bit, void>::type lowerIcmp64(const InstIcmp *,
-                                                              const Inst *) {
-    llvm::report_fatal_error(
-        "Hey, yo! This is x86-64. Watcha doin'? (lowerIcmp64)");
-  }
   /// x86lowerIcmp64 handles 64-bit icmp lowering.
-  template <typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, void>::type
-  lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer);
+  void lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer);
 
   BoolFolding FoldingInfo;
 
diff --git a/third_party/subzero/src/IceTargetLoweringX8632Traits.h b/third_party/subzero/src/IceTargetLoweringX8632Traits.h
index 9637d63..72ba759 100644
--- a/third_party/subzero/src/IceTargetLoweringX8632Traits.h
+++ b/third_party/subzero/src/IceTargetLoweringX8632Traits.h
@@ -29,7 +29,6 @@
 #include <array>
 
 namespace Ice {
-
 namespace X8632 {
 using namespace ::Ice::X86;
 
@@ -48,7 +47,6 @@
   //      \/_/\/_/\/_____/\/_/  \/_/
   //
   //----------------------------------------------------------------------------
-  static constexpr bool Is64Bit = false;
   static constexpr ::Ice::RegX8632::GPRRegister Last8BitGPR =
       ::Ice::RegX8632::GPRRegister::Encoded_Reg_ebx;
 
diff --git a/third_party/subzero/src/IceTargetLoweringX8664.cpp b/third_party/subzero/src/IceTargetLoweringX8664.cpp
index 09cf4f9..0a48f13 100644
--- a/third_party/subzero/src/IceTargetLoweringX8664.cpp
+++ b/third_party/subzero/src/IceTargetLoweringX8664.cpp
@@ -149,8 +149,7 @@
 // register args to it.
 SizeT getShadowStoreSize() {
 #if defined(_WIN64)
-  static const SizeT ShadowStoreSize =
-      Traits::Is64Bit ? 4 * typeWidthInBytes(Traits::WordType) : 0;
+  static const SizeT ShadowStoreSize = 4 * typeWidthInBytes(Traits::WordType);
   return ShadowStoreSize;
 #else
   return 0;
@@ -163,21 +162,17 @@
 typename BoolFolding::BoolFoldingProducerKind
 BoolFolding::getProducerKind(const Inst *Instr) {
   if (llvm::isa<InstIcmp>(Instr)) {
-    if (Traits::Is64Bit || Instr->getSrc(0)->getType() != IceType_i64)
-      return PK_Icmp32;
-    return PK_Icmp64;
+    return PK_Icmp32;
   }
   if (llvm::isa<InstFcmp>(Instr))
     return PK_Fcmp;
   if (auto *Arith = llvm::dyn_cast<InstArithmetic>(Instr)) {
-    if (Traits::Is64Bit || Arith->getSrc(0)->getType() != IceType_i64) {
-      switch (Arith->getOp()) {
-      default:
-        return PK_None;
-      case InstArithmetic::And:
-      case InstArithmetic::Or:
-        return PK_Arith;
-      }
+    switch (Arith->getOp()) {
+    default:
+      return PK_None;
+    case InstArithmetic::And:
+    case InstArithmetic::Or:
+      return PK_Arith;
     }
   }
   return PK_None; // TODO(stichnot): remove this
@@ -225,7 +220,7 @@
   default:
     return false;
   case PK_Icmp64:
-    return !Traits::Is64Bit;
+    return false;
   case PK_Fcmp:
     return Traits::TableFcmp[llvm::cast<InstFcmp>(Instr)->getCondition()].C2 !=
            CondX86::Br_None;
@@ -402,12 +397,7 @@
   return false;
 }
 
-::Ice::Type TargetX8664::getPointerType() {
-  if (!Traits::Is64Bit) {
-    return ::Ice::IceType_i32;
-  }
-  return ::Ice::IceType_i64;
-}
+::Ice::Type TargetX8664::getPointerType() { return ::Ice::IceType_i64; }
 
 void TargetX8664::translateO2() {
   TimerMarker T(TimerStack::TT_O2, Func);
@@ -756,7 +746,7 @@
         // An InstLoad qualifies unless it uses a 64-bit absolute address,
         // which requires legalization to insert a copy to register.
         // TODO(b/148272103): Fold these after legalization.
-        if (!Traits::Is64Bit || !llvm::isa<Constant>(Load->getLoadAddress())) {
+        if (!llvm::isa<Constant>(Load->getLoadAddress())) {
           LoadDest = Load->getDest();
           constexpr bool DoLegalize = false;
           LoadSrc = formMemoryOperand(Load->getLoadAddress(),
@@ -768,7 +758,6 @@
         // i64 on x86-32).
         Intrinsics::IntrinsicID ID = Intrin->getIntrinsicID();
         if (ID == Intrinsics::AtomicLoad &&
-            (Traits::Is64Bit || Intrin->getDest()->getType() != IceType_i64) &&
             Intrinsics::isMemoryOrderValid(
                 ID, getConstantMemoryOrder(Intrin->getArg(1)))) {
           LoadDest = Intrin->getDest();
@@ -1248,17 +1237,6 @@
                                          size_t BasicFrameOffset,
                                          size_t StackAdjBytes,
                                          size_t &InArgsSizeBytes) {
-  if (!Traits::Is64Bit) {
-    if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) {
-      Variable *Lo = Arg64On32->getLo();
-      Variable *Hi = Arg64On32->getHi();
-      finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, StackAdjBytes,
-                             InArgsSizeBytes);
-      finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, StackAdjBytes,
-                             InArgsSizeBytes);
-      return;
-    }
-  }
   Type Ty = Arg->getType();
   if (isVectorType(Ty)) {
     InArgsSizeBytes = Traits::applyStackAlignment(InArgsSizeBytes);
@@ -1266,7 +1244,6 @@
   Arg->setStackOffset(BasicFrameOffset + InArgsSizeBytes);
   InArgsSizeBytes += typeWidthInBytesOnStack(Ty);
   if (Arg->hasReg()) {
-    assert(Ty != IceType_i64 || Traits::Is64Bit);
     auto *Mem = X86OperandMem::create(
         Func, Ty, FramePtr,
         Ctx->getConstantInt32(Arg->getStackOffset() + StackAdjBytes));
@@ -1331,72 +1308,6 @@
 
 Type TargetX8664::stackSlotType() { return Traits::WordType; }
 
-template <typename T>
-typename std::enable_if<!T::Is64Bit, Operand>::type *
-TargetX8664::loOperand(Operand *Operand) {
-  assert(Operand->getType() == IceType_i64 ||
-         Operand->getType() == IceType_f64);
-  if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
-    return Operand;
-  if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
-    return Var64On32->getLo();
-  if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
-    auto *ConstInt = llvm::dyn_cast<ConstantInteger32>(
-        Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue())));
-    // Check if we need to blind/pool the constant.
-    return legalize(ConstInt);
-  }
-  if (auto *Mem = llvm::dyn_cast<X86OperandMem>(Operand)) {
-    auto *MemOperand = X86OperandMem::create(
-        Func, IceType_i32, Mem->getBase(), Mem->getOffset(), Mem->getIndex(),
-        Mem->getShift(), Mem->getSegmentRegister(), Mem->getIsRebased());
-    // Test if we should randomize or pool the offset, if so randomize it or
-    // pool it then create mem operand with the blinded/pooled constant.
-    // Otherwise, return the mem operand as ordinary mem operand.
-    return legalize(MemOperand);
-  }
-  llvm_unreachable("Unsupported operand type");
-  return nullptr;
-}
-
-template <typename T>
-typename std::enable_if<!T::Is64Bit, Operand>::type *
-TargetX8664::hiOperand(Operand *Operand) {
-  assert(Operand->getType() == IceType_i64 ||
-         Operand->getType() == IceType_f64);
-  if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
-    return Operand;
-  if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
-    return Var64On32->getHi();
-  if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
-    auto *ConstInt = llvm::dyn_cast<ConstantInteger32>(
-        Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue() >> 32)));
-    // Check if we need to blind/pool the constant.
-    return legalize(ConstInt);
-  }
-  if (auto *Mem = llvm::dyn_cast<X86OperandMem>(Operand)) {
-    Constant *Offset = Mem->getOffset();
-    if (Offset == nullptr) {
-      Offset = Ctx->getConstantInt32(4);
-    } else if (auto *IntOffset = llvm::dyn_cast<ConstantInteger32>(Offset)) {
-      Offset = Ctx->getConstantInt32(4 + IntOffset->getValue());
-    } else if (auto *SymOffset = llvm::dyn_cast<ConstantRelocatable>(Offset)) {
-      assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4));
-      Offset =
-          Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName());
-    }
-    auto *MemOperand = X86OperandMem::create(
-        Func, IceType_i32, Mem->getBase(), Offset, Mem->getIndex(),
-        Mem->getShift(), Mem->getSegmentRegister(), Mem->getIsRebased());
-    // Test if the Offset is an eligible i32 constants for randomization and
-    // pooling. Blind/pool it if it is. Otherwise return as oridinary mem
-    // operand.
-    return legalize(MemOperand);
-  }
-  llvm_unreachable("Unsupported operand type");
-  return nullptr;
-}
-
 SmallBitVector TargetX8664::getRegisterSet(RegSetMask Include,
                                            RegSetMask Exclude) const {
   return Traits::getRegisterSet(getFlags(), Include, Exclude);
@@ -1456,7 +1367,7 @@
     // Non-constant sizes need to be adjusted to the next highest multiple of
     // the required alignment at runtime.
     Variable *T = nullptr;
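+    // Note (editorial): the runtime adjustment below operates on a 64-bit
+    // register, so a narrower TotalSize is zero-extended into one first.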
-    if (Traits::Is64Bit && TotalSize->getType() != IceType_i64) {
+    if (TotalSize->getType() != IceType_i64) {
       T = makeReg(IceType_i64);
       _movzx(T, TotalSize);
     } else {
@@ -1620,8 +1531,7 @@
     }
   }
   // Lea optimization only works for i16 and i32 types, not i8.
-  if (Ty != IceType_i32 && !(Traits::Is64Bit && Ty == IceType_i64) &&
-      (Count3 || Count5 || Count9))
+  if (Ty != IceType_i32 && Ty != IceType_i64 && (Count3 || Count5 || Count9))
     return false;
   // Limit the number of lea/shl operations for a single multiply, to a
   // somewhat arbitrary choice of 3.
@@ -1896,129 +1806,6 @@
     assert(SwapCount <= 1);
     (void)SwapCount;
   }
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
-    // These x86-32 helper-call-involved instructions are lowered in this
-    // separate switch. This is because loOperand() and hiOperand() may insert
-    // redundant instructions for constant blinding and pooling. Such redundant
-    // instructions will fail liveness analysis under -Om1 setting. And,
-    // actually these arguments do not need to be processed with loOperand()
-    // and hiOperand() to be used.
-    switch (Instr->getOp()) {
-    case InstArithmetic::Udiv:
-    case InstArithmetic::Sdiv:
-    case InstArithmetic::Urem:
-    case InstArithmetic::Srem:
-      llvm::report_fatal_error("Helper call was expected");
-      return;
-    default:
-      break;
-    }
-
-    auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-    auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-    Operand *Src0Lo = loOperand(Src0);
-    Operand *Src0Hi = hiOperand(Src0);
-    Operand *Src1Lo = loOperand(Src1);
-    Operand *Src1Hi = hiOperand(Src1);
-    Variable *T_Lo = nullptr, *T_Hi = nullptr;
-    switch (Instr->getOp()) {
-    case InstArithmetic::_num:
-      llvm_unreachable("Unknown arithmetic operator");
-      break;
-    case InstArithmetic::Add:
-      _mov(T_Lo, Src0Lo);
-      _add(T_Lo, Src1Lo);
-      _mov(DestLo, T_Lo);
-      _mov(T_Hi, Src0Hi);
-      _adc(T_Hi, Src1Hi);
-      _mov(DestHi, T_Hi);
-      break;
-    case InstArithmetic::And:
-      _mov(T_Lo, Src0Lo);
-      _and(T_Lo, Src1Lo);
-      _mov(DestLo, T_Lo);
-      _mov(T_Hi, Src0Hi);
-      _and(T_Hi, Src1Hi);
-      _mov(DestHi, T_Hi);
-      break;
-    case InstArithmetic::Or:
-      _mov(T_Lo, Src0Lo);
-      _or(T_Lo, Src1Lo);
-      _mov(DestLo, T_Lo);
-      _mov(T_Hi, Src0Hi);
-      _or(T_Hi, Src1Hi);
-      _mov(DestHi, T_Hi);
-      break;
-    case InstArithmetic::Xor:
-      _mov(T_Lo, Src0Lo);
-      _xor(T_Lo, Src1Lo);
-      _mov(DestLo, T_Lo);
-      _mov(T_Hi, Src0Hi);
-      _xor(T_Hi, Src1Hi);
-      _mov(DestHi, T_Hi);
-      break;
-    case InstArithmetic::Sub:
-      _mov(T_Lo, Src0Lo);
-      _sub(T_Lo, Src1Lo);
-      _mov(DestLo, T_Lo);
-      _mov(T_Hi, Src0Hi);
-      _sbb(T_Hi, Src1Hi);
-      _mov(DestHi, T_Hi);
-      break;
-    case InstArithmetic::Mul: {
-      Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr;
-      Variable *T_4Lo = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
-      Variable *T_4Hi = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
-      // gcc does the following:
-      // a=b*c ==>
-      //   t1 = b.hi; t1 *=(imul) c.lo
-      //   t2 = c.hi; t2 *=(imul) b.lo
-      //   t3:eax = b.lo
-      //   t4.hi:edx,t4.lo:eax = t3:eax *(mul) c.lo
-      //   a.lo = t4.lo
-      //   t4.hi += t1
-      //   t4.hi += t2
-      //   a.hi = t4.hi
-      // The mul instruction cannot take an immediate operand.
-      Src1Lo = legalize(Src1Lo, Legal_Reg | Legal_Mem);
-      _mov(T_1, Src0Hi);
-      _imul(T_1, Src1Lo);
-      _mov(T_3, Src0Lo, Traits::RegisterSet::Reg_eax);
-      _mul(T_4Lo, T_3, Src1Lo);
-      // The mul instruction produces two dest variables, edx:eax. We create a
-      // fake definition of edx to account for this.
-      Context.insert<InstFakeDef>(T_4Hi, T_4Lo);
-      Context.insert<InstFakeUse>(T_4Hi);
-      _mov(DestLo, T_4Lo);
-      _add(T_4Hi, T_1);
-      _mov(T_2, Src1Hi);
-      Src0Lo = legalize(Src0Lo, Legal_Reg | Legal_Mem);
-      _imul(T_2, Src0Lo);
-      _add(T_4Hi, T_2);
-      _mov(DestHi, T_4Hi);
-    } break;
-    case InstArithmetic::Shl:
-    case InstArithmetic::Lshr:
-    case InstArithmetic::Ashr:
-      lowerShift64(Instr->getOp(), Src0Lo, Src0Hi, Src1Lo, DestLo, DestHi);
-      break;
-    case InstArithmetic::Fadd:
-    case InstArithmetic::Fsub:
-    case InstArithmetic::Fmul:
-    case InstArithmetic::Fdiv:
-    case InstArithmetic::Frem:
-      llvm_unreachable("FP instruction with i64 type");
-      break;
-    case InstArithmetic::Udiv:
-    case InstArithmetic::Sdiv:
-    case InstArithmetic::Urem:
-    case InstArithmetic::Srem:
-      llvm_unreachable("Call-helper-involved instruction for i64 type \
-                       should have already been handled before");
-      break;
-    }
-    return;
-  }
   if (isVectorType(Ty)) {
     // TODO: Trap on integer divide and integer modulo by zero. See:
     // https://code.google.com/p/nativeclient/issues/detail?id=3899
@@ -2175,8 +1962,7 @@
     llvm_unreachable("Unknown arithmetic operator");
     break;
   case InstArithmetic::Add: {
-    const bool ValidType =
-        Ty == IceType_i32 || (Ty == IceType_i64 && Traits::Is64Bit);
+    const bool ValidType = Ty == IceType_i32 || Ty == IceType_i64;
     auto *Const = llvm::dyn_cast<Constant>(Instr->getSrc(1));
     const bool ValidKind =
         Const != nullptr && (llvm::isa<ConstantInteger32>(Const) ||
@@ -2682,12 +2468,8 @@
       ReturnReg = makeReg(DestTy, Traits::RegisterSet::Reg_eax);
       break;
     case IceType_i64:
-      if (Traits::Is64Bit) {
-        ReturnReg = makeReg(IceType_i64, Traits::getRaxOrDie());
-      } else {
-        ReturnReg = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
-        ReturnRegHi = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
-      }
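+      // x86-64 returns a 64-bit integer in rax alone, so no edx:eax pair is
+      // needed.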
+      ReturnReg = makeReg(IceType_i64, Traits::getRaxOrDie());
       break;
     case IceType_f32:
     case IceType_f64:
@@ -2740,20 +2522,8 @@
   } else {
     assert(isScalarIntegerType(DestTy));
     assert(ReturnReg && "Integer type requires a return register");
-    if (DestTy == IceType_i64 && !Traits::Is64Bit) {
-      assert(ReturnRegHi && "64-bit type requires two return registers");
-      auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
-      Variable *DestLo = Dest64On32->getLo();
-      Variable *DestHi = Dest64On32->getHi();
-      _mov(Tmp, ReturnReg);
-      _mov(DestLo, Tmp);
-      Variable *TmpHi = nullptr;
-      _mov(TmpHi, ReturnRegHi);
-      _mov(DestHi, TmpHi);
-    } else {
-      _mov(Tmp, ReturnReg);
-      _mov(Dest, Tmp);
-    }
+    _mov(Tmp, ReturnReg);
+    _mov(Dest, Tmp);
   }
 }
 
@@ -2796,28 +2566,6 @@
         _psra(T, ShiftConstant);
         _movp(Dest, T);
       }
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
-      // t1=movsx src; t2=t1; t2=sar t2, 31; dst.lo=t1; dst.hi=t2
-      Constant *Shift = Ctx->getConstantInt32(31);
-      auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-      auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-      Variable *T_Lo = makeReg(DestLo->getType());
-      if (Src0RM->getType() == IceType_i32) {
-        _mov(T_Lo, Src0RM);
-      } else if (Src0RM->getType() == IceType_i1) {
-        _movzx(T_Lo, Src0RM);
-        _shl(T_Lo, Shift);
-        _sar(T_Lo, Shift);
-      } else {
-        _movsx(T_Lo, Src0RM);
-      }
-      _mov(DestLo, T_Lo);
-      Variable *T_Hi = nullptr;
-      _mov(T_Hi, T_Lo);
-      if (Src0RM->getType() != IceType_i1)
-        // For i1, the sar instruction is already done above.
-        _sar(T_Hi, Shift);
-      _mov(DestHi, T_Hi);
     } else if (Src0RM->getType() == IceType_i1) {
       // t1 = src
       // shl t1, dst_bitwidth - 1
@@ -2853,19 +2601,6 @@
       _movp(T, Src0RM);
       _pand(T, OneMask);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
-      // t1=movzx src; dst.lo=t1; dst.hi=0
-      Constant *Zero = Ctx->getConstantZero(IceType_i32);
-      auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-      auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-      Variable *Tmp = makeReg(DestLo->getType());
-      if (Src0RM->getType() == IceType_i32) {
-        _mov(Tmp, Src0RM);
-      } else {
-        _movzx(Tmp, Src0RM);
-      }
-      _mov(DestLo, Tmp);
-      _mov(DestHi, Zero);
     } else if (Src0RM->getType() == IceType_i1) {
       // t = Src0RM; Dest = t
       Variable *T = nullptr;
@@ -2873,7 +2608,6 @@
         _mov(T, Src0RM);
       } else {
         assert(DestTy != IceType_i1);
-        assert(Traits::Is64Bit || DestTy != IceType_i64);
         // Use 32-bit for both 16-bit and 32-bit, since 32-bit ops are shorter.
         // In x86-64 we need to widen T to 64-bits to ensure that T -- if
         // written to the stack (i.e., in -Om1) will be fully zero-extended.
@@ -2902,8 +2636,6 @@
     } else if (DestTy == IceType_i1 || DestTy == IceType_i8) {
       // Make sure we truncate from and into valid registers.
       Operand *Src0 = legalizeUndef(Instr->getSrc(0));
-      if (!Traits::Is64Bit && Src0->getType() == IceType_i64)
-        Src0 = loOperand(Src0);
       Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
       Variable *T = copyToReg8(Src0RM);
       if (DestTy == IceType_i1)
@@ -2911,8 +2643,6 @@
       _mov(Dest, T);
     } else {
       Operand *Src0 = legalizeUndef(Instr->getSrc(0));
-      if (!Traits::Is64Bit && Src0->getType() == IceType_i64)
-        Src0 = loOperand(Src0);
       Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
       // t1 = trunc Src0RM; Dest = t1
       Variable *T = makeReg(DestTy);
@@ -2938,13 +2668,11 @@
       Variable *T = makeReg(DestTy);
       _cvt(T, Src0R, Insts::Cvt::Tps2dq);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
-      llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem);
       // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && DestTy == IceType_i64) {
+      if (DestTy == IceType_i64) {
         T_1 = makeReg(IceType_i64);
       } else {
         assert(DestTy != IceType_i64);
@@ -2967,15 +2695,14 @@
   case InstCast::Fptoui:
     if (isVectorType(DestTy)) {
       llvm::report_fatal_error("Helper call was expected");
-    } else if (DestTy == IceType_i64 ||
-               (!Traits::Is64Bit && DestTy == IceType_i32)) {
+    } else if (DestTy == IceType_i64) {
       llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem);
       // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
       assert(DestTy != IceType_i64);
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && DestTy == IceType_i32) {
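+      // For an i32 fptoui result, convert into a 64-bit register and keep
+      // the low 32 bits: the scalar cvtt conversions produce signed values,
+      // so covering the full u32 range needs the wider conversion.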
+      if (DestTy == IceType_i32) {
         T_1 = makeReg(IceType_i64);
       } else {
         assert(DestTy != IceType_i32);
@@ -3002,14 +2729,12 @@
       Variable *T = makeReg(DestTy);
       _cvt(T, Src0R, Insts::Cvt::Dq2ps);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && Instr->getSrc(0)->getType() == IceType_i64) {
-      llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Instr->getSrc(0), Legal_Reg | Legal_Mem);
       // Sign-extend the operand.
       // t1.i32 = movsx Src0RM; t2 = Cvt t1.i32; Dest = t2
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && Src0RM->getType() == IceType_i64) {
+      if (Src0RM->getType() == IceType_i64) {
         T_1 = makeReg(IceType_i64);
       } else {
         assert(Src0RM->getType() != IceType_i64);
@@ -3028,19 +2753,17 @@
     Operand *Src0 = Instr->getSrc(0);
     if (isVectorType(Src0->getType())) {
       llvm::report_fatal_error("Helper call was expected");
-    } else if (Src0->getType() == IceType_i64 ||
-               (!Traits::Is64Bit && Src0->getType() == IceType_i32)) {
+    } else if (Src0->getType() == IceType_i64) {
       llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
       // Zero-extend the operand.
       // t1.i32 = movzx Src0RM; t2 = Cvt t1.i32; Dest = t2
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && Src0RM->getType() == IceType_i32) {
+      if (Src0RM->getType() == IceType_i32) {
         T_1 = makeReg(IceType_i64);
       } else {
         assert(Src0RM->getType() != IceType_i64);
-        assert(Traits::Is64Bit || Src0RM->getType() != IceType_i32);
         T_1 = makeReg(IceType_i32);
       }
       Variable *T_2 = makeReg(DestTy);
@@ -3078,86 +2801,17 @@
     } break;
     case IceType_i64: {
       assert(Src0->getType() == IceType_f64);
-      if (Traits::Is64Bit) {
-        Variable *Src0R = legalizeToReg(Src0);
-        Variable *T = makeReg(IceType_i64);
-        _movd(T, Src0R);
-        _mov(Dest, T);
-      } else {
-        Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
-        // a.i64 = bitcast b.f64 ==>
-        //   s.f64 = spill b.f64
-        //   t_lo.i32 = lo(s.f64)
-        //   a_lo.i32 = t_lo.i32
-        //   t_hi.i32 = hi(s.f64)
-        //   a_hi.i32 = t_hi.i32
-        Operand *SpillLo, *SpillHi;
-        if (auto *Src0Var = llvm::dyn_cast<Variable>(Src0RM)) {
-          Variable *Spill = Func->makeVariable(IceType_f64);
-          Spill->setLinkedTo(Src0Var);
-          Spill->setMustNotHaveReg();
-          _movq(Spill, Src0RM);
-          SpillLo = Traits::VariableSplit::create(Func, Spill,
-                                                  Traits::VariableSplit::Low);
-          SpillHi = Traits::VariableSplit::create(Func, Spill,
-                                                  Traits::VariableSplit::High);
-        } else {
-          SpillLo = loOperand(Src0RM);
-          SpillHi = hiOperand(Src0RM);
-        }
-
-        auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-        auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-        Variable *T_Lo = makeReg(IceType_i32);
-        Variable *T_Hi = makeReg(IceType_i32);
-
-        _mov(T_Lo, SpillLo);
-        _mov(DestLo, T_Lo);
-        _mov(T_Hi, SpillHi);
-        _mov(DestHi, T_Hi);
-      }
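+      // With 64-bit GPRs the f64 bits move directly between an XMM register
+      // and a GPR; no spill through memory is needed.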
+      Variable *Src0R = legalizeToReg(Src0);
+      Variable *T = makeReg(IceType_i64);
+      _movd(T, Src0R);
+      _mov(Dest, T);
     } break;
     case IceType_f64: {
       assert(Src0->getType() == IceType_i64);
-      if (Traits::Is64Bit) {
-        Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
-        Variable *T = makeReg(IceType_f64);
-        _movd(T, Src0RM);
-        _mov(Dest, T);
-      } else {
-        Src0 = legalize(Src0);
-        if (llvm::isa<X86OperandMem>(Src0)) {
-          Variable *T = makeReg(DestTy);
-          _movq(T, Src0);
-          _movq(Dest, T);
-          break;
-        }
-        // a.f64 = bitcast b.i64 ==>
-        //   t_lo.i32 = b_lo.i32
-        //   FakeDef(s.f64)
-        //   lo(s.f64) = t_lo.i32
-        //   t_hi.i32 = b_hi.i32
-        //   hi(s.f64) = t_hi.i32
-        //   a.f64 = s.f64
-        Variable *Spill = Func->makeVariable(IceType_f64);
-        Spill->setLinkedTo(Dest);
-        Spill->setMustNotHaveReg();
-
-        Variable *T_Lo = nullptr, *T_Hi = nullptr;
-        auto *SpillLo = Traits::VariableSplit::create(
-            Func, Spill, Traits::VariableSplit::Low);
-        auto *SpillHi = Traits::VariableSplit::create(
-            Func, Spill, Traits::VariableSplit::High);
-        _mov(T_Lo, loOperand(Src0));
-        // Technically, the Spill is defined after the _store happens, but
-        // SpillLo is considered a "use" of Spill so define Spill before it is
-        // used.
-        Context.insert<InstFakeDef>(Spill);
-        _store(T_Lo, SpillLo);
-        _mov(T_Hi, hiOperand(Src0));
-        _store(T_Hi, SpillHi);
-        _movq(Dest, Spill);
-      }
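+      // The reverse bitcast moves the i64 bits (from a register or memory)
+      // straight into an XMM register.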
+      Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
+      Variable *T = makeReg(IceType_f64);
+      _movd(T, Src0RM);
+      _mov(Dest, T);
     } break;
     case IceType_v8i1: {
       llvm::report_fatal_error("Helper call was expected");
@@ -3466,11 +3120,6 @@
     return;
   }
 
-  if (!Traits::Is64Bit && Src0->getType() == IceType_i64) {
-    lowerIcmp64(Icmp, Consumer);
-    return;
-  }
-
   // cmp b, c
   if (isZero(Src1)) {
     switch (Icmp->getCondition()) {
@@ -3605,151 +3254,6 @@
   eliminateNextVectorSextInstruction(Dest);
 }
 
-template <typename T>
-typename std::enable_if<!T::Is64Bit, void>::type
-TargetX8664::lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer) {
-  // a=icmp cond, b, c ==> cmp b,c; a=1; br cond,L1; FakeUse(a); a=0; L1:
-  Operand *Src0 = legalize(Icmp->getSrc(0));
-  Operand *Src1 = legalize(Icmp->getSrc(1));
-  Variable *Dest = Icmp->getDest();
-  InstIcmp::ICond Condition = Icmp->getCondition();
-  assert(static_cast<size_t>(Condition) < Traits::TableIcmp64Size);
-  Operand *Src0LoRM = nullptr;
-  Operand *Src0HiRM = nullptr;
-  // Legalize the portions of Src0 that are going to be needed.
-  if (isZero(Src1)) {
-    switch (Condition) {
-    default:
-      llvm_unreachable("unexpected condition");
-      break;
-    // These two are not optimized, so we fall through to the general case,
-    // which needs the upper and lower halves legalized.
-    case InstIcmp::Sgt:
-    case InstIcmp::Sle:
-    // These four compare after performing an "or" of the high and low half, so
-    // they need the upper and lower halves legalized.
-    case InstIcmp::Eq:
-    case InstIcmp::Ule:
-    case InstIcmp::Ne:
-    case InstIcmp::Ugt:
-      Src0LoRM = legalize(loOperand(Src0), Legal_Reg | Legal_Mem);
-    // These two test only the high half's sign bit, so they need only
-    // the upper half legalized.
-    case InstIcmp::Sge:
-    case InstIcmp::Slt:
-      Src0HiRM = legalize(hiOperand(Src0), Legal_Reg | Legal_Mem);
-      break;
-
-    // These two move constants and hence need no legalization.
-    case InstIcmp::Uge:
-    case InstIcmp::Ult:
-      break;
-    }
-  } else {
-    Src0LoRM = legalize(loOperand(Src0), Legal_Reg | Legal_Mem);
-    Src0HiRM = legalize(hiOperand(Src0), Legal_Reg | Legal_Mem);
-  }
-  // Optimize comparisons with zero.
-  if (isZero(Src1)) {
-    Constant *SignMask = Ctx->getConstantInt32(0x80000000);
-    Variable *Temp = nullptr;
-    switch (Condition) {
-    default:
-      llvm_unreachable("unexpected condition");
-      break;
-    case InstIcmp::Eq:
-    case InstIcmp::Ule:
-      // Mov Src0HiRM first, because it was legalized most recently, and will
-      // sometimes avoid a move before the OR.
-      _mov(Temp, Src0HiRM);
-      _or(Temp, Src0LoRM);
-      Context.insert<InstFakeUse>(Temp);
-      setccOrConsumer(CondX86::Br_e, Dest, Consumer);
-      return;
-    case InstIcmp::Ne:
-    case InstIcmp::Ugt:
-      // Mov Src0HiRM first, because it was legalized most recently, and will
-      // sometimes avoid a move before the OR.
-      _mov(Temp, Src0HiRM);
-      _or(Temp, Src0LoRM);
-      Context.insert<InstFakeUse>(Temp);
-      setccOrConsumer(CondX86::Br_ne, Dest, Consumer);
-      return;
-    case InstIcmp::Uge:
-      movOrConsumer(true, Dest, Consumer);
-      return;
-    case InstIcmp::Ult:
-      movOrConsumer(false, Dest, Consumer);
-      return;
-    case InstIcmp::Sgt:
-      break;
-    case InstIcmp::Sge:
-      _test(Src0HiRM, SignMask);
-      setccOrConsumer(CondX86::Br_e, Dest, Consumer);
-      return;
-    case InstIcmp::Slt:
-      _test(Src0HiRM, SignMask);
-      setccOrConsumer(CondX86::Br_ne, Dest, Consumer);
-      return;
-    case InstIcmp::Sle:
-      break;
-    }
-  }
-  // Handle general compares.
-  Operand *Src1LoRI = legalize(loOperand(Src1), Legal_Reg | Legal_Imm);
-  Operand *Src1HiRI = legalize(hiOperand(Src1), Legal_Reg | Legal_Imm);
-  if (Consumer == nullptr) {
-    Constant *Zero = Ctx->getConstantInt(Dest->getType(), 0);
-    Constant *One = Ctx->getConstantInt(Dest->getType(), 1);
-    InstX86Label *LabelFalse = InstX86Label::create(Func, this);
-    InstX86Label *LabelTrue = InstX86Label::create(Func, this);
-    _mov(Dest, One);
-    _cmp(Src0HiRM, Src1HiRI);
-    if (Traits::TableIcmp64[Condition].C1 != CondX86::Br_None)
-      _br(Traits::TableIcmp64[Condition].C1, LabelTrue);
-    if (Traits::TableIcmp64[Condition].C2 != CondX86::Br_None)
-      _br(Traits::TableIcmp64[Condition].C2, LabelFalse);
-    _cmp(Src0LoRM, Src1LoRI);
-    _br(Traits::TableIcmp64[Condition].C3, LabelTrue);
-    Context.insert(LabelFalse);
-    _redefined(_mov(Dest, Zero));
-    Context.insert(LabelTrue);
-    return;
-  }
-  if (const auto *Br = llvm::dyn_cast<InstBr>(Consumer)) {
-    _cmp(Src0HiRM, Src1HiRI);
-    if (Traits::TableIcmp64[Condition].C1 != CondX86::Br_None)
-      _br(Traits::TableIcmp64[Condition].C1, Br->getTargetTrue());
-    if (Traits::TableIcmp64[Condition].C2 != CondX86::Br_None)
-      _br(Traits::TableIcmp64[Condition].C2, Br->getTargetFalse());
-    _cmp(Src0LoRM, Src1LoRI);
-    _br(Traits::TableIcmp64[Condition].C3, Br->getTargetTrue(),
-        Br->getTargetFalse());
-    return;
-  }
-  if (auto *Select = llvm::dyn_cast<InstSelect>(Consumer)) {
-    Operand *SrcT = Select->getTrueOperand();
-    Operand *SrcF = Select->getFalseOperand();
-    Variable *SelectDest = Select->getDest();
-    InstX86Label *LabelFalse = InstX86Label::create(Func, this);
-    InstX86Label *LabelTrue = InstX86Label::create(Func, this);
-    lowerMove(SelectDest, SrcT, false);
-    _cmp(Src0HiRM, Src1HiRI);
-    if (Traits::TableIcmp64[Condition].C1 != CondX86::Br_None)
-      _br(Traits::TableIcmp64[Condition].C1, LabelTrue);
-    if (Traits::TableIcmp64[Condition].C2 != CondX86::Br_None)
-      _br(Traits::TableIcmp64[Condition].C2, LabelFalse);
-    _cmp(Src0LoRM, Src1LoRI);
-    _br(Traits::TableIcmp64[Condition].C3, LabelTrue);
-    Context.insert(LabelFalse);
-    static constexpr bool IsRedefinition = true;
-    lowerMove(SelectDest, SrcF, IsRedefinition);
-    Context.insert(LabelTrue);
-    return;
-  }
-  llvm::report_fatal_error("Unexpected consumer type");
-}
-
 void TargetX8664::setccOrConsumer(BrCond Condition, Variable *Dest,
                                   const Inst *Consumer) {
   if (Consumer == nullptr) {
@@ -3860,8 +3364,7 @@
     ElementToInsertNotLegalized = Expanded;
   }
 
-  if (Ty == IceType_v8i16 || Ty == IceType_v8i1 ||
-      InstructionSet >= SSE4_1) {
+  if (Ty == IceType_v8i16 || Ty == IceType_v8i1 || InstructionSet >= SSE4_1) {
     // Use insertps, pinsrb, pinsrw, or pinsrd.
     Operand *ElementRM =
         legalize(ElementToInsertNotLegalized, Legal_Reg | Legal_Mem);
@@ -4041,25 +3544,6 @@
       return;
     }
     Variable *Dest = Instr->getDest();
-    if (!Traits::Is64Bit) {
-      if (auto *Dest64On32 = llvm::dyn_cast<Variable64On32>(Dest)) {
-        // Follow what GCC does and use a movq instead of what lowerLoad()
-        // normally does (split the load into two). Thus, this skips
-        // load/arithmetic op folding. Load/arithmetic folding can't happen
-        // anyway, since this is x86-32 and integer arithmetic only happens on
-        // 32-bit quantities.
-        Variable *T = makeReg(IceType_f64);
-        X86OperandMem *Addr = formMemoryOperand(Instr->getArg(0), IceType_f64);
-        _movq(T, Addr);
-        // Then cast the bits back out of the XMM register to the i64 Dest.
-        auto *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T);
-        lowerCast(Cast);
-        // Make sure that the atomic load isn't elided when unused.
-        Context.insert<InstFakeUse>(Dest64On32->getLo());
-        Context.insert<InstFakeUse>(Dest64On32->getHi());
-        return;
-      }
-    }
     auto *Load = InstLoad::create(Func, Dest, Instr->getArg(0));
     lowerLoad(Load);
     // Make sure the atomic load isn't elided when unused, by adding a FakeUse.
@@ -4091,19 +3575,6 @@
     // it visible.
     Operand *Value = Instr->getArg(0);
     Operand *Ptr = Instr->getArg(1);
-    if (!Traits::Is64Bit && Value->getType() == IceType_i64) {
-      // Use a movq instead of what lowerStore() normally does (split the store
-      // into two), following what GCC does. Cast the bits from int -> to an
-      // xmm register first.
-      Variable *T = makeReg(IceType_f64);
-      auto *Cast = InstCast::create(Func, InstCast::Bitcast, T, Value);
-      lowerCast(Cast);
-      // Then store XMM w/ a movq.
-      X86OperandMem *Addr = formMemoryOperand(Ptr, IceType_f64);
-      _storeq(T, Addr);
-      _mfence();
-      return;
-    }
     auto *Store = InstStore::create(Func, Value, Ptr);
     lowerStore(Store);
     _mfence();
@@ -4112,20 +3583,8 @@
   case Intrinsics::Bswap: {
     Variable *Dest = Instr->getDest();
     Operand *Val = Instr->getArg(0);
-    // In 32-bit mode, bswap only works on 32-bit arguments, and the argument
-    // must be a register. Use rotate left for 16-bit bswap.
-    if (!Traits::Is64Bit && Val->getType() == IceType_i64) {
-      Val = legalizeUndef(Val);
-      Variable *T_Lo = legalizeToReg(loOperand(Val));
-      Variable *T_Hi = legalizeToReg(hiOperand(Val));
-      auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-      auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-      _bswap(T_Lo);
-      _bswap(T_Hi);
-      _mov(DestLo, T_Hi);
-      _mov(DestHi, T_Lo);
-    } else if ((Traits::Is64Bit && Val->getType() == IceType_i64) ||
-               Val->getType() == IceType_i32) {
+    // bswap needs a 32- or 64-bit register; use rotate left for 16-bit.
+    if (Val->getType() == IceType_i64 || Val->getType() == IceType_i32) {
       Variable *T = legalizeToReg(Val);
       _bswap(T);
       _mov(Dest, T);
@@ -4147,21 +3606,17 @@
     Type ValTy = Val->getType();
     assert(ValTy == IceType_i32 || ValTy == IceType_i64);
 
-    if (!Traits::Is64Bit) {
-      T = Dest;
-    } else {
-      T = makeReg(IceType_i64);
-      if (ValTy == IceType_i32) {
-        // in x86-64, __popcountsi2 is not defined, so we cheat a bit by
-        // converting it to a 64-bit value, and using ctpop_i64. _movzx should
-        // ensure we will not have any bits set on Val's upper 32 bits.
-        Variable *V = makeReg(IceType_i64);
-        Operand *ValRM = legalize(Val, Legal_Reg | Legal_Mem);
-        _movzx(V, ValRM);
-        Val = V;
-      }
-      ValTy = IceType_i64;
+    T = makeReg(IceType_i64);
+    if (ValTy == IceType_i32) {
+      // In x86-64, __popcountsi2 is not defined, so we cheat a bit by
+      // converting it to a 64-bit value and using ctpop_i64. _movzx should
+      // ensure we will not have any bits set on Val's upper 32 bits.
+      Variable *V = makeReg(IceType_i64);
+      Operand *ValRM = legalize(Val, Legal_Reg | Legal_Mem);
+      _movzx(V, ValRM);
+      Val = V;
     }
+    ValTy = IceType_i64;
 
     InstCall *Call =
         makeHelperCall(ValTy == IceType_i32 ? RuntimeHelper::H_call_ctpop_i32
@@ -4174,48 +3629,33 @@
     // (in 64-bit mode). Thus, clear the upper bits of the dest just in case
     // the user doesn't do that in the IR. If the user does that in the IR,
     // then this zero'ing instruction is dead and gets optimized out.
-    if (!Traits::Is64Bit) {
-      assert(T == Dest);
-      if (Val->getType() == IceType_i64) {
-        auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-        Constant *Zero = Ctx->getConstantZero(IceType_i32);
-        _mov(DestHi, Zero);
-      }
-    } else {
-      assert(Val->getType() == IceType_i64);
-      // T is 64 bit. It needs to be copied to dest. We need to:
-      //
-      // T_1.32 = trunc T.64 to i32
-      // T_2.64 = zext T_1.32 to i64
-      // Dest.<<right_size>> = T_2.<<right_size>>
-      //
-      // which ensures the upper 32 bits will always be cleared. Just doing a
-      //
-      // mov Dest.32 = trunc T.32 to i32
-      //
-      // is dangerous because there's a chance the compiler will optimize this
-      // copy out. To use _movzx we need two new registers (one 32-, and
-      // another 64-bit wide.)
-      Variable *T_1 = makeReg(IceType_i32);
-      _mov(T_1, T);
-      Variable *T_2 = makeReg(IceType_i64);
-      _movzx(T_2, T_1);
-      _mov(Dest, T_2);
-    }
+    assert(Val->getType() == IceType_i64);
+    // T is 64-bit and needs to be copied to Dest. We need to:
+    //
+    // T_1.32 = trunc T.64 to i32
+    // T_2.64 = zext T_1.32 to i64
+    // Dest.<<right_size>> = T_2.<<right_size>>
+    //
+    // which ensures the upper 32 bits will always be cleared. Just doing a
+    //
+    // mov Dest.32 = trunc T.32 to i32
+    //
+    // is dangerous because there's a chance the compiler will optimize this
+    // copy out. To use _movzx we need two new registers (one 32-bit and
+    // one 64-bit wide).
+    Variable *T_1 = makeReg(IceType_i32);
+    _mov(T_1, T);
+    Variable *T_2 = makeReg(IceType_i64);
+    _movzx(T_2, T_1);
+    _mov(Dest, T_2);
     return;
   }
   case Intrinsics::Ctlz: {
     // The "is zero undef" parameter is ignored and we always return a
     // well-defined value.
     Operand *Val = legalize(Instr->getArg(0));
-    Operand *FirstVal;
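+    // On x86-64 the whole operand fits in one register, so no high half is
+    // split off and SecondVal stays null.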
+    Operand *FirstVal = Val;
     Operand *SecondVal = nullptr;
-    if (!Traits::Is64Bit && Val->getType() == IceType_i64) {
-      FirstVal = loOperand(Val);
-      SecondVal = hiOperand(Val);
-    } else {
-      FirstVal = Val;
-    }
     constexpr bool IsCttz = false;
     lowerCountZeros(IsCttz, Val->getType(), Instr->getDest(), FirstVal,
                     SecondVal);
@@ -4225,14 +3665,8 @@
     // The "is zero undef" parameter is ignored and we always return a
     // well-defined value.
     Operand *Val = legalize(Instr->getArg(0));
-    Operand *FirstVal;
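+    // As with Ctlz above, the operand is handled in a single register.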
+    Operand *FirstVal = Val;
     Operand *SecondVal = nullptr;
-    if (!Traits::Is64Bit && Val->getType() == IceType_i64) {
-      FirstVal = hiOperand(Val);
-      SecondVal = loOperand(Val);
-    } else {
-      FirstVal = Val;
-    }
     constexpr bool IsCttz = true;
     lowerCountZeros(IsCttz, Val->getType(), Instr->getDest(), FirstVal,
                     SecondVal);
@@ -4500,13 +3934,11 @@
       Variable *T = makeReg(DestTy);
       _cvt(T, Src0R, Insts::Cvt::Ps2dq);
       _movp(Dest, T);
-    } else if (!Traits::Is64Bit && DestTy == IceType_i64) {
-      llvm::report_fatal_error("Helper call was expected");
     } else {
       Operand *Src0RM = legalize(Src, Legal_Reg | Legal_Mem);
       // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
       Variable *T_1 = nullptr;
-      if (Traits::Is64Bit && DestTy == IceType_i64) {
+      if (DestTy == IceType_i64) {
         T_1 = makeReg(IceType_i64);
       } else {
         assert(DestTy != IceType_i64);
@@ -4553,26 +3985,6 @@
 void TargetX8664::lowerAtomicCmpxchg(Variable *DestPrev, Operand *Ptr,
                                      Operand *Expected, Operand *Desired) {
   Type Ty = Expected->getType();
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
-    // Reserve the pre-colored registers first, before adding any more
-    // infinite-weight variables from formMemoryOperand's legalization.
-    Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
-    Variable *T_eax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
-    Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx);
-    Variable *T_ebx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ebx);
-    _mov(T_eax, loOperand(Expected));
-    _mov(T_edx, hiOperand(Expected));
-    _mov(T_ebx, loOperand(Desired));
-    _mov(T_ecx, hiOperand(Desired));
-    X86OperandMem *Addr = formMemoryOperand(Ptr, Ty);
-    constexpr bool Locked = true;
-    _cmpxchg8b(Addr, T_edx, T_eax, T_ecx, T_ebx, Locked);
-    auto *DestLo = llvm::cast<Variable>(loOperand(DestPrev));
-    auto *DestHi = llvm::cast<Variable>(hiOperand(DestPrev));
-    _mov(DestLo, T_eax);
-    _mov(DestHi, T_edx);
-    return;
-  }
   RegNumT Eax;
   switch (Ty) {
   default:
@@ -4682,14 +4094,6 @@
     Func->setError("Unknown AtomicRMW operation");
     return;
   case Intrinsics::AtomicAdd: {
-    if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
-      // All the fall-through paths must set this to true, but use this
-      // for asserting.
-      NeedsCmpxchg = true;
-      Op_Lo = &TargetX8664::_add;
-      Op_Hi = &TargetX8664::_adc;
-      break;
-    }
     X86OperandMem *Addr = formMemoryOperand(Ptr, Dest->getType());
     constexpr bool Locked = true;
     Variable *T = nullptr;
@@ -4699,12 +4103,6 @@
     return;
   }
   case Intrinsics::AtomicSub: {
-    if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
-      NeedsCmpxchg = true;
-      Op_Lo = &TargetX8664::_sub;
-      Op_Hi = &TargetX8664::_sbb;
-      break;
-    }
     X86OperandMem *Addr = formMemoryOperand(Ptr, Dest->getType());
     constexpr bool Locked = true;
     Variable *T = nullptr;
@@ -4735,14 +4133,6 @@
     Op_Hi = &TargetX8664::_xor;
     break;
   case Intrinsics::AtomicExchange:
-    if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
-      NeedsCmpxchg = true;
-      // NeedsCmpxchg, but no real Op_Lo/Op_Hi need to be done. The values
-      // just need to be moved to the ecx and ebx registers.
-      Op_Lo = nullptr;
-      Op_Hi = nullptr;
-      break;
-    }
     X86OperandMem *Addr = formMemoryOperand(Ptr, Dest->getType());
     Variable *T = nullptr;
     _mov(T, Val);
@@ -4785,56 +4175,6 @@
   // If Op_{Lo,Hi} are nullptr, then just copy the value.
   Val = legalize(Val);
   Type Ty = Val->getType();
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
-    Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
-    Variable *T_eax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
-    X86OperandMem *Addr = formMemoryOperand(Ptr, Ty);
-    _mov(T_eax, loOperand(Addr));
-    _mov(T_edx, hiOperand(Addr));
-    Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx);
-    Variable *T_ebx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ebx);
-    InstX86Label *Label = InstX86Label::create(Func, this);
-    const bool IsXchg8b = Op_Lo == nullptr && Op_Hi == nullptr;
-    if (!IsXchg8b) {
-      Context.insert(Label);
-      _mov(T_ebx, T_eax);
-      (this->*Op_Lo)(T_ebx, loOperand(Val));
-      _mov(T_ecx, T_edx);
-      (this->*Op_Hi)(T_ecx, hiOperand(Val));
-    } else {
-      // This is for xchg, which doesn't need an actual Op_Lo/Op_Hi.
-      // It just needs the Val loaded into ebx and ecx.
-      // That can also be done before the loop.
-      _mov(T_ebx, loOperand(Val));
-      _mov(T_ecx, hiOperand(Val));
-      Context.insert(Label);
-    }
-    constexpr bool Locked = true;
-    _cmpxchg8b(Addr, T_edx, T_eax, T_ecx, T_ebx, Locked);
-    _br(CondX86::Br_ne, Label);
-    if (!IsXchg8b) {
-      // If Val is a variable, model the extended live range of Val through
-      // the end of the loop, since it will be re-used by the loop.
-      if (auto *ValVar = llvm::dyn_cast<Variable>(Val)) {
-        auto *ValLo = llvm::cast<Variable>(loOperand(ValVar));
-        auto *ValHi = llvm::cast<Variable>(hiOperand(ValVar));
-        Context.insert<InstFakeUse>(ValLo);
-        Context.insert<InstFakeUse>(ValHi);
-      }
-    } else {
-      // For xchg, the loop is slightly smaller and ebx/ecx are used.
-      Context.insert<InstFakeUse>(T_ebx);
-      Context.insert<InstFakeUse>(T_ecx);
-    }
-    // The address base (if any) is also reused in the loop.
-    if (Variable *Base = Addr->getBase())
-      Context.insert<InstFakeUse>(Base);
-    auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-    auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-    _mov(DestLo, T_eax);
-    _mov(DestHi, T_edx);
-    return;
-  }
   X86OperandMem *Addr = formMemoryOperand(Ptr, Ty);
   RegNumT Eax;
   switch (Ty) {
@@ -4902,18 +4242,12 @@
   // like (M - N) for N <= M, and converts 63 to 32, and 127 to 64 (for the
   // all-zeros case).
   //
-  // X8632 only: Similar for 64-bit, but start w/ speculating that the upper 32
-  // bits are all zero, and compute the result for that case (checking the
-  // lower 32 bits). Then actually compute the result for the upper bits and
-  // cmov in the result from the lower computation if the earlier speculation
-  // was correct.
-  //
   // Cttz, is similar, but uses bsf instead, and doesn't require the xor
   // bit position conversion, and the speculation is reversed.
 
   // TODO(jpp): refactor this method.
   assert(Ty == IceType_i32 || Ty == IceType_i64);
-  const Type DestTy = Traits::Is64Bit ? Dest->getType() : IceType_i32;
+  const Type DestTy = Dest->getType();
   Variable *T = makeReg(DestTy);
   Operand *FirstValRM = legalize(FirstVal, Legal_Mem | Legal_Reg);
   if (Cttz) {
@@ -4951,26 +4285,7 @@
       _xor(T_Dest, _31);
     }
   }
-  if (Traits::Is64Bit || Ty == IceType_i32) {
-    _mov(Dest, T_Dest);
-    return;
-  }
-  _add(T_Dest, _32);
-  auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-  auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-  // Will be using "test" on this, so we need a registerized variable.
-  Variable *SecondVar = legalizeToReg(SecondVal);
-  Variable *T_Dest2 = makeReg(IceType_i32);
-  if (Cttz) {
-    _bsf(T_Dest2, SecondVar);
-  } else {
-    _bsr(T_Dest2, SecondVar);
-    _xor(T_Dest2, _31);
-  }
-  _test(SecondVar, SecondVar);
-  _cmov(T_Dest2, T_Dest, CondX86::Br_e);
-  _mov(DestLo, T_Dest2);
-  _mov(DestHi, Ctx->getConstantZero(IceType_i32));
+  _mov(Dest, T_Dest);
 }
 
 void TargetX8664::typedLoad(Type Ty, Variable *Dest, Variable *Base,
@@ -6497,20 +5812,9 @@
     std::swap(SrcT, SrcF);
     Cond = InstX86Base::getOppositeCondition(Cond);
   }
-  if (!Traits::Is64Bit && DestTy == IceType_i64) {
-    SrcT = legalizeUndef(SrcT);
-    SrcF = legalizeUndef(SrcF);
-    // Set the low portion.
-    auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-    lowerSelectIntMove(DestLo, Cond, loOperand(SrcT), loOperand(SrcF));
-    // Set the high portion.
-    auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-    lowerSelectIntMove(DestHi, Cond, hiOperand(SrcT), hiOperand(SrcF));
-    return;
-  }
 
   assert(DestTy == IceType_i16 || DestTy == IceType_i32 ||
-         (Traits::Is64Bit && DestTy == IceType_i64));
+         DestTy == IceType_i64);
   lowerSelectIntMove(Dest, Cond, SrcT, SrcF);
 }
 
@@ -6527,34 +5831,21 @@
 void TargetX8664::lowerMove(Variable *Dest, Operand *Src, bool IsRedefinition) {
   assert(Dest->getType() == Src->getType());
   assert(!Dest->isRematerializable());
-  if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
-    Src = legalize(Src);
-    Operand *SrcLo = loOperand(Src);
-    Operand *SrcHi = hiOperand(Src);
-    auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
-    auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
-    Variable *T_Lo = nullptr, *T_Hi = nullptr;
-    _mov(T_Lo, SrcLo);
-    _redefined(_mov(DestLo, T_Lo), IsRedefinition);
-    _mov(T_Hi, SrcHi);
-    _redefined(_mov(DestHi, T_Hi), IsRedefinition);
+  Operand *SrcLegal;
+  if (Dest->hasReg()) {
+    // If Dest already has a physical register, then only basic legalization
+    // is needed, as the source operand can be a register, immediate, or
+    // memory.
+    SrcLegal = legalize(Src, Legal_Reg, Dest->getRegNum());
   } else {
-    Operand *SrcLegal;
-    if (Dest->hasReg()) {
-      // If Dest already has a physical register, then only basic legalization
-      // is needed, as the source operand can be a register, immediate, or
-      // memory.
-      SrcLegal = legalize(Src, Legal_Reg, Dest->getRegNum());
-    } else {
-      // If Dest could be a stack operand, then RI must be a physical register
-      // or a scalar integer immediate.
-      SrcLegal = legalize(Src, Legal_Reg | Legal_Imm);
-    }
-    if (isVectorType(Dest->getType())) {
-      _redefined(_movp(Dest, SrcLegal), IsRedefinition);
-    } else {
-      _redefined(_mov(Dest, SrcLegal), IsRedefinition);
-    }
+    // If Dest could be a stack operand, then SrcLegal must be a physical
+    // register or a scalar integer immediate.
+    SrcLegal = legalize(Src, Legal_Reg | Legal_Imm);
+  }
+  if (isVectorType(Dest->getType())) {
+    _redefined(_movp(Dest, SrcLegal), IsRedefinition);
+  } else {
+    _redefined(_mov(Dest, SrcLegal), IsRedefinition);
   }
 }
 
@@ -6694,13 +5985,7 @@
   doMockBoundsCheck(NewAddr);
   Type Ty = NewAddr->getType();
 
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
-    Value = legalizeUndef(Value);
-    Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm);
-    _store(ValueHi, llvm::cast<X86OperandMem>(hiOperand(NewAddr)));
-    Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm);
-    _store(ValueLo, llvm::cast<X86OperandMem>(loOperand(NewAddr)));
-  } else if (isVectorType(Ty)) {
+  if (isVectorType(Ty)) {
     _storep(legalizeToReg(Value), NewAddr);
   } else {
     Value = legalize(Value, Legal_Reg | Legal_Imm);
@@ -6738,9 +6023,6 @@
 
 Operand *TargetX8664::lowerCmpRange(Operand *Comparison, uint64_t Min,
                                     uint64_t Max) {
-  // TODO(ascull): 64-bit should not reach here but only because it is not
-  // implemented yet. This should be able to handle the 64-bit case.
-  assert(Traits::Is64Bit || Comparison->getType() != IceType_i64);
   // Subtracting 0 is a nop so don't do it
   if (Min != 0) {
     // Avoid clobbering the comparison by copying it
@@ -6780,7 +6062,6 @@
     if (RangeIndex->getType() != PointerType) {
       Index = makeReg(PointerType);
       if (RangeIndex->getType() == IceType_i64) {
-        assert(Traits::Is64Bit);
         _mov(Index, RangeIndex); // trunc
       } else {
         Operand *RangeIndexRM = legalize(RangeIndex, Legal_Reg | Legal_Mem);
@@ -6849,46 +6130,6 @@
 
   assert(CaseClusters.size() != 0); // Should always be at least one
 
-  if (!Traits::Is64Bit && Src0->getType() == IceType_i64) {
-    Src0 = legalize(Src0); // get Base/Index into physical registers
-    Operand *Src0Lo = loOperand(Src0);
-    Operand *Src0Hi = hiOperand(Src0);
-    if (CaseClusters.back().getHigh() > UINT32_MAX) {
-      // TODO(ascull): handle 64-bit case properly (currently naive version)
-      // This might be handled by a higher level lowering of switches.
-      SizeT NumCases = Instr->getNumCases();
-      if (NumCases >= 2) {
-        Src0Lo = legalizeToReg(Src0Lo);
-        Src0Hi = legalizeToReg(Src0Hi);
-      } else {
-        Src0Lo = legalize(Src0Lo, Legal_Reg | Legal_Mem);
-        Src0Hi = legalize(Src0Hi, Legal_Reg | Legal_Mem);
-      }
-      for (SizeT I = 0; I < NumCases; ++I) {
-        Constant *ValueLo = Ctx->getConstantInt32(Instr->getValue(I));
-        Constant *ValueHi = Ctx->getConstantInt32(Instr->getValue(I) >> 32);
-        InstX86Label *Label = InstX86Label::create(Func, this);
-        _cmp(Src0Lo, ValueLo);
-        _br(CondX86::Br_ne, Label);
-        _cmp(Src0Hi, ValueHi);
-        _br(CondX86::Br_e, Instr->getLabel(I));
-        Context.insert(Label);
-      }
-      _br(Instr->getLabelDefault());
-      return;
-    } else {
-      // All the values are 32-bit so just check the operand is too and then
-      // fall through to the 32-bit implementation. This is a common case.
-      Src0Hi = legalize(Src0Hi, Legal_Reg | Legal_Mem);
-      Constant *Zero = Ctx->getConstantInt32(0);
-      _cmp(Src0Hi, Zero);
-      _br(CondX86::Br_ne, DefaultTarget);
-      Src0 = Src0Lo;
-    }
-  }
-
-  // 32-bit lowering
-
   if (CaseClusters.size() == 1) {
     // Jump straight to default if needed. Currently a common case as jump
     // tables occur on their own.
@@ -7021,65 +6262,30 @@
   Type Ty = Src->getType();
   X86OperandMem *Addr = formMemoryOperand(RMW->getAddr(), Ty);
   doMockBoundsCheck(Addr);
-  if (!Traits::Is64Bit && Ty == IceType_i64) {
-    Src = legalizeUndef(Src);
-    Operand *SrcLo = legalize(loOperand(Src), Legal_Reg | Legal_Imm);
-    Operand *SrcHi = legalize(hiOperand(Src), Legal_Reg | Legal_Imm);
-    auto *AddrLo = llvm::cast<X86OperandMem>(loOperand(Addr));
-    auto *AddrHi = llvm::cast<X86OperandMem>(hiOperand(Addr));
-    switch (RMW->getOp()) {
-    default:
-      // TODO(stichnot): Implement other arithmetic operators.
-      break;
-    case InstArithmetic::Add:
-      _add_rmw(AddrLo, SrcLo);
-      _adc_rmw(AddrHi, SrcHi);
-      return;
-    case InstArithmetic::Sub:
-      _sub_rmw(AddrLo, SrcLo);
-      _sbb_rmw(AddrHi, SrcHi);
-      return;
-    case InstArithmetic::And:
-      _and_rmw(AddrLo, SrcLo);
-      _and_rmw(AddrHi, SrcHi);
-      return;
-    case InstArithmetic::Or:
-      _or_rmw(AddrLo, SrcLo);
-      _or_rmw(AddrHi, SrcHi);
-      return;
-    case InstArithmetic::Xor:
-      _xor_rmw(AddrLo, SrcLo);
-      _xor_rmw(AddrHi, SrcHi);
-      return;
-    }
-  } else {
-    // x86-32: i8, i16, i32
-    // x86-64: i8, i16, i32, i64
-    switch (RMW->getOp()) {
-    default:
-      // TODO(stichnot): Implement other arithmetic operators.
-      break;
-    case InstArithmetic::Add:
-      Src = legalize(Src, Legal_Reg | Legal_Imm);
-      _add_rmw(Addr, Src);
-      return;
-    case InstArithmetic::Sub:
-      Src = legalize(Src, Legal_Reg | Legal_Imm);
-      _sub_rmw(Addr, Src);
-      return;
-    case InstArithmetic::And:
-      Src = legalize(Src, Legal_Reg | Legal_Imm);
-      _and_rmw(Addr, Src);
-      return;
-    case InstArithmetic::Or:
-      Src = legalize(Src, Legal_Reg | Legal_Imm);
-      _or_rmw(Addr, Src);
-      return;
-    case InstArithmetic::Xor:
-      Src = legalize(Src, Legal_Reg | Legal_Imm);
-      _xor_rmw(Addr, Src);
-      return;
-    }
+  switch (RMW->getOp()) {
+  default:
+    // TODO(stichnot): Implement other arithmetic operators.
+    break;
+  case InstArithmetic::Add:
+    Src = legalize(Src, Legal_Reg | Legal_Imm);
+    _add_rmw(Addr, Src);
+    return;
+  case InstArithmetic::Sub:
+    Src = legalize(Src, Legal_Reg | Legal_Imm);
+    _sub_rmw(Addr, Src);
+    return;
+  case InstArithmetic::And:
+    Src = legalize(Src, Legal_Reg | Legal_Imm);
+    _and_rmw(Addr, Src);
+    return;
+  case InstArithmetic::Or:
+    Src = legalize(Src, Legal_Reg | Legal_Imm);
+    _or_rmw(Addr, Src);
+    return;
+  case InstArithmetic::Xor:
+    Src = legalize(Src, Legal_Reg | Legal_Imm);
+    _xor_rmw(Addr, Src);
+    return;
   }
   llvm::report_fatal_error("Couldn't lower RMW instruction");
 }
@@ -7092,17 +6298,10 @@
   }
 }
 
-/// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve
-/// integrity of liveness analysis. Undef values are also turned into zeroes,
-/// since loOperand() and hiOperand() don't expect Undef input.
 void TargetX8664::prelowerPhis() {
-  if (Traits::Is64Bit) {
-    // On x86-64 we don't need to prelower phis -- the architecture can handle
-    // 64-bit integer natively.
-    return;
-  }
-
-  PhiLowering::prelowerPhis32Bit<TargetX8664>(this, Context.getNode(), Func);
+  // On x86-64 we don't need to prelower phis -- the architecture can handle
+  // 64-bit integers natively.
 }
 
 void TargetX8664::genTargetHelperCallFor(Inst *Instr) {
@@ -7111,24 +6310,7 @@
     RuntimeHelper HelperID = RuntimeHelper::H_Num;
     Variable *Dest = Arith->getDest();
     Type DestTy = Dest->getType();
-    if (!Traits::Is64Bit && DestTy == IceType_i64) {
-      switch (Arith->getOp()) {
-      default:
-        return;
-      case InstArithmetic::Udiv:
-        HelperID = RuntimeHelper::H_udiv_i64;
-        break;
-      case InstArithmetic::Sdiv:
-        HelperID = RuntimeHelper::H_sdiv_i64;
-        break;
-      case InstArithmetic::Urem:
-        HelperID = RuntimeHelper::H_urem_i64;
-        break;
-      case InstArithmetic::Srem:
-        HelperID = RuntimeHelper::H_srem_i64;
-        break;
-      }
-    } else if (isVectorType(DestTy)) {
+    if (isVectorType(DestTy)) {
       Variable *Dest = Arith->getDest();
       Operand *Src0 = Arith->getSrc(0);
       Operand *Src1 = Arith->getSrc(1);
@@ -7185,44 +6367,15 @@
     switch (CastKind) {
     default:
       return;
-    case InstCast::Fptosi:
-      if (!Traits::Is64Bit && DestTy == IceType_i64) {
-        HelperID = isFloat32Asserting32Or64(SrcType)
-                       ? RuntimeHelper::H_fptosi_f32_i64
-                       : RuntimeHelper::H_fptosi_f64_i64;
-      } else {
-        return;
-      }
-      break;
     case InstCast::Fptoui:
       if (isVectorType(DestTy)) {
         assert(DestTy == IceType_v4i32);
         assert(SrcType == IceType_v4f32);
         HelperID = RuntimeHelper::H_fptoui_4xi32_f32;
-      } else if (DestTy == IceType_i64 ||
-                 (!Traits::Is64Bit && DestTy == IceType_i32)) {
-        if (Traits::Is64Bit) {
-          HelperID = isFloat32Asserting32Or64(SrcType)
-                         ? RuntimeHelper::H_fptoui_f32_i64
-                         : RuntimeHelper::H_fptoui_f64_i64;
-        } else if (isInt32Asserting32Or64(DestTy)) {
-          HelperID = isFloat32Asserting32Or64(SrcType)
-                         ? RuntimeHelper::H_fptoui_f32_i32
-                         : RuntimeHelper::H_fptoui_f64_i32;
-        } else {
-          HelperID = isFloat32Asserting32Or64(SrcType)
-                         ? RuntimeHelper::H_fptoui_f32_i64
-                         : RuntimeHelper::H_fptoui_f64_i64;
-        }
-      } else {
-        return;
-      }
-      break;
-    case InstCast::Sitofp:
-      if (!Traits::Is64Bit && SrcType == IceType_i64) {
-        HelperID = isFloat32Asserting32Or64(DestTy)
-                       ? RuntimeHelper::H_sitofp_i64_f32
-                       : RuntimeHelper::H_sitofp_i64_f64;
+      } else if (DestTy == IceType_i64) {
+        HelperID = isFloat32Asserting32Or64(SrcType)
+                       ? RuntimeHelper::H_fptoui_f32_i64
+                       : RuntimeHelper::H_fptoui_f64_i64;
       } else {
         return;
       }
@@ -7232,8 +6385,7 @@
         assert(DestTy == IceType_v4f32);
         assert(SrcType == IceType_v4i32);
         HelperID = RuntimeHelper::H_uitofp_4xi32_4xf32;
-      } else if (SrcType == IceType_i64 ||
-                 (!Traits::Is64Bit && SrcType == IceType_i32)) {
+      } else if (SrcType == IceType_i64) {
         if (isInt32Asserting32Or64(SrcType)) {
           HelperID = isFloat32Asserting32Or64(DestTy)
                          ? RuntimeHelper::H_uitofp_i32_f32
@@ -7379,15 +6531,6 @@
       OutArgumentsSizeBytes += typeWidthInBytesOnStack(Ty);
     }
   }
-  if (Traits::Is64Bit)
-    return OutArgumentsSizeBytes;
-  // The 32 bit ABI requires floating point values to be returned on the x87 FP
-  // stack. Ensure there is enough space for the fstp/movs for floating returns.
-  if (isScalarFloatingType(ReturnType)) {
-    OutArgumentsSizeBytes =
-        std::max(OutArgumentsSizeBytes,
-                 static_cast<uint32_t>(typeWidthInBytesOnStack(ReturnType)));
-  }
   return OutArgumentsSizeBytes;
 }
 
@@ -7669,14 +6812,12 @@
 
     // If the operand is a 64 bit constant integer we need to legalize it to a
     // register in x86-64.
-    if (Traits::Is64Bit) {
-      if (auto *C64 = llvm::dyn_cast<ConstantInteger64>(Const)) {
-        if (!Utils::IsInt(32, C64->getValue())) {
-          if (RegNum.hasValue()) {
-            assert(Traits::getGprForType(IceType_i64, RegNum) == RegNum);
-          }
-          return copyToReg(Const, RegNum);
+    if (auto *C64 = llvm::dyn_cast<ConstantInteger64>(Const)) {
+      if (!Utils::IsInt(32, C64->getValue())) {
+        if (RegNum.hasValue()) {
+          assert(Traits::getGprForType(IceType_i64, RegNum) == RegNum);
         }
+        return copyToReg(Const, RegNum);
       }
     }
 
@@ -7821,8 +6962,6 @@
 }
 
 Variable *TargetX8664::makeReg(Type Type, RegNumT RegNum) {
-  // There aren't any 64-bit integer registers for x86-32.
-  assert(Traits::Is64Bit || Type != IceType_i64);
   Variable *Reg = Func->makeVariable(Type);
   if (RegNum.hasValue())
     Reg->setRegNum(RegNum);
@@ -7869,14 +7008,10 @@
 }
 
 void TargetX8664::emit(const ConstantInteger64 *C) const {
-  if (!Traits::Is64Bit) {
-    llvm::report_fatal_error("Not expecting to emit 64-bit integers");
-  } else {
-    if (!BuildDefs::dump())
-      return;
-    Ostream &Str = Ctx->getStrEmit();
-    Str << "$" << C->getValue();
-  }
+  if (!BuildDefs::dump())
+    return;
+  Ostream &Str = Ctx->getStrEmit();
+  Str << "$" << C->getValue();
 }
 
 void TargetX8664::emit(const ConstantFloat *C) const {
diff --git a/third_party/subzero/src/IceTargetLoweringX8664.h b/third_party/subzero/src/IceTargetLoweringX8664.h
index 727c067..b084862 100644
--- a/third_party/subzero/src/IceTargetLoweringX8664.h
+++ b/third_party/subzero/src/IceTargetLoweringX8664.h
@@ -248,9 +248,7 @@
     MaxOutArgsSizeBytes = std::max(MaxOutArgsSizeBytes, Size);
   }
 
-  bool shouldSplitToVariable64On32(Type Ty) const override {
-    return Traits::Is64Bit ? false : Ty == IceType_i64;
-  }
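+  // x86-64 handles i64 natively, so 64-bit variables are never split.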
+  bool shouldSplitToVariable64On32(Type Ty) const override { return false; }
 
   SizeT getMinJumpTableSize() const override { return 4; }
 
@@ -265,24 +263,6 @@
 
   void initNodeForLowering(CfgNode *Node) override;
 
-  template <typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, Operand>::type *
-  loOperand(Operand *Operand);
-  template <typename T = Traits>
-  typename std::enable_if<T::Is64Bit, Operand>::type *loOperand(Operand *) {
-    llvm::report_fatal_error(
-        "Hey, yo! This is x86-64. Watcha doin'? (loOperand)");
-  }
-
-  template <typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, Operand>::type *
-  hiOperand(Operand *Operand);
-  template <typename T = Traits>
-  typename std::enable_if<T::Is64Bit, Operand>::type *hiOperand(Operand *) {
-    llvm::report_fatal_error(
-        "Hey, yo! This is x86-64. Watcha doin'? (hiOperand)");
-  }
-
   void addProlog(CfgNode *Node) override;
   void finishArgumentLowering(Variable *Arg, Variable *FramePtr,
                               size_t BasicFrameOffset, size_t StackAdjBytes,
@@ -935,19 +915,6 @@
   /// Optimizations for idiom recognition.
   bool lowerOptimizeFcmpSelect(const InstFcmp *Fcmp, const InstSelect *Select);
 
-  /// Complains loudly if invoked because the cpu can handle 64-bit types
-  /// natively.
-  template <typename T = Traits>
-  typename std::enable_if<T::Is64Bit, void>::type lowerIcmp64(const InstIcmp *,
-                                                              const Inst *) {
-    llvm::report_fatal_error(
-        "Hey, yo! This is x86-64. Watcha doin'? (lowerIcmp64)");
-  }
-  /// x86lowerIcmp64 handles 64-bit icmp lowering.
-  template <typename T = Traits>
-  typename std::enable_if<!T::Is64Bit, void>::type
-  lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer);
-
   BoolFolding FoldingInfo;
 
   /// Helpers for lowering ShuffleVector
diff --git a/third_party/subzero/src/IceTargetLoweringX8664Traits.h b/third_party/subzero/src/IceTargetLoweringX8664Traits.h
index d650b48..502c207 100644
--- a/third_party/subzero/src/IceTargetLoweringX8664Traits.h
+++ b/third_party/subzero/src/IceTargetLoweringX8664Traits.h
@@ -30,7 +30,6 @@
 #include <initializer_list>
 
 namespace Ice {
-
 namespace X8664 {
 using namespace ::Ice::X86;
 
@@ -49,8 +48,6 @@
   //      \/_/\/_/\/_____/\/_/  \/_/
   //
   //----------------------------------------------------------------------------
-
-  static constexpr bool Is64Bit = true;
   static constexpr ::Ice::RegX8664::GPRRegister Last8BitGPR =
       ::Ice::RegX8664::GPRRegister::Encoded_Reg_r15d;