Change some explicit type checks into using helper functions.
For some arithmetic assembler methods, instead of checking
IceType_i8 || IceType_i1, only allow IceType_i8 and assert if
an i1 leaked to that stage (should have been vetted earlier
by the bitcode reader / ABI checks). Could have looked up the
type width and isIntegerArithmeticType, etc. in the property table,
but that seemed a bit heavy for just checking one type
(or one of two types).
Also changed some f32 || f64 checks into just using
isScalarFloatingType() which looks things up in a property table.
Could alternatively just keep it as a simple f32 || f64 check,
and I could change isScalarFloatingType()'s implementation.
In some places where we assume something is either i32 or i64
and do a select, change that into using a helper function
so that we can do one compare, and then assert. Some of the
asserts are really redundant (already within a branch which
already checked that), but hopefully that disappears if
we compile in release mode.
Similar for f32 or f64 (which happened a lot in the assembler).
BUG=none
R=kschimpf@google.com, stichnot@chromium.org
Review URL: https://codereview.chromium.org/613483002
diff --git a/src/IceInstX8632.cpp b/src/IceInstX8632.cpp
index 0ea67b9..0a2d034 100644
--- a/src/IceInstX8632.cpp
+++ b/src/IceInstX8632.cpp
@@ -514,13 +514,8 @@
RegX8632::getEncodedByteRegOrGPR(Ty, Var->getRegNum());
if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
if (SrcVar->hasReg()) {
- RegX8632::GPRRegister SrcReg;
- if (Ty == IceType_i8 || Ty == IceType_i1) {
- SrcReg = static_cast<RegX8632::GPRRegister>(
- RegX8632::getEncodedByteReg(SrcVar->getRegNum()));
- } else {
- SrcReg = RegX8632::getEncodedGPR(SrcVar->getRegNum());
- }
+ RegX8632::GPRRegister SrcReg =
+ RegX8632::getEncodedByteRegOrGPR(Ty, SrcVar->getRegNum());
(Asm->*(Emitter.GPRGPR))(Ty, VarReg, SrcReg);
} else {
x86::Address SrcStackAddr = static_cast<TargetX8632 *>(Func->getTarget())
@@ -747,7 +742,7 @@
Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 1);
Type Ty = getSrc(0)->getType();
- assert(Ty == IceType_f32 || Ty == IceType_f64);
+ assert(isScalarFloatingType(Ty));
Str << "\tsqrt" << TypeX8632Attributes[Ty].SdSsString << "\t";
getDest()->emit(Func);
Str << ", ";
@@ -888,7 +883,7 @@
template <> void InstX8632Imul::emit(const Cfg *Func) const {
Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 2);
- if (getDest()->getType() == IceType_i8) {
+ if (isByteSizedArithType(getDest()->getType())) {
// The 8-bit version of imul only allows the form "imul r/m8".
Variable *Src0 = llvm::dyn_cast<Variable>(getSrc(0));
(void)Src0;
@@ -1743,8 +1738,7 @@
assert(getSrcSize() == 1);
Type Ty = getSrc(0)->getType();
Variable *Var = llvm::dyn_cast<Variable>(getSrc(0));
- if ((isVectorType(Ty) || Ty == IceType_f32 || Ty == IceType_f64) && Var &&
- Var->hasReg()) {
+ if ((isVectorType(Ty) || isScalarFloatingType(Ty)) && Var && Var->hasReg()) {
// The xmm registers can't be directly pushed, so we fake it by
// decrementing esp and then storing to [esp].
Str << "\tsub\tesp, " << typeWidthInBytes(Ty) << "\n";
diff --git a/src/IceRegistersX8632.h b/src/IceRegistersX8632.h
index cbbcfc9..851908c 100644
--- a/src/IceRegistersX8632.h
+++ b/src/IceRegistersX8632.h
@@ -85,7 +85,7 @@
}
static inline GPRRegister getEncodedByteRegOrGPR(Type Ty, int32_t RegNum) {
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedType(Ty))
return GPRRegister(getEncodedByteReg(RegNum));
else
return getEncodedGPR(RegNum);
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp
index 8a01f00..7a7fb12 100644
--- a/src/IceTargetLoweringX8632.cpp
+++ b/src/IceTargetLoweringX8632.cpp
@@ -1581,7 +1581,7 @@
//
// The 8-bit version of imul only allows the form "imul r/m8"
// where T must be in eax.
- if (Dest->getType() == IceType_i8)
+ if (isByteSizedArithType(Dest->getType()))
_mov(T, Src0, RegX8632::Reg_eax);
else
_mov(T, Src0);
@@ -1613,7 +1613,7 @@
// div and idiv are the few arithmetic operators that do not allow
// immediates as the operand.
Src1 = legalize(Src1, Legal_Reg | Legal_Mem);
- if (Dest->getType() == IceType_i8) {
+ if (isByteSizedArithType(Dest->getType())) {
Variable *T_ah = NULL;
Constant *Zero = Ctx->getConstantZero(IceType_i8);
_mov(T, Src0, RegX8632::Reg_eax);
@@ -1630,7 +1630,7 @@
break;
case InstArithmetic::Sdiv:
Src1 = legalize(Src1, Legal_Reg | Legal_Mem);
- if (Dest->getType() == IceType_i8) {
+ if (isByteSizedArithType(Dest->getType())) {
_mov(T, Src0, RegX8632::Reg_eax);
_cbwdq(T, T);
_idiv(T, Src1, T);
@@ -1645,7 +1645,7 @@
break;
case InstArithmetic::Urem:
Src1 = legalize(Src1, Legal_Reg | Legal_Mem);
- if (Dest->getType() == IceType_i8) {
+ if (isByteSizedArithType(Dest->getType())) {
Variable *T_ah = NULL;
Constant *Zero = Ctx->getConstantZero(IceType_i8);
_mov(T, Src0, RegX8632::Reg_eax);
@@ -1662,7 +1662,7 @@
break;
case InstArithmetic::Srem:
Src1 = legalize(Src1, Legal_Reg | Legal_Mem);
- if (Dest->getType() == IceType_i8) {
+ if (isByteSizedArithType(Dest->getType())) {
Variable *T_ah = makeReg(IceType_i8, RegX8632::Reg_ah);
_mov(T, Src0, RegX8632::Reg_eax);
_cbwdq(T, T);
@@ -1700,8 +1700,8 @@
case InstArithmetic::Frem: {
const SizeT MaxSrcs = 2;
Type Ty = Dest->getType();
- InstCall *Call =
- makeHelperCall(Ty == IceType_f32 ? "fmodf" : "fmod", Dest, MaxSrcs);
+ InstCall *Call = makeHelperCall(
+ isFloat32Asserting32Or64(Ty) ? "fmodf" : "fmod", Dest, MaxSrcs);
Call->addArg(Src0);
Call->addArg(Src1);
return lowerCall(Call);
@@ -1779,8 +1779,7 @@
Operand *Arg = Instr->getArg(i);
Type Ty = Arg->getType();
// The PNaCl ABI requires the width of arguments to be at least 32 bits.
- assert(Ty == IceType_i32 || Ty == IceType_f32 || Ty == IceType_i64 ||
- Ty == IceType_f64 || isVectorType(Ty));
+ assert(typeWidthInBytes(Ty) >= 4);
if (isVectorType(Ty) && XmmArgs.size() < X86_MAX_XMM_ARGS) {
XmmArgs.push_back(Arg);
} else {
@@ -1926,7 +1925,7 @@
_mov(Dest, ReturnReg);
}
}
- } else if (Dest->getType() == IceType_f32 || Dest->getType() == IceType_f64) {
+ } else if (isScalarFloatingType(Dest->getType())) {
// Special treatment for an FP function which returns its result in
// st(0).
// If Dest ends up being a physical xmm register, the fstp emit code
@@ -2120,7 +2119,8 @@
const SizeT MaxSrcs = 1;
Type SrcType = Inst->getSrc(0)->getType();
InstCall *Call = makeHelperCall(
- SrcType == IceType_f32 ? "cvtftosi64" : "cvtdtosi64", Dest, MaxSrcs);
+ isFloat32Asserting32Or64(SrcType) ? "cvtftosi64" : "cvtdtosi64", Dest,
+ MaxSrcs);
// TODO: Call the correct compiler-rt helper function.
Call->addArg(Inst->getSrc(0));
lowerCall(Call);
@@ -2151,8 +2151,8 @@
const SizeT MaxSrcs = 1;
Type DestType = Dest->getType();
Type SrcType = Inst->getSrc(0)->getType();
- IceString DstSubstring = (DestType == IceType_i64 ? "64" : "32");
- IceString SrcSubstring = (SrcType == IceType_f32 ? "f" : "d");
+ IceString DstSubstring = (isInt32Asserting32Or64(DestType) ? "32" : "64");
+ IceString SrcSubstring = (isFloat32Asserting32Or64(SrcType) ? "f" : "d");
// Possibilities are cvtftoui32, cvtdtoui32, cvtftoui64, cvtdtoui64
IceString TargetString = "cvt" + SrcSubstring + "toui" + DstSubstring;
// TODO: Call the correct compiler-rt helper function.
@@ -2185,7 +2185,8 @@
const SizeT MaxSrcs = 1;
Type DestType = Dest->getType();
InstCall *Call = makeHelperCall(
- DestType == IceType_f32 ? "cvtsi64tof" : "cvtsi64tod", Dest, MaxSrcs);
+ isFloat32Asserting32Or64(DestType) ? "cvtsi64tof" : "cvtsi64tod",
+ Dest, MaxSrcs);
// TODO: Call the correct compiler-rt helper function.
Call->addArg(Inst->getSrc(0));
lowerCall(Call);
@@ -2219,8 +2220,9 @@
// i32 on x86-32.
const SizeT MaxSrcs = 1;
Type DestType = Dest->getType();
- IceString SrcSubstring = (Src0->getType() == IceType_i64 ? "64" : "32");
- IceString DstSubstring = (DestType == IceType_f32 ? "f" : "d");
+ IceString SrcSubstring =
+ (isInt32Asserting32Or64(Src0->getType()) ? "32" : "64");
+ IceString DstSubstring = (isFloat32Asserting32Or64(DestType) ? "f" : "d");
// Possibilities are cvtui32tof, cvtui32tod, cvtui64tof, cvtui64tod
IceString TargetString = "cvtui" + SrcSubstring + "to" + DstSubstring;
// TODO: Call the correct compiler-rt helper function.
@@ -2413,7 +2415,7 @@
if (InVectorElementTy == IceType_i32) {
_movd(ExtractedElementR, T);
- } else { // Ty == Icetype_f32
+ } else { // Ty == IceType_f32
// TODO(wala): _movss is only used here because _mov does not
// allow a vector source and a scalar destination. _mov should be
// able to be used here.
@@ -3040,8 +3042,10 @@
case Intrinsics::Ctpop: {
Variable *Dest = Instr->getDest();
Operand *Val = Instr->getArg(0);
- InstCall *Call = makeHelperCall(Val->getType() == IceType_i64 ?
- "__popcountdi2" : "__popcountsi2", Dest, 1);
+ InstCall *Call =
+ makeHelperCall(isInt32Asserting32Or64(Val->getType()) ? "__popcountsi2"
+ : "__popcountdi2",
+ Dest, 1);
Call->addArg(Val);
lowerCall(Call);
// The popcount helpers always return 32-bit values, while the intrinsic's
@@ -3881,8 +3885,7 @@
Variable *edx = legalizeToVar(hiOperand(Src0), RegX8632::Reg_edx);
Reg = eax;
Context.insert(InstFakeUse::create(Func, edx));
- } else if (Src0->getType() == IceType_f32 ||
- Src0->getType() == IceType_f64) {
+ } else if (isScalarFloatingType(Src0->getType())) {
_fld(Src0);
} else if (isVectorType(Src0->getType())) {
Reg = legalizeToVar(Src0, RegX8632::Reg_xmm0);
@@ -4271,8 +4274,7 @@
if (!(Allowed & Legal_Reloc) && llvm::isa<ConstantRelocatable>(From))
// Relocatable specifically not allowed
NeedsReg = true;
- if (!(Allowed & Legal_Mem) &&
- (From->getType() == IceType_f32 || From->getType() == IceType_f64))
+ if (!(Allowed & Legal_Mem) && isScalarFloatingType(From->getType()))
// On x86, FP constants are lowered to mem operands.
NeedsReg = true;
if (NeedsReg) {
diff --git a/src/IceTypes.h b/src/IceTypes.h
index cc782ab..6480c2c 100644
--- a/src/IceTypes.h
+++ b/src/IceTypes.h
@@ -69,6 +69,34 @@
/// Returns the number of bits in a scalar integer type.
SizeT getScalarIntBitWidth(Type Ty);
+// Check if a type is byte sized (slight optimization over typeWidthInBytes).
+inline bool isByteSizedType(Type Ty) {
+ bool result = Ty == IceType_i8 || Ty == IceType_i1;
+ assert(result == (1 == typeWidthInBytes(Ty)));
+ return result;
+}
+
+// Check if Ty is byte sized and specifically i8. Assert that it's not
+// byte sized due to being an i1.
+inline bool isByteSizedArithType(Type Ty) {
+ assert(Ty != IceType_i1);
+ return Ty == IceType_i8;
+}
+
+// Return true if Ty is i32. This asserts that Ty is either i32 or i64.
+inline bool isInt32Asserting32Or64(Type Ty) {
+ bool result = Ty == IceType_i32;
+ assert(result || Ty == IceType_i64);
+ return result;
+}
+
+// Return true if Ty is f32. This asserts that Ty is either f32 or f64.
+inline bool isFloat32Asserting32Or64(Type Ty) {
+ bool result = Ty == IceType_f32;
+ assert(result || Ty == IceType_f64);
+ return result;
+}
+
template <typename StreamType>
inline StreamType &operator<<(StreamType &Str, const Type &Ty) {
Str << typeString(Ty);
diff --git a/src/assembler_ia32.cpp b/src/assembler_ia32.cpp
index 9545dc0..7791709 100644
--- a/src/assembler_ia32.cpp
+++ b/src/assembler_ia32.cpp
@@ -35,7 +35,7 @@
DirectCallRelocation(Kind, Sym);
}
- void Process(const MemoryRegion ®ion, intptr_t position) {
+ void Process(const MemoryRegion ®ion, intptr_t position) override {
// Direct calls are relative to the following instruction on x86.
int32_t pointer = region.Load<int32_t>(position);
int32_t delta = region.start() + position + sizeof(int32_t);
@@ -357,7 +357,7 @@
void AssemblerX86::addss(Type Ty, XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x58);
EmitXmmRegisterOperand(dst, src);
@@ -365,7 +365,7 @@
void AssemblerX86::addss(Type Ty, XmmRegister dst, const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x58);
EmitOperand(dst, src);
@@ -373,7 +373,7 @@
void AssemblerX86::subss(Type Ty, XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x5C);
EmitXmmRegisterOperand(dst, src);
@@ -381,7 +381,7 @@
void AssemblerX86::subss(Type Ty, XmmRegister dst, const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x5C);
EmitOperand(dst, src);
@@ -389,7 +389,7 @@
void AssemblerX86::mulss(Type Ty, XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x59);
EmitXmmRegisterOperand(dst, src);
@@ -397,7 +397,7 @@
void AssemblerX86::mulss(Type Ty, XmmRegister dst, const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x59);
EmitOperand(dst, src);
@@ -405,7 +405,7 @@
void AssemblerX86::divss(Type Ty, XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x5E);
EmitXmmRegisterOperand(dst, src);
@@ -413,7 +413,7 @@
void AssemblerX86::divss(Type Ty, XmmRegister dst, const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x5E);
EmitOperand(dst, src);
@@ -480,7 +480,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
EmitUint8(0x0F);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitUint8(0xFC);
} else if (Ty == IceType_i16) {
EmitUint8(0xFD);
@@ -494,7 +494,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
EmitUint8(0x0F);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitUint8(0xFC);
} else if (Ty == IceType_i16) {
EmitUint8(0xFD);
@@ -572,7 +572,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
EmitUint8(0x0F);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitUint8(0xF8);
} else if (Ty == IceType_i16) {
EmitUint8(0xF9);
@@ -586,7 +586,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
EmitUint8(0x0F);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitUint8(0xF8);
} else if (Ty == IceType_i16) {
EmitUint8(0xF9);
@@ -967,7 +967,7 @@
void AssemblerX86::sqrtss(Type Ty, XmmRegister dst, const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x51);
EmitOperand(dst, src);
@@ -975,7 +975,7 @@
void AssemblerX86::sqrtss(Type Ty, XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(Ty == IceType_f32 ? 0xF3 : 0xF2);
+ EmitUint8(isFloat32Asserting32Or64(Ty) ? 0xF3 : 0xF2);
EmitUint8(0x0F);
EmitUint8(0x51);
EmitXmmRegisterOperand(dst, src);
@@ -1201,7 +1201,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedType(Ty))
EmitUint8(0x22);
else
EmitUint8(0x23);
@@ -1212,7 +1212,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedType(Ty))
EmitUint8(0x22);
else
EmitUint8(0x23);
@@ -1221,7 +1221,7 @@
void AssemblerX86::And(Type Ty, GPRRegister dst, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedType(Ty)) {
EmitComplexI8(4, Operand(dst), imm);
return;
}
@@ -1234,7 +1234,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedType(Ty))
EmitUint8(0x0A);
else
EmitUint8(0x0B);
@@ -1245,7 +1245,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedType(Ty))
EmitUint8(0x0A);
else
EmitUint8(0x0B);
@@ -1254,7 +1254,7 @@
void AssemblerX86::Or(Type Ty, GPRRegister dst, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedType(Ty)) {
EmitComplexI8(1, Operand(dst), imm);
return;
}
@@ -1267,7 +1267,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedType(Ty))
EmitUint8(0x32);
else
EmitUint8(0x33);
@@ -1278,7 +1278,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedType(Ty))
EmitUint8(0x32);
else
EmitUint8(0x33);
@@ -1287,7 +1287,7 @@
void AssemblerX86::Xor(Type Ty, GPRRegister dst, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedType(Ty)) {
EmitComplexI8(6, Operand(dst), imm);
return;
}
@@ -1300,7 +1300,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x02);
else
EmitUint8(0x03);
@@ -1311,7 +1311,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x02);
else
EmitUint8(0x03);
@@ -1320,7 +1320,7 @@
void AssemblerX86::add(Type Ty, GPRRegister reg, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitComplexI8(0, Operand(reg), imm);
return;
}
@@ -1333,7 +1333,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x12);
else
EmitUint8(0x13);
@@ -1344,7 +1344,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x12);
else
EmitUint8(0x13);
@@ -1353,7 +1353,7 @@
void AssemblerX86::adc(Type Ty, GPRRegister reg, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitComplexI8(2, Operand(reg), imm);
return;
}
@@ -1366,7 +1366,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x2A);
else
EmitUint8(0x2B);
@@ -1377,7 +1377,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x2A);
else
EmitUint8(0x2B);
@@ -1386,7 +1386,7 @@
void AssemblerX86::sub(Type Ty, GPRRegister reg, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitComplexI8(5, Operand(reg), imm);
return;
}
@@ -1399,7 +1399,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x1A);
else
EmitUint8(0x1B);
@@ -1410,7 +1410,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x1A);
else
EmitUint8(0x1B);
@@ -1419,7 +1419,7 @@
void AssemblerX86::sbb(Type Ty, GPRRegister reg, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- if (Ty == IceType_i8 || Ty == IceType_i1) {
+ if (isByteSizedArithType(Ty)) {
EmitComplexI8(3, Operand(reg), imm);
return;
}
@@ -1449,7 +1449,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1460,7 +1460,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1471,7 +1471,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1482,7 +1482,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1526,7 +1526,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1537,7 +1537,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1650,7 +1650,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1661,7 +1661,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xF6);
else
EmitUint8(0xF7);
@@ -1900,7 +1900,7 @@
if (Ty == IceType_i16)
EmitOperandSizeOverride();
EmitUint8(0x0F);
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xB0);
else
EmitUint8(0xB1);
@@ -1919,7 +1919,7 @@
if (Ty == IceType_i16)
EmitOperandSizeOverride();
EmitUint8(0x0F);
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0xC0);
else
EmitUint8(0xC1);
@@ -1930,7 +1930,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (Ty == IceType_i16)
EmitOperandSizeOverride();
- if (Ty == IceType_i8 || Ty == IceType_i1)
+ if (isByteSizedArithType(Ty))
EmitUint8(0x86);
else
EmitUint8(0x87);
diff --git a/src/assembler_ia32.h b/src/assembler_ia32.h
index 5066397..3e9a937 100644
--- a/src/assembler_ia32.h
+++ b/src/assembler_ia32.h
@@ -51,7 +51,7 @@
DisplacementRelocation(Kind, Sym);
}
- void Process(const MemoryRegion ®ion, intptr_t position) {
+ void Process(const MemoryRegion ®ion, intptr_t position) override {
(void)region;
(void)position;
llvm_unreachable("We might not be using this Process() method later.");