Enhance address mode recovery
This adds more patterns to address mode recovery so that ConstantRelocatables
are recovered as displacements, along with a few generalizations that catch
more indexed addressing forms.
BUG=
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/1428443002 .
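
For reference, a rough C++ analogue of the indexed-global pattern the new
recovery handles, mirroring the load_global_indexed case in the added lit
test (all names below are illustrative, not part of the change):

#include <cstdint>

static uint8_t bytes[1024]; // stands in for the @bytes internal global in the new test

int32_t load_global_indexed(int32_t arg) {
  // base + (arg << 3): with this change the shift and add fold into a single
  // memory operand, roughly mov eax, DWORD PTR [eax*8+bytes], with the address
  // of bytes carried as a relocated displacement (a fixup).
  return *reinterpret_cast<const int32_t *>(&bytes[static_cast<uint32_t>(arg) << 3]);
}
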
diff --git a/src/IceAssembler.cpp b/src/IceAssembler.cpp
index 1de1588..0228476 100644
--- a/src/IceAssembler.cpp
+++ b/src/IceAssembler.cpp
@@ -36,7 +36,6 @@
}
void AssemblerBuffer::installFixup(AssemblerFixup *F) {
- F->set_position(0);
if (!Assemblr.getPreliminary())
Fixups.push_back(F);
}
diff --git a/src/IceAssemblerX86BaseImpl.h b/src/IceAssemblerX86BaseImpl.h
index f06ee96..a72f266 100644
--- a/src/IceAssemblerX86BaseImpl.h
+++ b/src/IceAssemblerX86BaseImpl.h
@@ -3313,14 +3313,23 @@
assert(rm >= 0 && rm < 8);
const intptr_t length = operand.length_;
assert(length > 0);
+ intptr_t displacement_start = 1;
// Emit the ModRM byte updated with the given RM value.
assert((operand.encoding_[0] & 0x38) == 0);
emitUint8(operand.encoding_[0] + (rm << 3));
+  // Whenever the addressing mode is not register-direct (mod != 0b11), an RM
+  // field of esp == 0x4 indicates that an SIB byte follows.
+ if (((operand.encoding_[0] & 0xc0) != 0xc0) &&
+ ((operand.encoding_[0] & 0x07) == 0x04)) {
+ emitUint8(operand.encoding_[1]);
+ displacement_start = 2;
+ }
+ // Emit the displacement and the fixup that affects it, if any.
if (operand.fixup()) {
emitFixup(operand.fixup());
+ assert(length - displacement_start == 4);
}
- // Emit the rest of the encoded operand.
- for (intptr_t i = 1; i < length; i++) {
+ for (intptr_t i = displacement_start; i < length; i++) {
emitUint8(operand.encoding_[i]);
}
}
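
A standalone sketch of the emitOperand() rule introduced above, assuming the
standard ModRM layout (mod in bits 7:6, rm in bits 2:0): when the mode is not
register-direct and the rm field is 0b100 (esp), an SIB byte follows the ModRM
byte, so the displacement, and any fixup attached to it, starts one byte later.

#include <cstdint>

// Returns the index of the first displacement byte within an encoded operand,
// mirroring the displacement_start computation in emitOperand() above.
intptr_t displacementStart(uint8_t ModRM) {
  const bool RegisterDirect = (ModRM & 0xC0) == 0xC0;            // mod == 0b11
  const bool HasSIB = !RegisterDirect && (ModRM & 0x07) == 0x04; // rm == esp
  return HasSIB ? 2 : 1; // skip the ModRM byte, plus the SIB byte when present
}
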
diff --git a/src/IceFixups.h b/src/IceFixups.h
index d49e4d1..aaef7ab 100644
--- a/src/IceFixups.h
+++ b/src/IceFixups.h
@@ -31,8 +31,14 @@
public:
AssemblerFixup() = default;
AssemblerFixup(const AssemblerFixup &) = default;
- intptr_t position() const { return position_; }
- void set_position(intptr_t Position) { position_ = Position; }
+ intptr_t position() const {
+ assert(position_was_set_);
+ return position_;
+ }
+ void set_position(intptr_t Position) {
+ position_ = Position;
+ position_was_set_ = true;
+ }
FixupKind kind() const { return kind_; }
void set_kind(FixupKind Kind) { kind_ = Kind; }
@@ -43,12 +49,15 @@
static const Constant *NullSymbol;
bool isNullSymbol() const { return value_ == NullSymbol; }
+ static constexpr AssemblerFixup *NoFixup = nullptr;
+
void set_value(const Constant *Value) { value_ = Value; }
/// Emits fixup, then returns the number of bytes to skip.
virtual size_t emit(GlobalContext *Ctx, const Assembler &Asm) const;
private:
+ bool position_was_set_ = false;
intptr_t position_ = 0;
FixupKind kind_ = 0;
const Constant *value_ = nullptr;
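
Since installFixup() no longer zeroes the position, the getter above now
asserts that set_position() ran first. A minimal standalone sketch of that
guarded-accessor idiom (names are illustrative):

#include <cassert>
#include <cstdint>

class GuardedPosition {
public:
  intptr_t position() const {
    assert(PositionWasSet && "position() read before set_position()");
    return Position;
  }
  void set_position(intptr_t P) {
    Position = P;
    PositionWasSet = true;
  }

private:
  bool PositionWasSet = false;
  intptr_t Position = 0;
};
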
diff --git a/src/IceInstX8632.cpp b/src/IceInstX8632.cpp
index 3a8c57c..0837eef 100644
--- a/src/IceInstX8632.cpp
+++ b/src/IceInstX8632.cpp
@@ -120,9 +120,10 @@
llvm_unreachable("Invalid offset type for x86 mem operand");
}
- if (Base) {
+ if (Base || Index) {
Str << "(";
- Base->emit(Func);
+ if (Base)
+ Base->emit(Func);
if (Index) {
Str << ",";
Index->emit(Func);
@@ -151,8 +152,8 @@
Dumped = true;
}
if (Index) {
- assert(Base);
- Str << "+";
+ if (Base)
+ Str << "+";
if (Shift > 0)
Str << (1u << Shift) << "*";
if (Func)
@@ -216,18 +217,16 @@
return X8632::Traits::Address(
RegX8632::getEncodedGPR(getBase()->getRegNum()),
RegX8632::getEncodedGPR(getIndex()->getRegNum()),
- X8632::Traits::ScaleFactor(getShift()), Disp);
+ X8632::Traits::ScaleFactor(getShift()), Disp, Fixup);
} else if (getBase()) {
return X8632::Traits::Address(
- RegX8632::getEncodedGPR(getBase()->getRegNum()), Disp);
+ RegX8632::getEncodedGPR(getBase()->getRegNum()), Disp, Fixup);
} else if (getIndex()) {
return X8632::Traits::Address(
RegX8632::getEncodedGPR(getIndex()->getRegNum()),
- X8632::Traits::ScaleFactor(getShift()), Disp);
- } else if (Fixup) {
- return X8632::Traits::Address::Absolute(Disp, Fixup);
+ X8632::Traits::ScaleFactor(getShift()), Disp, Fixup);
} else {
- return X8632::Traits::Address::Absolute(Disp);
+ return X8632::Traits::Address(Disp, Fixup);
}
}
@@ -238,7 +237,8 @@
int32_t Offset =
Var->getStackOffset() + Target->getStackAdjustment() + getOffset();
return X8632::Traits::Address(
- RegX8632::getEncodedGPR(Target->getFrameOrStackReg()), Offset);
+ RegX8632::getEncodedGPR(Target->getFrameOrStackReg()), Offset,
+ AssemblerFixup::NoFixup);
}
void MachineTraits<TargetX8632>::VariableSplit::emit(const Cfg *Func) const {
diff --git a/src/IceInstX8664.cpp b/src/IceInstX8664.cpp
index cb765ae..bf38f1b 100644
--- a/src/IceInstX8664.cpp
+++ b/src/IceInstX8664.cpp
@@ -106,9 +106,10 @@
llvm_unreachable("Invalid offset type for x86 mem operand");
}
- if (Base) {
+ if (Base || Index) {
Str << "(";
- Base->emit(Func);
+ if (Base)
+ Base->emit(Func);
if (Index) {
Str << ",";
Index->emit(Func);
@@ -133,8 +134,8 @@
Dumped = true;
}
if (Index) {
- assert(Base);
- Str << "+";
+ if (Base)
+ Str << "+";
if (Shift > 0)
Str << (1u << Shift) << "*";
if (Func)
@@ -190,18 +191,16 @@
return X8664::Traits::Address(
RegX8664::getEncodedGPR(getBase()->getRegNum()),
RegX8664::getEncodedGPR(getIndex()->getRegNum()),
- X8664::Traits::ScaleFactor(getShift()), Disp);
+ X8664::Traits::ScaleFactor(getShift()), Disp, Fixup);
} else if (getBase()) {
return X8664::Traits::Address(
- RegX8664::getEncodedGPR(getBase()->getRegNum()), Disp);
+ RegX8664::getEncodedGPR(getBase()->getRegNum()), Disp, Fixup);
} else if (getIndex()) {
return X8664::Traits::Address(
RegX8664::getEncodedGPR(getIndex()->getRegNum()),
- X8664::Traits::ScaleFactor(getShift()), Disp);
- } else if (Fixup) {
- return X8664::Traits::Address::Absolute(Disp, Fixup);
+ X8664::Traits::ScaleFactor(getShift()), Disp, Fixup);
} else {
- return X8664::Traits::Address::Absolute(Disp);
+ return X8664::Traits::Address(Disp, Fixup);
}
}
@@ -212,7 +211,8 @@
int32_t Offset =
Var->getStackOffset() + Target->getStackAdjustment() + getOffset();
return X8664::Traits::Address(
- RegX8664::getEncodedGPR(Target->getFrameOrStackReg()), Offset);
+ RegX8664::getEncodedGPR(Target->getFrameOrStackReg()), Offset,
+ AssemblerFixup::NoFixup);
}
void MachineTraits<TargetX8664>::VariableSplit::emit(const Cfg *Func) const {
diff --git a/src/IceInstX86BaseImpl.h b/src/IceInstX86BaseImpl.h
index 056ecfe..0f1ddd3 100644
--- a/src/IceInstX86BaseImpl.h
+++ b/src/IceInstX86BaseImpl.h
@@ -2657,7 +2657,8 @@
Width);
typename InstX86Base<Machine>::Traits::Address StackSlot =
typename InstX86Base<Machine>::Traits::Address(
- InstX86Base<Machine>::Traits::RegisterSet::Encoded_Reg_esp, 0);
+ InstX86Base<Machine>::Traits::RegisterSet::Encoded_Reg_esp, 0,
+ AssemblerFixup::NoFixup);
Asm->movss(Ty, StackSlot,
InstX86Base<Machine>::Traits::RegisterSet::getEncodedXmm(
Var->getRegNum()));
@@ -2755,7 +2756,8 @@
InstX86Base<Machine>::Traits::RegisterSet::Encoded_Reg_esp, Width);
typename InstX86Base<Machine>::Traits::Address StackSlot =
typename InstX86Base<Machine>::Traits::Address(
- InstX86Base<Machine>::Traits::RegisterSet::Encoded_Reg_esp, 0);
+ InstX86Base<Machine>::Traits::RegisterSet::Encoded_Reg_esp, 0,
+ AssemblerFixup::NoFixup);
Asm->fstp(Ty, StackSlot);
Asm->movss(Ty, InstX86Base<Machine>::Traits::RegisterSet::getEncodedXmm(
Dest->getRegNum()),
diff --git a/src/IceTargetLoweringX8632Traits.h b/src/IceTargetLoweringX8632Traits.h
index 918a585..6872547 100644
--- a/src/IceTargetLoweringX8632Traits.h
+++ b/src/IceTargetLoweringX8632Traits.h
@@ -182,79 +182,69 @@
return *this;
}
- Address(GPRRegister base, int32_t disp) {
- if (disp == 0 && base != RegX8632::Encoded_Reg_ebp) {
- SetModRM(0, base);
- if (base == RegX8632::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8632::Encoded_Reg_esp, base);
- } else if (Utils::IsInt(8, disp)) {
- SetModRM(1, base);
- if (base == RegX8632::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8632::Encoded_Reg_esp, base);
- SetDisp8(disp);
+ Address(GPRRegister Base, int32_t Disp, AssemblerFixup *Fixup) {
+ if (Fixup == nullptr && Disp == 0 && Base != RegX8632::Encoded_Reg_ebp) {
+ SetModRM(0, Base);
+ if (Base == RegX8632::Encoded_Reg_esp)
+ SetSIB(TIMES_1, RegX8632::Encoded_Reg_esp, Base);
+ } else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
+ SetModRM(1, Base);
+ if (Base == RegX8632::Encoded_Reg_esp)
+ SetSIB(TIMES_1, RegX8632::Encoded_Reg_esp, Base);
+ SetDisp8(Disp);
} else {
- SetModRM(2, base);
- if (base == RegX8632::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8632::Encoded_Reg_esp, base);
- SetDisp32(disp);
+ SetModRM(2, Base);
+ if (Base == RegX8632::Encoded_Reg_esp)
+ SetSIB(TIMES_1, RegX8632::Encoded_Reg_esp, Base);
+ SetDisp32(Disp);
+ if (Fixup)
+ SetFixup(Fixup);
}
}
- Address(GPRRegister index, ScaleFactor scale, int32_t disp) {
- assert(index != RegX8632::Encoded_Reg_esp); // Illegal addressing mode.
+ Address(GPRRegister Index, ScaleFactor Scale, int32_t Disp,
+ AssemblerFixup *Fixup) {
+ assert(Index != RegX8632::Encoded_Reg_esp); // Illegal addressing mode.
SetModRM(0, RegX8632::Encoded_Reg_esp);
- SetSIB(scale, index, RegX8632::Encoded_Reg_ebp);
- SetDisp32(disp);
+ SetSIB(Scale, Index, RegX8632::Encoded_Reg_ebp);
+ SetDisp32(Disp);
+ if (Fixup)
+ SetFixup(Fixup);
}
- Address(GPRRegister base, GPRRegister index, ScaleFactor scale,
- int32_t disp) {
- assert(index != RegX8632::Encoded_Reg_esp); // Illegal addressing mode.
- if (disp == 0 && base != RegX8632::Encoded_Reg_ebp) {
+ Address(GPRRegister Base, GPRRegister Index, ScaleFactor Scale,
+ int32_t Disp, AssemblerFixup *Fixup) {
+ assert(Index != RegX8632::Encoded_Reg_esp); // Illegal addressing mode.
+ if (Fixup == nullptr && Disp == 0 && Base != RegX8632::Encoded_Reg_ebp) {
SetModRM(0, RegX8632::Encoded_Reg_esp);
- SetSIB(scale, index, base);
- } else if (Utils::IsInt(8, disp)) {
+ SetSIB(Scale, Index, Base);
+ } else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
SetModRM(1, RegX8632::Encoded_Reg_esp);
- SetSIB(scale, index, base);
- SetDisp8(disp);
+ SetSIB(Scale, Index, Base);
+ SetDisp8(Disp);
} else {
SetModRM(2, RegX8632::Encoded_Reg_esp);
- SetSIB(scale, index, base);
- SetDisp32(disp);
+ SetSIB(Scale, Index, Base);
+ SetDisp32(Disp);
+ if (Fixup)
+ SetFixup(Fixup);
}
}
- /// AbsoluteTag is a special tag used by clients to create an absolute
- /// Address.
- enum AbsoluteTag { ABSOLUTE };
-
- Address(AbsoluteTag, const uintptr_t Addr) {
- SetModRM(0, RegX8632::Encoded_Reg_ebp);
- SetDisp32(Addr);
- }
-
- // TODO(jpp): remove this.
- static Address Absolute(const uintptr_t Addr) {
- return Address(ABSOLUTE, Addr);
- }
-
- Address(AbsoluteTag, RelocOffsetT Offset, AssemblerFixup *Fixup) {
+ /// Generate an absolute address expression on x86-32.
+ Address(RelocOffsetT Offset, AssemblerFixup *Fixup) {
SetModRM(0, RegX8632::Encoded_Reg_ebp);
// Use the Offset in the displacement for now. If we decide to process
// fixups later, we'll need to patch up the emitted displacement.
SetDisp32(Offset);
- SetFixup(Fixup);
- }
-
- // TODO(jpp): remove this.
- static Address Absolute(RelocOffsetT Offset, AssemblerFixup *Fixup) {
- return Address(ABSOLUTE, Offset, Fixup);
+ if (Fixup)
+ SetFixup(Fixup);
}
static Address ofConstPool(Assembler *Asm, const Constant *Imm) {
AssemblerFixup *Fixup = Asm->createFixup(llvm::ELF::R_386_32, Imm);
const RelocOffsetT Offset = 0;
- return Address(ABSOLUTE, Offset, Fixup);
+ return Address(Offset, Fixup);
}
};
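
The constructors above pick the displacement width from both the displacement
value and the presence of a fixup: a fixup always forces the 4-byte (mod=10)
form so there is a full field to patch. A self-contained sketch of that
policy, under the assumption that these three encodings are the only ones in
play (names are illustrative):

#include <cstdint>

enum class DispWidth { None, Disp8, Disp32 };

DispWidth pickDispWidth(int32_t Disp, bool HasFixup, bool BaseIsEbp) {
  if (!HasFixup && Disp == 0 && !BaseIsEbp)
    return DispWidth::None;  // mod=00: no displacement bytes
  if (!HasFixup && Disp >= -128 && Disp <= 127)
    return DispWidth::Disp8; // mod=01: one displacement byte
  return DispWidth::Disp32;  // mod=10: fixups need a full 4-byte field
}
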
diff --git a/src/IceTargetLoweringX8664Traits.h b/src/IceTargetLoweringX8664Traits.h
index 0ed40a8..4f1c6e4 100644
--- a/src/IceTargetLoweringX8664Traits.h
+++ b/src/IceTargetLoweringX8664Traits.h
@@ -201,81 +201,72 @@
return *this;
}
- Address(GPRRegister base, int32_t disp) {
- if (disp == 0 && (base & 7) != RegX8664::Encoded_Reg_ebp) {
- SetModRM(0, base);
- if ((base & 7) == RegX8664::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, base);
- } else if (Utils::IsInt(8, disp)) {
- SetModRM(1, base);
- if ((base & 7) == RegX8664::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, base);
- SetDisp8(disp);
+ Address(GPRRegister Base, int32_t Disp, AssemblerFixup *Fixup) {
+ if (Fixup == nullptr && Disp == 0 &&
+ (Base & 7) != RegX8664::Encoded_Reg_ebp) {
+ SetModRM(0, Base);
+ if ((Base & 7) == RegX8664::Encoded_Reg_esp)
+ SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, Base);
+ } else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
+ SetModRM(1, Base);
+ if ((Base & 7) == RegX8664::Encoded_Reg_esp)
+ SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, Base);
+ SetDisp8(Disp);
} else {
- SetModRM(2, base);
- if ((base & 7) == RegX8664::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, base);
- SetDisp32(disp);
+ SetModRM(2, Base);
+ if ((Base & 7) == RegX8664::Encoded_Reg_esp)
+ SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, Base);
+ SetDisp32(Disp);
+ if (Fixup)
+ SetFixup(Fixup);
}
}
- Address(GPRRegister index, ScaleFactor scale, int32_t disp) {
- assert(index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
+ Address(GPRRegister Index, ScaleFactor Scale, int32_t Disp,
+ AssemblerFixup *Fixup) {
+ assert(Index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
SetModRM(0, RegX8664::Encoded_Reg_esp);
- SetSIB(scale, index, RegX8664::Encoded_Reg_ebp);
- SetDisp32(disp);
+ SetSIB(Scale, Index, RegX8664::Encoded_Reg_ebp);
+ SetDisp32(Disp);
+ if (Fixup)
+ SetFixup(Fixup);
}
- Address(GPRRegister base, GPRRegister index, ScaleFactor scale,
- int32_t disp) {
- assert(index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
- if (disp == 0 && (base & 7) != RegX8664::Encoded_Reg_ebp) {
+ Address(GPRRegister Base, GPRRegister Index, ScaleFactor Scale,
+ int32_t Disp, AssemblerFixup *Fixup) {
+ assert(Index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
+ if (Fixup == nullptr && Disp == 0 &&
+ (Base & 7) != RegX8664::Encoded_Reg_ebp) {
SetModRM(0, RegX8664::Encoded_Reg_esp);
- SetSIB(scale, index, base);
- } else if (Utils::IsInt(8, disp)) {
+ SetSIB(Scale, Index, Base);
+ } else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
SetModRM(1, RegX8664::Encoded_Reg_esp);
- SetSIB(scale, index, base);
- SetDisp8(disp);
+ SetSIB(Scale, Index, Base);
+ SetDisp8(Disp);
} else {
SetModRM(2, RegX8664::Encoded_Reg_esp);
- SetSIB(scale, index, base);
- SetDisp32(disp);
+ SetSIB(Scale, Index, Base);
+ SetDisp32(Disp);
+ if (Fixup)
+ SetFixup(Fixup);
}
}
- // PcRelTag is a special tag for requesting rip-relative addressing in
- // X86-64.
- // TODO(jpp): this is bogus. remove.
- enum AbsoluteTag { ABSOLUTE };
-
- Address(AbsoluteTag, const uintptr_t Addr) {
- SetModRM(0, RegX8664::Encoded_Reg_ebp);
- SetDisp32(Addr);
- }
-
- // TODO(jpp): remove this.
- static Address Absolute(const uintptr_t Addr) {
- return Address(ABSOLUTE, Addr);
- }
-
- Address(AbsoluteTag, RelocOffsetT Offset, AssemblerFixup *Fixup) {
+ /// Generate a RIP-relative address expression on x86-64.
+ Address(RelocOffsetT Offset, AssemblerFixup *Fixup) {
SetModRM(0, RegX8664::Encoded_Reg_ebp);
// Use the Offset in the displacement for now. If we decide to process
// fixups later, we'll need to patch up the emitted displacement.
SetDisp32(Offset);
- SetFixup(Fixup);
- }
-
- // TODO(jpp): remove this.
- static Address Absolute(RelocOffsetT Offset, AssemblerFixup *Fixup) {
- return Address(ABSOLUTE, Offset, Fixup);
+ if (Fixup)
+ SetFixup(Fixup);
}
static Address ofConstPool(Assembler *Asm, const Constant *Imm) {
// TODO(jpp): ???
AssemblerFixup *Fixup = Asm->createFixup(RelFixup, Imm);
const RelocOffsetT Offset = 4;
- return Address(ABSOLUTE, Offset, Fixup);
+ return Address(Offset, Fixup);
}
};
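
One way to see why the same disp-only constructor is documented as absolute on
x86-32 but RIP-relative on x86-64: it emits mod=00 with rm=101 (ebp), i.e.
ModRM byte 0x05, and the two architectures decode that byte differently. A
small sketch of the encoding (assuming the standard ModRM bit layout):

#include <cstdint>

// mod in bits 7:6, reg in bits 5:3, rm in bits 2:0.
constexpr uint8_t encodeModRM(uint8_t Mod, uint8_t Reg, uint8_t RM) {
  return static_cast<uint8_t>((Mod << 6) | (Reg << 3) | (RM & 7));
}

// The disp-only Address form above: x86-32 reads this as [disp32] (absolute),
// while x86-64 reads the same byte as [rip + disp32].
static_assert(encodeModRM(0, 0, 5) == 0x05, "disp-only form is ModRM 0x05");
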
diff --git a/src/IceTargetLoweringX86BaseImpl.h b/src/IceTargetLoweringX86BaseImpl.h
index 8739c77..09bc6dd 100644
--- a/src/IceTargetLoweringX86BaseImpl.h
+++ b/src/IceTargetLoweringX86BaseImpl.h
@@ -798,7 +798,8 @@
Offset += getStackAdjustment();
}
return typename Traits::Address(
- Traits::RegisterSet::getEncodedGPR(BaseRegNum), Offset);
+ Traits::RegisterSet::getEncodedGPR(BaseRegNum), Offset,
+ AssemblerFixup::NoFixup);
}
/// Helper function for addProlog().
@@ -2851,15 +2852,19 @@
break;
case InstIcmp::Eq:
case InstIcmp::Ule:
- _mov(Temp, Src0LoRM);
- _or(Temp, Src0HiRM);
+ // Mov Src0HiRM first, because it was legalized most recently, and will
+ // sometimes avoid a move before the OR.
+ _mov(Temp, Src0HiRM);
+ _or(Temp, Src0LoRM);
Context.insert(InstFakeUse::create(Func, Temp));
setccOrBr(Traits::Cond::Br_e, Dest, Br);
return;
case InstIcmp::Ne:
case InstIcmp::Ugt:
- _mov(Temp, Src0LoRM);
- _or(Temp, Src0HiRM);
+ // Mov Src0HiRM first, because it was legalized most recently, and will
+ // sometimes avoid a move before the OR.
+ _mov(Temp, Src0HiRM);
+ _or(Temp, Src0LoRM);
Context.insert(InstFakeUse::create(Func, Temp));
setccOrBr(Traits::Cond::Br_ne, Dest, Br);
return;
@@ -4094,16 +4099,17 @@
}
inline bool isAdd(const Inst *Inst) {
- if (const InstArithmetic *Arith =
- llvm::dyn_cast_or_null<const InstArithmetic>(Inst)) {
+ if (auto *Arith = llvm::dyn_cast_or_null<const InstArithmetic>(Inst)) {
return (Arith->getOp() == InstArithmetic::Add);
}
return false;
}
-inline void dumpAddressOpt(const Cfg *Func, const Variable *Base,
+inline void dumpAddressOpt(const Cfg *Func,
+ const ConstantRelocatable *Relocatable,
+ int32_t Offset, const Variable *Base,
const Variable *Index, uint16_t Shift,
- int32_t Offset, const Inst *Reason) {
+ const Inst *Reason) {
if (!BuildDefs::dump())
return;
if (!Func->isVerbose(IceV_AddrOpt))
@@ -4122,11 +4128,13 @@
Index->dump(Func);
else
Str << "<null>";
- Str << ", Shift=" << Shift << ", Offset=" << Offset << "\n";
+ Str << ", Shift=" << Shift << ", Offset=" << Offset
+ << ", Relocatable=" << Relocatable << "\n";
}
-inline bool matchTransitiveAssign(const VariablesMetadata *VMetadata,
- Variable *&Var, const Inst *&Reason) {
+inline bool matchAssign(const VariablesMetadata *VMetadata, Variable *&Var,
+ ConstantRelocatable *&Relocatable, int32_t &Offset,
+ const Inst *&Reason) {
// Var originates from Var=SrcVar ==> set Var:=SrcVar
if (Var == nullptr)
return false;
@@ -4135,7 +4143,7 @@
if (llvm::isa<InstAssign>(VarAssign)) {
Operand *SrcOp = VarAssign->getSrc(0);
assert(SrcOp);
- if (Variable *SrcVar = llvm::dyn_cast<Variable>(SrcOp)) {
+ if (auto *SrcVar = llvm::dyn_cast<Variable>(SrcOp)) {
if (!VMetadata->isMultiDef(SrcVar) &&
// TODO: ensure SrcVar stays single-BB
true) {
@@ -4143,6 +4151,21 @@
Reason = VarAssign;
return true;
}
+ } else if (auto *Const = llvm::dyn_cast<ConstantInteger32>(SrcOp)) {
+ int32_t MoreOffset = Const->getValue();
+ if (Utils::WouldOverflowAdd(Offset, MoreOffset))
+ return false;
+ Var = nullptr;
+ Offset += MoreOffset;
+ Reason = VarAssign;
+ return true;
+ } else if (auto *AddReloc = llvm::dyn_cast<ConstantRelocatable>(SrcOp)) {
+ if (Relocatable == nullptr) {
+ Var = nullptr;
+ Relocatable = AddReloc;
+ Reason = VarAssign;
+ return true;
+ }
}
}
}
@@ -4158,16 +4181,16 @@
return false;
if (Index != nullptr)
return false;
- const Inst *BaseInst = VMetadata->getSingleDefinition(Base);
+ auto *BaseInst = VMetadata->getSingleDefinition(Base);
if (BaseInst == nullptr)
return false;
assert(!VMetadata->isMultiDef(Base));
if (BaseInst->getSrcSize() < 2)
return false;
- if (Variable *Var1 = llvm::dyn_cast<Variable>(BaseInst->getSrc(0))) {
+ if (auto *Var1 = llvm::dyn_cast<Variable>(BaseInst->getSrc(0))) {
if (VMetadata->isMultiDef(Var1))
return false;
- if (Variable *Var2 = llvm::dyn_cast<Variable>(BaseInst->getSrc(1))) {
+ if (auto *Var2 = llvm::dyn_cast<Variable>(BaseInst->getSrc(1))) {
if (VMetadata->isMultiDef(Var2))
return false;
if (isAdd(BaseInst) &&
@@ -4191,20 +4214,23 @@
// Index=Var, Shift+=log2(Const)
if (Index == nullptr)
return false;
- const Inst *IndexInst = VMetadata->getSingleDefinition(Index);
+ auto *IndexInst = VMetadata->getSingleDefinition(Index);
if (IndexInst == nullptr)
return false;
assert(!VMetadata->isMultiDef(Index));
if (IndexInst->getSrcSize() < 2)
return false;
- if (const InstArithmetic *ArithInst =
- llvm::dyn_cast<InstArithmetic>(IndexInst)) {
- if (Variable *Var = llvm::dyn_cast<Variable>(ArithInst->getSrc(0))) {
- if (ConstantInteger32 *Const =
+ if (auto *ArithInst = llvm::dyn_cast<InstArithmetic>(IndexInst)) {
+ if (auto *Var = llvm::dyn_cast<Variable>(ArithInst->getSrc(0))) {
+ if (auto *Const =
llvm::dyn_cast<ConstantInteger32>(ArithInst->getSrc(1))) {
- if (ArithInst->getOp() == InstArithmetic::Mul &&
- !VMetadata->isMultiDef(Var) && Const->getType() == IceType_i32) {
- uint64_t Mult = Const->getValue();
+ if (VMetadata->isMultiDef(Var) || Const->getType() != IceType_i32)
+ return false;
+ switch (ArithInst->getOp()) {
+ default:
+ return false;
+ case InstArithmetic::Mul: {
+ uint32_t Mult = Const->getValue();
uint32_t LogMult;
switch (Mult) {
case 1:
@@ -4229,6 +4255,25 @@
return true;
}
}
+ case InstArithmetic::Shl: {
+ uint32_t ShiftAmount = Const->getValue();
+ switch (ShiftAmount) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ break;
+ default:
+ return false;
+ }
+ if (Shift + ShiftAmount <= 3) {
+ Index = Var;
+ Shift += ShiftAmount;
+ Reason = IndexInst;
+ return true;
+ }
+ }
+ }
}
}
}
@@ -4236,49 +4281,93 @@
}
inline bool matchOffsetBase(const VariablesMetadata *VMetadata, Variable *&Base,
- int32_t &Offset, const Inst *&Reason) {
+ ConstantRelocatable *&Relocatable, int32_t &Offset,
+ const Inst *&Reason) {
// Base is Base=Var+Const || Base is Base=Const+Var ==>
// set Base=Var, Offset+=Const
// Base is Base=Var-Const ==>
// set Base=Var, Offset-=Const
- if (Base == nullptr)
+ if (Base == nullptr) {
return false;
+ }
const Inst *BaseInst = VMetadata->getSingleDefinition(Base);
- if (BaseInst == nullptr)
+ if (BaseInst == nullptr) {
return false;
+ }
assert(!VMetadata->isMultiDef(Base));
- if (const InstArithmetic *ArithInst =
- llvm::dyn_cast<const InstArithmetic>(BaseInst)) {
+ if (auto *ArithInst = llvm::dyn_cast<const InstArithmetic>(BaseInst)) {
if (ArithInst->getOp() != InstArithmetic::Add &&
ArithInst->getOp() != InstArithmetic::Sub)
return false;
bool IsAdd = ArithInst->getOp() == InstArithmetic::Add;
- Variable *Var = nullptr;
- ConstantInteger32 *Const = nullptr;
- if (Variable *VariableOperand =
- llvm::dyn_cast<Variable>(ArithInst->getSrc(0))) {
- Var = VariableOperand;
- Const = llvm::dyn_cast<ConstantInteger32>(ArithInst->getSrc(1));
- } else if (IsAdd) {
- Const = llvm::dyn_cast<ConstantInteger32>(ArithInst->getSrc(0));
- Var = llvm::dyn_cast<Variable>(ArithInst->getSrc(1));
+ Operand *Src0 = ArithInst->getSrc(0);
+ Operand *Src1 = ArithInst->getSrc(1);
+ auto *Var0 = llvm::dyn_cast<Variable>(Src0);
+ auto *Var1 = llvm::dyn_cast<Variable>(Src1);
+ auto *Const0 = llvm::dyn_cast<ConstantInteger32>(Src0);
+ auto *Const1 = llvm::dyn_cast<ConstantInteger32>(Src1);
+ auto *Reloc0 = llvm::dyn_cast<ConstantRelocatable>(Src0);
+ auto *Reloc1 = llvm::dyn_cast<ConstantRelocatable>(Src1);
+ Variable *NewBase = nullptr;
+ int32_t NewOffset = Offset;
+ ConstantRelocatable *NewRelocatable = Relocatable;
+ if (Var0 && Var1)
+ // TODO(sehr): merge base/index splitting into here.
+ return false;
+ if (!IsAdd && Var1)
+ return false;
+ if (Var0)
+ NewBase = Var0;
+ else if (Var1)
+ NewBase = Var1;
+ // Don't know how to add/subtract two relocatables.
+ if ((Relocatable && (Reloc0 || Reloc1)) || (Reloc0 && Reloc1))
+ return false;
+ // Don't know how to subtract a relocatable.
+ if (!IsAdd && Reloc1)
+ return false;
+ // Incorporate ConstantRelocatables.
+ if (Reloc0)
+ NewRelocatable = Reloc0;
+ else if (Reloc1)
+ NewRelocatable = Reloc1;
+ // Compute the updated constant offset.
+ if (Const0) {
+ int32_t MoreOffset = IsAdd ? Const0->getValue() : -Const0->getValue();
+ if (Utils::WouldOverflowAdd(NewOffset, MoreOffset))
+ return false;
+ NewOffset += MoreOffset;
}
- if (Var == nullptr || Const == nullptr || VMetadata->isMultiDef(Var))
- return false;
- int32_t MoreOffset = IsAdd ? Const->getValue() : -Const->getValue();
- if (Utils::WouldOverflowAdd(Offset, MoreOffset))
- return false;
- Base = Var;
- Offset += MoreOffset;
+ if (Const1) {
+ int32_t MoreOffset = IsAdd ? Const1->getValue() : -Const1->getValue();
+ if (Utils::WouldOverflowAdd(NewOffset, MoreOffset))
+ return false;
+ NewOffset += MoreOffset;
+ }
+    // Update the computed address parameters once we are sure the
+    // optimization is valid.
+ Base = NewBase;
+ Offset = NewOffset;
+ Relocatable = NewRelocatable;
Reason = BaseInst;
return true;
}
return false;
}
-inline void computeAddressOpt(Cfg *Func, const Inst *Instr, Variable *&Base,
- Variable *&Index, uint16_t &Shift,
- int32_t &Offset) {
+// Builds information for a canonical address expression:
+//   <Relocatable + Offset>(Base, Index, Shift)
+// On entry:
+//   Relocatable == nullptr,
+//   Offset == 0,
+//   Base is a Variable,
+//   Index == nullptr,
+//   Shift == 0
+inline bool computeAddressOpt(Cfg *Func, const Inst *Instr,
+ ConstantRelocatable *&Relocatable,
+ int32_t &Offset, Variable *&Base,
+ Variable *&Index, uint16_t &Shift) {
+ bool AddressWasOptimized = false;
Func->resetCurrentNode();
if (Func->isVerbose(IceV_AddrOpt)) {
OstreamLocker L(Func->getContext());
@@ -4286,54 +4375,75 @@
Str << "\nStarting computeAddressOpt for instruction:\n ";
Instr->dumpDecorated(Func);
}
- (void)Offset; // TODO: pattern-match for non-zero offsets.
if (Base == nullptr)
- return;
+ return AddressWasOptimized;
// If the Base has more than one use or is live across multiple blocks, then
// don't go further. Alternatively (?), never consider a transformation that
// would change a variable that is currently *not* live across basic block
// boundaries into one that *is*.
if (Func->getVMetadata()->isMultiBlock(Base) /* || Base->getUseCount() > 1*/)
- return;
+ return AddressWasOptimized;
const bool MockBounds = Func->getContext()->getFlags().getMockBoundsCheck();
const VariablesMetadata *VMetadata = Func->getVMetadata();
- bool Continue = true;
- while (Continue) {
- const Inst *Reason = nullptr;
- if (matchTransitiveAssign(VMetadata, Base, Reason) ||
- matchTransitiveAssign(VMetadata, Index, Reason) ||
- (!MockBounds &&
- matchCombinedBaseIndex(VMetadata, Base, Index, Shift, Reason)) ||
- (!MockBounds && matchShiftedIndex(VMetadata, Index, Shift, Reason)) ||
- matchOffsetBase(VMetadata, Base, Offset, Reason)) {
- dumpAddressOpt(Func, Base, Index, Shift, Offset, Reason);
- } else {
- Continue = false;
+ const Inst *Reason = nullptr;
+ do {
+ if (Reason) {
+ dumpAddressOpt(Func, Relocatable, Offset, Base, Index, Shift, Reason);
+ AddressWasOptimized = true;
+ Reason = nullptr;
}
+    // Follow Base and Index through assignments back to their definitions.
+ if (matchAssign(VMetadata, Base, Relocatable, Offset, Reason)) {
+ // Assignments of Base from a Relocatable or ConstantInt32 can result
+ // in Base becoming nullptr. To avoid code duplication in this loop we
+ // prefer that Base be non-nullptr if possible.
+ if ((Base == nullptr) && (Index != nullptr) && (Shift == 0))
+ std::swap(Base, Index);
+ continue;
+ }
+ if (matchAssign(VMetadata, Index, Relocatable, Offset, Reason))
+ continue;
- // Index is Index=Var<<Const && Const+Shift<=3 ==>
- // Index=Var, Shift+=Const
-
- // Index is Index=Const*Var && log2(Const)+Shift<=3 ==>
- // Index=Var, Shift+=log2(Const)
-
- // Index && Shift==0 && Base is Base=Var*Const && log2(Const)+Shift<=3 ==>
- // swap(Index,Base)
- // Similar for Base=Const*Var and Base=Var<<Const
-
- // Index is Index=Var+Const ==>
- // set Index=Var, Offset+=(Const<<Shift)
-
- // Index is Index=Const+Var ==>
- // set Index=Var, Offset+=(Const<<Shift)
-
- // Index is Index=Var-Const ==>
- // set Index=Var, Offset-=(Const<<Shift)
-
+ if (!MockBounds) {
+ // Transition from:
+ // <Relocatable + Offset>(Base) to
+ // <Relocatable + Offset>(Base, Index)
+ if (matchCombinedBaseIndex(VMetadata, Base, Index, Shift, Reason))
+ continue;
+ // Recognize multiply/shift and update Shift amount.
+ // Index becomes Index=Var<<Const && Const+Shift<=3 ==>
+ // Index=Var, Shift+=Const
+ // Index becomes Index=Const*Var && log2(Const)+Shift<=3 ==>
+ // Index=Var, Shift+=log2(Const)
+ if (matchShiftedIndex(VMetadata, Index, Shift, Reason))
+ continue;
+ // If Shift is zero, the choice of Base and Index was purely arbitrary.
+ // Recognize multiply/shift and set Shift amount.
+ // Shift==0 && Base is Base=Var*Const && log2(Const)+Shift<=3 ==>
+ // swap(Index,Base)
+ // Similar for Base=Const*Var and Base=Var<<Const
+ if ((Shift == 0) && matchShiftedIndex(VMetadata, Base, Shift, Reason)) {
+ std::swap(Base, Index);
+ continue;
+ }
+ }
+ // Update Offset to reflect additions/subtractions with constants and
+ // relocatables.
// TODO: consider overflow issues with respect to Offset.
// TODO: handle symbolic constants.
- }
+ if (matchOffsetBase(VMetadata, Base, Relocatable, Offset, Reason))
+ continue;
+ // TODO(sehr, stichnot): Handle updates of Index with Shift != 0.
+ // Index is Index=Var+Const ==>
+ // set Index=Var, Offset+=(Const<<Shift)
+ // Index is Index=Const+Var ==>
+ // set Index=Var, Offset+=(Const<<Shift)
+ // Index is Index=Var-Const ==>
+ // set Index=Var, Offset-=(Const<<Shift)
+ break;
+ } while (Reason);
+ return AddressWasOptimized;
}
/// Add a mock bounds check on the memory address before using it as a load or
@@ -4415,19 +4525,26 @@
Variable *Dest = Inst->getDest();
Operand *Addr = Inst->getSrc(0);
Variable *Index = nullptr;
+ ConstantRelocatable *Relocatable = nullptr;
uint16_t Shift = 0;
- int32_t Offset = 0; // TODO: make Constant
+ int32_t Offset = 0;
// Vanilla ICE load instructions should not use the segment registers, and
// computeAddressOpt only works at the level of Variables and Constants, not
// other Traits::X86OperandMem, so there should be no mention of segment
// registers there either.
const typename Traits::X86OperandMem::SegmentRegisters SegmentReg =
Traits::X86OperandMem::DefaultSegment;
- Variable *Base = llvm::dyn_cast<Variable>(Addr);
- computeAddressOpt(Func, Inst, Base, Index, Shift, Offset);
- if (Base && Addr != Base) {
+ auto *Base = llvm::dyn_cast<Variable>(Addr);
+ if (computeAddressOpt(Func, Inst, Relocatable, Offset, Base, Index, Shift)) {
Inst->setDeleted();
- Constant *OffsetOp = Ctx->getConstantInt32(Offset);
+ Constant *OffsetOp = nullptr;
+ if (Relocatable == nullptr) {
+ OffsetOp = Ctx->getConstantInt32(Offset);
+ } else {
+ OffsetOp = Ctx->getConstantSym(Relocatable->getOffset() + Offset,
+ Relocatable->getName(),
+ Relocatable->getSuppressMangling());
+ }
Addr = Traits::X86OperandMem::create(Func, Dest->getType(), Base, OffsetOp,
Index, Shift, SegmentReg);
Context.insert(InstLoad::create(Func, Dest, Addr));
@@ -4623,19 +4740,26 @@
Operand *Data = Inst->getData();
Operand *Addr = Inst->getAddr();
Variable *Index = nullptr;
+ ConstantRelocatable *Relocatable = nullptr;
uint16_t Shift = 0;
- int32_t Offset = 0; // TODO: make Constant
- Variable *Base = llvm::dyn_cast<Variable>(Addr);
+ int32_t Offset = 0;
+ auto *Base = llvm::dyn_cast<Variable>(Addr);
// Vanilla ICE store instructions should not use the segment registers, and
// computeAddressOpt only works at the level of Variables and Constants, not
// other Traits::X86OperandMem, so there should be no mention of segment
// registers there either.
const typename Traits::X86OperandMem::SegmentRegisters SegmentReg =
Traits::X86OperandMem::DefaultSegment;
- computeAddressOpt(Func, Inst, Base, Index, Shift, Offset);
- if (Base && Addr != Base) {
+ if (computeAddressOpt(Func, Inst, Relocatable, Offset, Base, Index, Shift)) {
Inst->setDeleted();
- Constant *OffsetOp = Ctx->getConstantInt32(Offset);
+ Constant *OffsetOp = nullptr;
+ if (Relocatable == nullptr) {
+ OffsetOp = Ctx->getConstantInt32(Offset);
+ } else {
+ OffsetOp = Ctx->getConstantSym(Relocatable->getOffset() + Offset,
+ Relocatable->getName(),
+ Relocatable->getSuppressMangling());
+ }
Addr = Traits::X86OperandMem::create(Func, Data->getType(), Base, OffsetOp,
Index, Shift, SegmentReg);
InstStore *NewStore = InstStore::create(Func, Data, Addr);
diff --git a/tests_lit/llvm2ice_tests/address-mode-global.ll b/tests_lit/llvm2ice_tests/address-mode-global.ll
new file mode 100644
index 0000000..a8f6d4d
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/address-mode-global.ll
@@ -0,0 +1,35 @@
+; This file checks support for address mode optimization.
+
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
+
+@bytes = internal global [1024 x i8] zeroinitializer
+
+define internal i32 @load_global_direct() {
+entry:
+ %base = ptrtoint [1024 x i8]* @bytes to i32
+ %addr_lo.int = add i32 0, %base
+ %addr_hi.int = add i32 4, %base
+ %addr_lo.ptr = inttoptr i32 %addr_lo.int to i32*
+ %addr_hi.ptr = inttoptr i32 %addr_hi.int to i32*
+ %addr_lo.load = load i32, i32* %addr_lo.ptr, align 1
+ %addr_hi.load = load i32, i32* %addr_hi.ptr, align 1
+ %result = add i32 %addr_lo.load, %addr_hi.load
+ ret i32 %result
+; CHECK-LABEL: load_global_direct
+; CHECK-NEXT: mov eax,DWORD PTR ds:0x0{{.*}}bytes
+; CHECK-NEXT: add eax,DWORD PTR ds:0x4{{.*}}bytes
+}
+
+define internal i32 @load_global_indexed(i32 %arg) {
+entry:
+ %offset = shl i32 %arg, 3
+ %base = ptrtoint [1024 x i8]* @bytes to i32
+ %addr.int = add i32 %offset, %base
+ %addr.ptr = inttoptr i32 %addr.int to i32*
+ %addr.load = load i32, i32* %addr.ptr, align 1
+ ret i32 %addr.load
+; CHECK-LABEL: load_global_indexed
+; CHECK-NEXT: mov eax,DWORD PTR [esp+0x4]
+; CHECK-NEXT: mov eax,DWORD PTR [eax*8+0x0]
+}
diff --git a/unittest/AssemblerX8632/ControlFlow.cpp b/unittest/AssemblerX8632/ControlFlow.cpp
index 35adf07..10df5ac 100644
--- a/unittest/AssemblerX8632/ControlFlow.cpp
+++ b/unittest/AssemblerX8632/ControlFlow.cpp
@@ -133,7 +133,7 @@
__ hlt();
__ hlt();
__ mov(IceType_i32, GPRRegister::Encoded_Reg_eax, Immediate(0xf1f2f300));
- __ call(Address(GPRRegister::Encoded_Reg_esp, 0));
+ __ call(Address(GPRRegister::Encoded_Reg_esp, 0, AssemblerFixup::NoFixup));
__ popl(GPRRegister::Encoded_Reg_edx);
AssembledTest test = assemble();
diff --git a/unittest/AssemblerX8632/GPRArith.cpp b/unittest/AssemblerX8632/GPRArith.cpp
index 6ca27ac..309e345 100644
--- a/unittest/AssemblerX8632/GPRArith.cpp
+++ b/unittest/AssemblerX8632/GPRArith.cpp
@@ -132,7 +132,8 @@
Immediate(BaseValue)); \
} \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst, \
- Address(GPRRegister::Encoded_Reg_##Base, Disp)); \
+ Address(GPRRegister::Encoded_Reg_##Base, Disp, \
+ AssemblerFixup::NoFixup)); \
AssembledTest test = assemble(); \
test.run(); \
ASSERT_EQ(test.Base() + (Disp), test.Dst()) << TestString << " with Disp " \
@@ -148,13 +149,17 @@
__ mov(IceType_i32, GPRRegister::Encoded_Reg_##Index, \
Immediate(IndexValue)); \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst0, \
- Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_1, Disp)); \
+ Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_1, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst1, \
- Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_2, Disp)); \
+ Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_2, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst2, \
- Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_4, Disp)); \
+ Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_4, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst3, \
- Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_8, Disp)); \
+ Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_8, Disp, \
+ AssemblerFixup::NoFixup)); \
AssembledTest test = assemble(); \
test.run(); \
ASSERT_EQ((test.Index() << Traits::TIMES_1) + (Disp), test.Dst0()) \
@@ -186,16 +191,20 @@
} \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst0, \
Address(GPRRegister::Encoded_Reg_##Base, \
- GPRRegister::Encoded_Reg_##Index, Traits::TIMES_1, Disp)); \
+ GPRRegister::Encoded_Reg_##Index, Traits::TIMES_1, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst1, \
Address(GPRRegister::Encoded_Reg_##Base, \
- GPRRegister::Encoded_Reg_##Index, Traits::TIMES_2, Disp)); \
+ GPRRegister::Encoded_Reg_##Index, Traits::TIMES_2, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst2, \
Address(GPRRegister::Encoded_Reg_##Base, \
- GPRRegister::Encoded_Reg_##Index, Traits::TIMES_4, Disp)); \
+ GPRRegister::Encoded_Reg_##Index, Traits::TIMES_4, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst3, \
Address(GPRRegister::Encoded_Reg_##Base, \
- GPRRegister::Encoded_Reg_##Index, Traits::TIMES_8, Disp)); \
+ GPRRegister::Encoded_Reg_##Index, Traits::TIMES_8, Disp, \
+ AssemblerFixup::NoFixup)); \
AssembledTest test = assemble(); \
test.run(); \
uint32_t ExpectedIndexValue = test.Index(); \
@@ -287,7 +296,7 @@
do { \
static constexpr char TestString[] = "(" #Dst ", " #Value ")"; \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst, \
- Address(Address::ABSOLUTE, Value)); \
+ Address(Value, AssemblerFixup::NoFixup)); \
static constexpr uint32_t ByteCount = 6; \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
static constexpr uint8_t Opcode = 0x8D; \
diff --git a/unittest/AssemblerX8632/Locked.cpp b/unittest/AssemblerX8632/Locked.cpp
index 9be0f94..f222a22 100644
--- a/unittest/AssemblerX8632/Locked.cpp
+++ b/unittest/AssemblerX8632/Locked.cpp
@@ -200,7 +200,7 @@
// Ensures that xadd emits a lock prefix accordingly.
{
- __ xadd(IceType_i8, Address::Absolute(0x1FF00),
+ __ xadd(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, NotLocked);
static constexpr uint8_t ByteCountNotLocked8 = 7;
ASSERT_EQ(ByteCountNotLocked8, codeBytesSize());
@@ -208,7 +208,7 @@
0x01, 0x00);
reset();
- __ xadd(IceType_i8, Address::Absolute(0x1FF00),
+ __ xadd(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, Locked);
static constexpr uint8_t ByteCountLocked8 = 1 + ByteCountNotLocked8;
ASSERT_EQ(ByteCountLocked8, codeBytesSize());
@@ -218,7 +218,7 @@
}
{
- __ xadd(IceType_i16, Address::Absolute(0x1FF00),
+ __ xadd(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, NotLocked);
static constexpr uint8_t ByteCountNotLocked16 = 8;
ASSERT_EQ(ByteCountNotLocked16, codeBytesSize());
@@ -226,7 +226,7 @@
0xFF, 0x01, 0x00);
reset();
- __ xadd(IceType_i16, Address::Absolute(0x1FF00),
+ __ xadd(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, Locked);
static constexpr uint8_t ByteCountLocked16 = 1 + ByteCountNotLocked16;
ASSERT_EQ(ByteCountLocked16, codeBytesSize());
@@ -236,7 +236,7 @@
}
{
- __ xadd(IceType_i32, Address::Absolute(0x1FF00),
+ __ xadd(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, NotLocked);
static constexpr uint8_t ByteCountNotLocked32 = 7;
ASSERT_EQ(ByteCountNotLocked32, codeBytesSize());
@@ -244,7 +244,7 @@
0x01, 0x00);
reset();
- __ xadd(IceType_i32, Address::Absolute(0x1FF00),
+ __ xadd(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, Locked);
static constexpr uint8_t ByteCountLocked32 = 1 + ByteCountNotLocked32;
ASSERT_EQ(ByteCountLocked32, codeBytesSize());
@@ -307,14 +307,14 @@
static constexpr bool Locked = true;
// Ensures that cmpxchg8b emits a lock prefix accordingly.
- __ cmpxchg8b(Address::Absolute(0x1FF00), NotLocked);
+ __ cmpxchg8b(Address(0x1FF00, AssemblerFixup::NoFixup), NotLocked);
static constexpr uint8_t ByteCountNotLocked = 7;
ASSERT_EQ(ByteCountNotLocked, codeBytesSize());
verifyBytes<ByteCountNotLocked>(codeBytes(), 0x0F, 0xC7, 0x0D, 0x00, 0xFF,
0x01, 0x00);
reset();
- __ cmpxchg8b(Address::Absolute(0x1FF00), Locked);
+ __ cmpxchg8b(Address(0x1FF00, AssemblerFixup::NoFixup), Locked);
static constexpr uint8_t ByteCountLocked = 1 + ByteCountNotLocked;
ASSERT_EQ(ByteCountLocked, codeBytesSize());
verifyBytes<ByteCountLocked>(codeBytes(), 0xF0, 0x0F, 0xC7, 0x0D, 0x00, 0xFF,
@@ -400,7 +400,7 @@
// Ensures that cmpxchg emits a lock prefix accordingly.
{
- __ cmpxchg(IceType_i8, Address::Absolute(0x1FF00),
+ __ cmpxchg(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, NotLocked);
static constexpr uint8_t ByteCountNotLocked8 = 7;
ASSERT_EQ(ByteCountNotLocked8, codeBytesSize());
@@ -408,7 +408,7 @@
0x01, 0x00);
reset();
- __ cmpxchg(IceType_i8, Address::Absolute(0x1FF00),
+ __ cmpxchg(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, Locked);
static constexpr uint8_t ByteCountLocked8 = 1 + ByteCountNotLocked8;
ASSERT_EQ(ByteCountLocked8, codeBytesSize());
@@ -418,7 +418,7 @@
}
{
- __ cmpxchg(IceType_i16, Address::Absolute(0x1FF00),
+ __ cmpxchg(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, NotLocked);
static constexpr uint8_t ByteCountNotLocked16 = 8;
ASSERT_EQ(ByteCountNotLocked16, codeBytesSize());
@@ -426,7 +426,7 @@
0xFF, 0x01, 0x00);
reset();
- __ cmpxchg(IceType_i16, Address::Absolute(0x1FF00),
+ __ cmpxchg(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, Locked);
static constexpr uint8_t ByteCountLocked16 = 1 + ByteCountNotLocked16;
ASSERT_EQ(ByteCountLocked16, codeBytesSize());
@@ -436,7 +436,7 @@
}
{
- __ cmpxchg(IceType_i32, Address::Absolute(0x1FF00),
+ __ cmpxchg(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, NotLocked);
static constexpr uint8_t ByteCountNotLocked32 = 7;
ASSERT_EQ(ByteCountNotLocked32, codeBytesSize());
@@ -444,7 +444,7 @@
0x01, 0x00);
reset();
- __ cmpxchg(IceType_i32, Address::Absolute(0x1FF00),
+ __ cmpxchg(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
GPRRegister::Encoded_Reg_esi, Locked);
static constexpr uint8_t ByteCountLocked32 = 1 + ByteCountNotLocked32;
ASSERT_EQ(ByteCountLocked32, codeBytesSize());
diff --git a/unittest/AssemblerX8632/LowLevel.cpp b/unittest/AssemblerX8632/LowLevel.cpp
index 7593b4c..1f01e8e 100644
--- a/unittest/AssemblerX8632/LowLevel.cpp
+++ b/unittest/AssemblerX8632/LowLevel.cpp
@@ -141,7 +141,7 @@
", " #__VA_ARGS__ ")"; \
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, GPRRegister::Encoded_Reg_##Dst, \
- Address(Address::ABSOLUTE, Disp)); \
+ Address(Disp, AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -155,7 +155,8 @@
", " #ByteCountUntyped ", " #__VA_ARGS__ ")"; \
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, GPRRegister::Encoded_Reg_##Dst, \
- Address(GPRRegister::Encoded_Reg_##Base, Disp)); \
+ Address(GPRRegister::Encoded_Reg_##Base, Disp, \
+ AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -171,7 +172,7 @@
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, GPRRegister::Encoded_Reg_##Dst, \
Address(GPRRegister::Encoded_Reg_##Index, Traits::TIMES_##Scale, \
- Disp)); \
+ Disp, AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -188,7 +189,7 @@
__ Inst(IceType_##OpType, GPRRegister::Encoded_Reg_##Dst, \
Address(GPRRegister::Encoded_Reg_##Base, \
GPRRegister::Encoded_Reg_##Index, Traits::TIMES_##Scale, \
- Disp)); \
+ Disp, AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -204,7 +205,8 @@
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, Address(GPRRegister::Encoded_Reg_##Base, \
GPRRegister::Encoded_Reg_##Index, \
- Traits::TIMES_##Scale, Disp), \
+ Traits::TIMES_##Scale, Disp, \
+ AssemblerFixup::NoFixup), \
Immediate(Imm)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
@@ -221,7 +223,8 @@
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, Address(GPRRegister::Encoded_Reg_##Base, \
GPRRegister::Encoded_Reg_##Index, \
- Traits::TIMES_##Scale, Disp), \
+ Traits::TIMES_##Scale, Disp, \
+ AssemblerFixup::NoFixup), \
GPRRegister::Encoded_Reg_##Src); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
diff --git a/unittest/AssemblerX8632/TestUtil.h b/unittest/AssemblerX8632/TestUtil.h
index e3d94db..c11868f 100644
--- a/unittest/AssemblerX8632/TestUtil.h
+++ b/unittest/AssemblerX8632/TestUtil.h
@@ -751,7 +751,7 @@
}
Address dwordAddress(uint32_t Dword) {
- return Address(GPRRegister::Encoded_Reg_ebp, dwordDisp(Dword));
+ return Address(GPRRegister::Encoded_Reg_ebp, dwordDisp(Dword), nullptr);
}
private:
diff --git a/unittest/AssemblerX8632/X87.cpp b/unittest/AssemblerX8632/X87.cpp
index d3f38a1..ff36f7e 100644
--- a/unittest/AssemblerX8632/X87.cpp
+++ b/unittest/AssemblerX8632/X87.cpp
@@ -14,8 +14,10 @@
namespace {
TEST_F(AssemblerX8632LowLevelTest, Fld) {
- __ fld(IceType_f32, Address(GPRRegister::Encoded_Reg_ebp, 1));
- __ fld(IceType_f64, Address(GPRRegister::Encoded_Reg_ebp, 0x10000));
+ __ fld(IceType_f32, Address(GPRRegister::Encoded_Reg_ebp, 1,
+ AssemblerFixup::NoFixup));
+ __ fld(IceType_f64, Address(GPRRegister::Encoded_Reg_ebp, 0x10000,
+ AssemblerFixup::NoFixup));
constexpr size_t ByteCount = 9;
ASSERT_EQ(ByteCount, codeBytesSize());
@@ -31,8 +33,10 @@
}
TEST_F(AssemblerX8632LowLevelTest, FstpAddr) {
- __ fstp(IceType_f32, Address(GPRRegister::Encoded_Reg_ebp, 1));
- __ fstp(IceType_f64, Address(GPRRegister::Encoded_Reg_ebp, 0x10000));
+ __ fstp(IceType_f32, Address(GPRRegister::Encoded_Reg_ebp, 1,
+ AssemblerFixup::NoFixup));
+ __ fstp(IceType_f64, Address(GPRRegister::Encoded_Reg_ebp, 0x10000,
+ AssemblerFixup::NoFixup));
constexpr size_t ByteCount = 9;
ASSERT_EQ(ByteCount, codeBytesSize());
@@ -57,7 +61,8 @@
}
TEST_F(AssemblerX8632LowLevelTest, FnstcwAddr) {
- __ fnstcw(Address(GPRRegister::Encoded_Reg_ebp, 0x12345));
+ __ fnstcw(Address(GPRRegister::Encoded_Reg_ebp, 0x12345,
+ AssemblerFixup::NoFixup));
constexpr size_t ByteCount = 6;
ASSERT_EQ(ByteCount, codeBytesSize());
@@ -69,7 +74,8 @@
}
TEST_F(AssemblerX8632LowLevelTest, FldcwAddr) {
- __ fldcw(Address(GPRRegister::Encoded_Reg_ebp, 0x12345));
+ __ fldcw(Address(GPRRegister::Encoded_Reg_ebp, 0x12345,
+ AssemblerFixup::NoFixup));
constexpr size_t ByteCount = 6;
ASSERT_EQ(ByteCount, codeBytesSize());
diff --git a/unittest/AssemblerX8664/ControlFlow.cpp b/unittest/AssemblerX8664/ControlFlow.cpp
index 704f6fd..d22cca5 100644
--- a/unittest/AssemblerX8664/ControlFlow.cpp
+++ b/unittest/AssemblerX8664/ControlFlow.cpp
@@ -169,7 +169,7 @@
} \
__ mov(IceType_i64, Encoded_GPR_##Dst##q(), dwordAddress(T0)); \
__ mov(IceType_i64, Encoded_GPR_##Src##q(), Encoded_GPR_rsp()); \
- __ call(Address(Encoded_GPR_##Src##q(), 0)); \
+ __ call(Address(Encoded_GPR_##Src##q(), 0, AssemblerFixup::NoFixup)); \
__ popl(Encoded_GPR_##Src##q()); \
\
AssembledTest test = assemble(); \
diff --git a/unittest/AssemblerX8664/GPRArith.cpp b/unittest/AssemblerX8664/GPRArith.cpp
index f5404f8..ee5c01c 100644
--- a/unittest/AssemblerX8664/GPRArith.cpp
+++ b/unittest/AssemblerX8664/GPRArith.cpp
@@ -117,7 +117,7 @@
__ mov(IceType_i32, Encoded_GPR_##Base(), Immediate(BaseValue)); \
} \
__ lea(IceType_i32, Encoded_GPR_##Dst(), \
- Address(Encoded_GPR_##Base(), Disp)); \
+ Address(Encoded_GPR_##Base(), Disp, AssemblerFixup::NoFixup)); \
AssembledTest test = assemble(); \
test.run(); \
ASSERT_EQ(test.Base##d() + (Disp), test.Dst##d()) \
@@ -134,13 +134,17 @@
__ mov(IceType_i32, Encoded_GPR_##Index(), Immediate(IndexValue)); \
} \
__ lea(IceType_i32, Encoded_GPR_##Dst0(), \
- Address(Encoded_GPR_##Index(), Traits::TIMES_1, Disp)); \
+ Address(Encoded_GPR_##Index(), Traits::TIMES_1, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, Encoded_GPR_##Dst1(), \
- Address(Encoded_GPR_##Index(), Traits::TIMES_2, Disp)); \
+ Address(Encoded_GPR_##Index(), Traits::TIMES_2, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, Encoded_GPR_##Dst2(), \
- Address(Encoded_GPR_##Index(), Traits::TIMES_4, Disp)); \
+ Address(Encoded_GPR_##Index(), Traits::TIMES_4, Disp, \
+ AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, Encoded_GPR_##Dst3(), \
- Address(Encoded_GPR_##Index(), Traits::TIMES_8, Disp)); \
+ Address(Encoded_GPR_##Index(), Traits::TIMES_8, Disp, \
+ AssemblerFixup::NoFixup)); \
AssembledTest test = assemble(); \
test.run(); \
ASSERT_EQ((test.Index##d() << Traits::TIMES_1) + (Disp), test.Dst0##d()) \
@@ -171,16 +175,16 @@
\
__ lea(IceType_i32, Encoded_GPR_##Dst0(), \
Address(Encoded_GPR_##Base(), Encoded_GPR_##Index(), \
- Traits::TIMES_1, Disp)); \
+ Traits::TIMES_1, Disp, AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, Encoded_GPR_##Dst1(), \
Address(Encoded_GPR_##Base(), Encoded_GPR_##Index(), \
- Traits::TIMES_2, Disp)); \
+ Traits::TIMES_2, Disp, AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, Encoded_GPR_##Dst2(), \
Address(Encoded_GPR_##Base(), Encoded_GPR_##Index(), \
- Traits::TIMES_4, Disp)); \
+ Traits::TIMES_4, Disp, AssemblerFixup::NoFixup)); \
__ lea(IceType_i32, Encoded_GPR_##Dst3(), \
Address(Encoded_GPR_##Base(), Encoded_GPR_##Index(), \
- Traits::TIMES_8, Disp)); \
+ Traits::TIMES_8, Disp, AssemblerFixup::NoFixup)); \
AssembledTest test = assemble(); \
test.run(); \
uint32_t ExpectedIndexValue = test.Index(); \
@@ -323,7 +327,7 @@
do { \
static constexpr char TestString[] = "(" #Dst ", " #Value ")"; \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst, \
- Address(Address::ABSOLUTE, Value)); \
+ Address(Value, AssemblerFixup::NoFixup)); \
static constexpr uint32_t ByteCount = 6; \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
static constexpr uint8_t Opcode = 0x8D; \
diff --git a/unittest/AssemblerX8664/Locked.cpp b/unittest/AssemblerX8664/Locked.cpp
index 086bd04..3d48ccb 100644
--- a/unittest/AssemblerX8664/Locked.cpp
+++ b/unittest/AssemblerX8664/Locked.cpp
@@ -204,15 +204,16 @@
// Ensures that xadd emits a lock prefix accordingly.
{
- __ xadd(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- NotLocked);
+ __ xadd(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), NotLocked);
static constexpr uint8_t ByteCountNotLocked8 = 8;
ASSERT_EQ(ByteCountNotLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked8>(codeBytes(), 0x44, 0x0F, 0xC0,
0x35, 0x00, 0xFF, 0x01, 0x00));
reset();
- __ xadd(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(), Locked);
+ __ xadd(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked8 = 1 + ByteCountNotLocked8;
ASSERT_EQ(ByteCountLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked8>(
@@ -221,15 +222,16 @@
}
{
- __ xadd(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- NotLocked);
+ __ xadd(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), NotLocked);
static constexpr uint8_t ByteCountNotLocked16 = 9;
ASSERT_EQ(ByteCountNotLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked16>(
codeBytes(), 0x66, 0x44, 0x0F, 0xC1, 0x35, 0x00, 0xFF, 0x01, 0x00));
reset();
- __ xadd(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(), Locked);
+ __ xadd(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked16 = 1 + ByteCountNotLocked16;
ASSERT_EQ(ByteCountLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked16>(codeBytes(), 0x66, 0xF0, 0x44,
@@ -239,15 +241,16 @@
}
{
- __ xadd(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- NotLocked);
+ __ xadd(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), NotLocked);
static constexpr uint8_t ByteCountNotLocked32 = 8;
ASSERT_EQ(ByteCountNotLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked32>(
codeBytes(), 0x44, 0x0F, 0xC1, 0x35, 0x00, 0xFF, 0x01, 0x00));
reset();
- __ xadd(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(), Locked);
+ __ xadd(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked32 = 1 + ByteCountNotLocked32;
ASSERT_EQ(ByteCountLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked32>(
@@ -329,14 +332,14 @@
static constexpr bool Locked = true;
// Ensures that cmpxchg8b emits a lock prefix accordingly.
- __ cmpxchg8b(Address::Absolute(0x1FF00), NotLocked);
+ __ cmpxchg8b(Address(0x1FF00, AssemblerFixup::NoFixup), NotLocked);
static constexpr uint8_t ByteCountNotLocked = 7;
ASSERT_EQ(ByteCountNotLocked, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked>(codeBytes(), 0x0F, 0xC7, 0x0D,
0x00, 0xFF, 0x01, 0x00));
reset();
- __ cmpxchg8b(Address::Absolute(0x1FF00), Locked);
+ __ cmpxchg8b(Address(0x1FF00, AssemblerFixup::NoFixup), Locked);
static constexpr uint8_t ByteCountLocked = 1 + ByteCountNotLocked;
ASSERT_EQ(ByteCountLocked, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked>(codeBytes(), 0xF0, 0x0F, 0xC7, 0x0D,
@@ -436,16 +439,16 @@
// Ensures that cmpxchg emits a lock prefix accordingly.
{
- __ cmpxchg(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- NotLocked);
+ __ cmpxchg(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), NotLocked);
static constexpr uint8_t ByteCountNotLocked8 = 8;
ASSERT_EQ(ByteCountNotLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked8>(codeBytes(), 0x44, 0x0F, 0xB0,
0x35, 0x00, 0xFF, 0x01, 0x00));
reset();
- __ cmpxchg(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- Locked);
+ __ cmpxchg(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked8 = 1 + ByteCountNotLocked8;
ASSERT_EQ(ByteCountLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked8>(
@@ -454,16 +457,16 @@
}
{
- __ cmpxchg(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- NotLocked);
+ __ cmpxchg(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), NotLocked);
static constexpr uint8_t ByteCountNotLocked16 = 9;
ASSERT_EQ(ByteCountNotLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked16>(
codeBytes(), 0x66, 0x44, 0x0F, 0xB1, 0x35, 0x00, 0xFF, 0x01, 0x00));
reset();
- __ cmpxchg(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- Locked);
+ __ cmpxchg(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked16 = 1 + ByteCountNotLocked16;
ASSERT_EQ(ByteCountLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked16>(codeBytes(), 0x66, 0xF0, 0x44,
@@ -473,16 +476,16 @@
}
{
- __ cmpxchg(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- NotLocked);
+ __ cmpxchg(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), NotLocked);
static constexpr uint8_t ByteCountNotLocked32 = 8;
ASSERT_EQ(ByteCountNotLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked32>(
codeBytes(), 0x44, 0x0F, 0xB1, 0x35, 0x00, 0xFF, 0x01, 0x00));
reset();
- __ cmpxchg(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
- Locked);
+ __ cmpxchg(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
+ Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked32 = 1 + ByteCountNotLocked32;
ASSERT_EQ(ByteCountLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked32>(
diff --git a/unittest/AssemblerX8664/LowLevel.cpp b/unittest/AssemblerX8664/LowLevel.cpp
index 3c87d8b..20f2252 100644
--- a/unittest/AssemblerX8664/LowLevel.cpp
+++ b/unittest/AssemblerX8664/LowLevel.cpp
@@ -187,7 +187,7 @@
", " #__VA_ARGS__ ")"; \
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, Encoded_GPR_##Dst(), \
- Address(Address::ABSOLUTE, Disp)); \
+ Address(Disp, AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -201,7 +201,7 @@
", " #ByteCountUntyped ", " #__VA_ARGS__ ")"; \
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, Encoded_GPR_##Dst(), \
- Address(Encoded_GPR_##Base(), Disp)); \
+ Address(Encoded_GPR_##Base(), Disp, AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -215,8 +215,10 @@
"(" #Inst ", " #Dst ", " #Index ", " #Scale ", " #Disp ", " #OpType \
", " #ByteCountUntyped ", " #__VA_ARGS__ ")"; \
static constexpr uint8_t ByteCount = ByteCountUntyped; \
- __ Inst(IceType_##OpType, Encoded_GPR_##Dst(), \
- Address(Encoded_GPR_##Index(), Traits::TIMES_##Scale, Disp)); \
+ __ Inst( \
+ IceType_##OpType, Encoded_GPR_##Dst(), \
+ Address(Encoded_GPR_##Index(), Traits::TIMES_##Scale, Disp, \
+ AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -232,7 +234,7 @@
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, Encoded_GPR_##Dst(), \
Address(Encoded_GPR_##Base(), Encoded_GPR_##Index(), \
- Traits::TIMES_##Scale, Disp)); \
+ Traits::TIMES_##Scale, Disp, AssemblerFixup::NoFixup)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \
@@ -248,7 +250,7 @@
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, \
Address(Encoded_GPR_##Base(), Encoded_GPR_##Index(), \
- Traits::TIMES_##Scale, Disp), \
+ Traits::TIMES_##Scale, Disp, AssemblerFixup::NoFixup), \
Immediate(Imm)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
@@ -265,7 +267,7 @@
static constexpr uint8_t ByteCount = ByteCountUntyped; \
__ Inst(IceType_##OpType, \
Address(Encoded_GPR_##Base(), Encoded_GPR_##Index(), \
- Traits::TIMES_##Scale, Disp), \
+ Traits::TIMES_##Scale, Disp, AssemblerFixup::NoFixup), \
Encoded_GPR_##Src()); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
diff --git a/unittest/AssemblerX8664/TestUtil.h b/unittest/AssemblerX8664/TestUtil.h
index 4615459..5a1d4a5 100644
--- a/unittest/AssemblerX8664/TestUtil.h
+++ b/unittest/AssemblerX8664/TestUtil.h
@@ -930,7 +930,7 @@
}
Address dwordAddress(uint32_t Dword) {
- return Address(Encoded_GPR_r9(), dwordDisp(Dword));
+ return Address(Encoded_GPR_r9(), dwordDisp(Dword), nullptr);
}
private: