Subzero. X8664. Enables RIP-based addressing mode.
BUG= https://bugs.chromium.org/p/nativeclient/issues/detail?id=4077
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/1616103002 .
diff --git a/src/IceAssemblerX86Base.h b/src/IceAssemblerX86Base.h
index 3af54e3..e155a62 100644
--- a/src/IceAssemblerX86Base.h
+++ b/src/IceAssemblerX86Base.h
@@ -729,7 +729,7 @@
inline void emitXmmRegisterOperand(RegType reg, RmType rm);
inline void emitOperandSizeOverride();
- void emitOperand(int rm, const Operand &operand);
+ void emitOperand(int rm, const Operand &operand, RelocOffsetT Addend = 0);
void emitImmediate(Type ty, const Immediate &imm);
void emitComplexI8(int rm, const Operand &operand,
const Immediate &immediate);
diff --git a/src/IceAssemblerX86BaseImpl.h b/src/IceAssemblerX86BaseImpl.h
index f007194..0035eee 100644
--- a/src/IceAssemblerX86BaseImpl.h
+++ b/src/IceAssemblerX86BaseImpl.h
@@ -299,11 +299,13 @@
emitRex(Ty, dst, RexRegIrrelevant);
if (isByteSizedType(Ty)) {
emitUint8(0xC6);
- emitOperand(0, dst);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(0, dst, OffsetFromNextInstruction);
emitUint8(imm.value() & 0xFF);
} else {
emitUint8(0xC7);
- emitOperand(0, dst);
+ const uint8_t OffsetFromNextInstruction = Ty == IceType_i16 ? 2 : 4;
+ emitOperand(0, dst, OffsetFromNextInstruction);
emitImmediate(Ty, imm);
}
}
@@ -1429,7 +1431,8 @@
emitRex(RexTypeIrrelevant, src, dst);
emitUint8(0x0F);
emitUint8(0xC2);
- emitOperand(gprEncoding(dst), src);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(gprEncoding(dst), src, OffsetFromNextInstruction);
emitUint8(CmpCondition);
}
@@ -1551,7 +1554,8 @@
emitRex(RexTypeIrrelevant, src, dst);
emitUint8(0x0F);
emitUint8(0x70);
- emitOperand(gprEncoding(dst), src);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(gprEncoding(dst), src, OffsetFromNextInstruction);
assert(imm.is_uint8());
emitUint8(imm.value());
}
@@ -1578,7 +1582,8 @@
emitRex(RexTypeIrrelevant, src, dst);
emitUint8(0x0F);
emitUint8(0xC6);
- emitOperand(gprEncoding(dst), src);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(gprEncoding(dst), src, OffsetFromNextInstruction);
assert(imm.is_uint8());
emitUint8(imm.value());
}
@@ -1830,7 +1835,8 @@
emitUint8(0x0F);
emitUint8(0x3A);
emitUint8(0x21);
- emitOperand(gprEncoding(dst), src);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(gprEncoding(dst), src, OffsetFromNextInstruction);
emitUint8(imm.value());
}
@@ -1869,7 +1875,8 @@
emitUint8(0x3A);
emitUint8(isByteSizedType(Ty) ? 0x20 : 0x22);
}
- emitOperand(gprEncoding(dst), src);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(gprEncoding(dst), src, OffsetFromNextInstruction);
emitUint8(imm.value());
}
@@ -2244,7 +2251,8 @@
emitAddrSizeOverridePrefix();
emitRex(Ty, addr, RexRegIrrelevant);
emitUint8(0xF6);
- emitOperand(0, addr);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(0, addr, OffsetFromNextInstruction);
emitUint8(immediate.value() & 0xFF);
} else {
if (Ty == IceType_i16)
@@ -2252,7 +2260,8 @@
emitAddrSizeOverridePrefix();
emitRex(Ty, addr, RexRegIrrelevant);
emitUint8(0xF7);
- emitOperand(0, addr);
+ const uint8_t OffsetFromNextInstruction = Ty == IceType_i16 ? 2 : 4;
+ emitOperand(0, addr, OffsetFromNextInstruction);
emitImmediate(Ty, immediate);
}
}
@@ -2653,11 +2662,13 @@
emitRex(Ty, address, dst);
if (imm.is_int8()) {
emitUint8(0x6B);
- emitOperand(gprEncoding(dst), address);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(gprEncoding(dst), address, OffsetFromNextInstruction);
emitUint8(imm.value() & 0xFF);
} else {
emitUint8(0x69);
- emitOperand(gprEncoding(dst), address);
+ const uint8_t OffsetFromNextInstruction = Ty == IceType_i16 ? 2 : 4;
+ emitOperand(gprEncoding(dst), address, OffsetFromNextInstruction);
emitImmediate(Ty, imm);
}
}
@@ -3346,7 +3357,8 @@
}
template <typename TraitsType>
-void AssemblerX86Base<TraitsType>::emitOperand(int rm, const Operand &operand) {
+void AssemblerX86Base<TraitsType>::emitOperand(int rm, const Operand &operand,
+ RelocOffsetT Addend) {
assert(rm >= 0 && rm < 8);
const intptr_t length = operand.length_;
assert(length > 0);
@@ -3362,9 +3374,18 @@
displacement_start = 2;
}
// Emit the displacement and the fixup that affects it, if any.
- if (operand.fixup()) {
- emitFixup(operand.fixup());
+ AssemblerFixup *Fixup = operand.fixup();
+ if (Fixup != nullptr) {
+ emitFixup(Fixup);
assert(length - displacement_start == 4);
+ if (fixupIsPCRel(Fixup->kind())) {
+ Fixup->set_addend(-Addend);
+ int32_t Offset;
+ memmove(&Offset, &operand.encoding_[displacement_start], sizeof(Offset));
+ Offset -= Addend;
+ emitInt32(Offset);
+ return;
+ }
}
for (intptr_t i = displacement_start; i < length; i++) {
emitUint8(operand.encoding_[i]);
@@ -3374,15 +3395,17 @@
template <typename TraitsType>
void AssemblerX86Base<TraitsType>::emitImmediate(Type Ty,
const Immediate &imm) {
+ auto *const Fixup = imm.fixup();
if (Ty == IceType_i16) {
- assert(!imm.fixup());
+ assert(Fixup == nullptr);
emitInt16(imm.value());
- } else {
- if (imm.fixup()) {
- emitFixup(imm.fixup());
- }
- emitInt32(imm.value());
+ return;
}
+
+ if (Fixup != nullptr) {
+ emitFixup(Fixup);
+ }
+ emitInt32(imm.value());
}
template <typename TraitsType>
@@ -3397,7 +3420,8 @@
} else {
// Use sign-extended 8-bit immediate.
emitUint8(0x80);
- emitOperand(rm, operand);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(rm, operand, OffsetFromNextInstruction);
emitUint8(immediate.value() & 0xFF);
}
}
@@ -3410,7 +3434,8 @@
if (immediate.is_int8()) {
// Use sign-extended 8-bit immediate.
emitUint8(0x83);
- emitOperand(rm, operand);
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(rm, operand, OffsetFromNextInstruction);
emitUint8(immediate.value() & 0xFF);
} else if (operand.IsRegister(Traits::Encoded_Reg_Accumulator)) {
// Use short form if the destination is eax.
@@ -3418,7 +3443,8 @@
emitImmediate(Ty, immediate);
} else {
emitUint8(0x81);
- emitOperand(rm, operand);
+ const uint8_t OffsetFromNextInstruction = Ty == IceType_i16 ? 2 : 4;
+ emitOperand(rm, operand, OffsetFromNextInstruction);
emitImmediate(Ty, immediate);
}
}
@@ -3468,7 +3494,8 @@
emitOperand(rm, Operand(reg));
} else {
emitUint8(isByteSizedArithType(Ty) ? 0xC0 : 0xC1);
- emitOperand(rm, Operand(reg));
+ static constexpr RelocOffsetT OffsetFromNextInstruction = 1;
+ emitOperand(rm, Operand(reg), OffsetFromNextInstruction);
emitUint8(imm.value() & 0xFF);
}
}
diff --git a/src/IceFixups.cpp b/src/IceFixups.cpp
index ef594ac..b323bc6 100644
--- a/src/IceFixups.cpp
+++ b/src/IceFixups.cpp
@@ -23,10 +23,10 @@
RelocOffsetT AssemblerFixup::offset() const {
if (isNullSymbol())
- return 0;
+ return addend_;
if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(value_))
- return CR->getOffset();
- return 0;
+ return CR->getOffset() + addend_;
+ return addend_;
}
IceString AssemblerFixup::symbol(const GlobalContext *Ctx,
diff --git a/src/IceFixups.h b/src/IceFixups.h
index aeb0fb0..265b850 100644
--- a/src/IceFixups.h
+++ b/src/IceFixups.h
@@ -25,7 +25,7 @@
/// Assembler fixups are positions in generated code/data that hold relocation
/// information that needs to be processed before finalizing the code/data.
-struct AssemblerFixup {
+class AssemblerFixup {
AssemblerFixup &operator=(const AssemblerFixup &) = delete;
public:
@@ -53,6 +53,8 @@
void set_value(const Constant *Value) { value_ = Value; }
+ void set_addend(RelocOffsetT Addend) { addend_ = Addend; }
+
/// Emits fixup, then returns the number of bytes to skip.
virtual size_t emit(GlobalContext *Ctx, const Assembler &Asm) const;
@@ -61,6 +63,9 @@
intptr_t position_ = 0;
FixupKind kind_ = 0;
const Constant *value_ = nullptr;
+ // An offset addend to the fixup offset (as returned by offset()), in case the
+ // assembler needs to adjust it.
+ RelocOffsetT addend_ = 0;
};
/// Extends a fixup to be textual. That is, it emits text instead of a sequence
diff --git a/src/IceInstX8664.cpp b/src/IceInstX8664.cpp
index a7236a3..7bac57e 100644
--- a/src/IceInstX8664.cpp
+++ b/src/IceInstX8664.cpp
@@ -136,7 +136,17 @@
// rematerializable base/index and Disp.
assert(Disp == 0);
const bool UseNonsfi = Func->getContext()->getFlags().getUseNonsfi();
- CR->emitWithoutPrefix(Func->getTarget(), UseNonsfi ? "@GOTOFF" : "");
+ CR->emitWithoutPrefix(Target, UseNonsfi ? "@GOTOFF" : "");
+ assert(!UseNonsfi);
+ if (Base == nullptr && Index == nullptr) {
+ if (CR->getName() != "") { // rip-relative addressing.
+ if (NeedSandboxing) {
+ Str << "(%rip)";
+ } else {
+ Str << "(%eip)";
+ }
+ }
+ }
} else {
llvm_unreachable("Invalid offset type for x86 mem operand");
}
@@ -256,10 +266,16 @@
if (getOffset() != nullptr) {
if (const auto *CI = llvm::dyn_cast<ConstantInteger32>(getOffset())) {
Disp += static_cast<int32_t>(CI->getValue());
- } else if (const auto CR =
+ } else if (const auto *CR =
llvm::dyn_cast<ConstantRelocatable>(getOffset())) {
- Disp = CR->getOffset();
- Fixup = Asm->createFixup(FK_Abs, CR);
+ RelocOffsetT DispAdjustment = 0;
+ if (CR->getName() != "") {
+ const auto FixupKind =
+ (getBase() != nullptr || getIndex() != nullptr) ? FK_Abs : FK_PcRel;
+ DispAdjustment = FixupKind == FK_PcRel ? 4 : 0;
+ Fixup = Asm->createFixup(FixupKind, CR);
+ }
+ Disp = CR->getOffset() - DispAdjustment;
} else {
llvm_unreachable("Unexpected offset type");
}
@@ -290,7 +306,15 @@
Fixup);
}
- return X8664::Traits::Address(Disp, Fixup);
+ if (Fixup == nullptr) {
+ // Absolute addresses are not allowed in Nexes -- they must be rebased
+ // w.r.t. %r15.
+ // Exception: LEAs are fine because they do not touch memory.
+ assert(!Target->needSandboxing() || IsLeaAddr);
+ return X8664::Traits::Address::Absolute(Disp);
+ }
+
+ return X8664::Traits::Address::RipRelative(Disp, Fixup);
}
TargetX8664Traits::Address
diff --git a/src/IceTargetLoweringX8664.cpp b/src/IceTargetLoweringX8664.cpp
index 5884321..aed9b30 100644
--- a/src/IceTargetLoweringX8664.cpp
+++ b/src/IceTargetLoweringX8664.cpp
@@ -294,10 +294,31 @@
(void)Node;
}
+namespace {
+bool isAssignedToRspOrRbp(const Variable *Var) {
+ if (Var == nullptr) {
+ return false;
+ }
+
+ if (Var->isRematerializable()) {
+ return true;
+ }
+
+ if (!Var->hasReg()) {
+ return false;
+ }
+
+ const int32_t RegNum = Var->getRegNum();
+ if ((RegNum == Traits::RegisterSet::Reg_rsp) ||
+ (RegNum == Traits::RegisterSet::Reg_rbp)) {
+ return true;
+ }
+
+ return false;
+}
+} // end of anonymous namespace
+
Traits::X86OperandMem *TargetX8664::_sandbox_mem_reference(X86OperandMem *Mem) {
- // In x86_64-nacl, all memory references are relative to %r15 (i.e., %rzp.)
- // NaCl sandboxing also requires that any registers that are not %rsp and
- // %rbp to be 'truncated' to 32-bit before memory access.
if (SandboxingType == ST_None) {
return Mem;
}
@@ -307,25 +328,49 @@
"_sandbox_mem_reference not implemented for nonsfi");
}
+ // In x86_64-nacl, all memory references are relative to a base register
+ // (%r15, %rsp, %rbp, or %rip).
+
Variable *Base = Mem->getBase();
Variable *Index = Mem->getIndex();
uint16_t Shift = 0;
- Variable *ZeroReg =
- getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
+ Variable *ZeroReg = RebasePtr;
Constant *Offset = Mem->getOffset();
Variable *T = nullptr;
+ bool AbsoluteAddress = false;
+ if (Base == nullptr && Index == nullptr) {
+ if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Offset)) {
+ if (CR->getName() != "") {
+ // Mem is RIP-relative. There's no need to rebase it.
+ return Mem;
+ }
+ }
+ // Offset is an absolute address, so we need to emit
+ // Offset(%r15)
+ AbsoluteAddress = true;
+ }
+
if (Mem->getIsRebased()) {
- // If Mem.IsRebased, then we don't need to update Mem to contain a reference
- // to a valid base register (%r15, %rsp, or %rbp), but we still need to
- // truncate Mem.Index (if any) to 32-bit.
- assert(ZeroReg == Base || Base->isRematerializable());
- T = makeReg(IceType_i32);
- _mov(T, Index);
- Shift = Mem->getShift();
+ // If Mem.IsRebased, then we don't need to update Mem, as it's already been
+ // updated to contain a reference to one of %rsp, %rbp, or %r15.
+ // We don't return early because we still need to zero extend Index.
+ assert(ZeroReg == Base || AbsoluteAddress || isAssignedToRspOrRbp(Base));
+ if (!AbsoluteAddress) {
+ // If Mem is an absolute address, no need to update ZeroReg (which is
+ // already set to %r15.)
+ ZeroReg = Base;
+ }
+ if (Index != nullptr) {
+ T = makeReg(IceType_i32);
+ _mov(T, Index);
+ Shift = Mem->getShift();
+ }
} else {
if (Base != nullptr) {
- if (Base->isRematerializable()) {
+      // If Base is a valid base pointer we don't need to use the RebasePtr.
+      // Doing this may avoid the need to zero-extend the memory operand.
+ if (isAssignedToRspOrRbp(Base)) {
ZeroReg = Base;
} else {
T = Base;
@@ -334,11 +379,23 @@
if (Index != nullptr) {
assert(!Index->isRematerializable());
+ // If Index is not nullptr, it is mandatory that T is a nullptr.
+ // Otherwise, the lowering generated a memory operand with two registers.
+ // Note that Base might still be non-nullptr, but it must be a valid
+ // base register.
if (T != nullptr) {
llvm::report_fatal_error("memory reference contains base and index.");
}
- T = Index;
- Shift = Mem->getShift();
+      // If the Index is not shifted, and it is a valid base register, and
+      // ZeroReg is still RebasePtr, then we set ZeroReg = Index, hopefully
+      // avoiding the need to zero-extend the memory operand (which may still
+      // happen -- see NeedsLea below.)
+ if (Shift == 0 && isAssignedToRspOrRbp(Index) && ZeroReg == RebasePtr) {
+ ZeroReg = Index;
+ } else {
+ T = Index;
+ Shift = Mem->getShift();
+ }
}
}
@@ -348,11 +405,13 @@
// needed to ensure the sandboxed memory operand will only use the lower
// 32-bits of T+Offset.
bool NeedsLea = false;
- if (const auto *Offset = Mem->getOffset()) {
- if (llvm::isa<ConstantRelocatable>(Offset)) {
- NeedsLea = true;
+ if (Offset != nullptr) {
+ if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Offset)) {
+ NeedsLea = CR->getName() != "" || CR->getOffset() < 0;
} else if (const auto *Imm = llvm::cast<ConstantInteger32>(Offset)) {
NeedsLea = Imm->getValue() < 0;
+ } else {
+ llvm::report_fatal_error("Unexpected Offset type.");
}
}
@@ -362,21 +421,20 @@
if (T->hasReg()) {
RegNum = Traits::getGprForType(IceType_i64, T->getRegNum());
RegNum32 = Traits::getGprForType(IceType_i32, RegNum);
- switch (RegNum) {
- case Traits::RegisterSet::Reg_rsp:
- case Traits::RegisterSet::Reg_rbp:
- // Memory operands referencing rsp/rbp do not need to be sandboxed.
- return Mem;
- }
+ // At this point, if T was assigned to rsp/rbp, then we would have already
+ // made this the ZeroReg.
+ assert(RegNum != Traits::RegisterSet::Reg_rsp);
+ assert(RegNum != Traits::RegisterSet::Reg_rbp);
}
switch (T->getType()) {
default:
+ llvm::report_fatal_error("Mem pointer should be a 32-bit GPR.");
case IceType_i64:
// Even though "default:" would also catch T.Type == IceType_i64, an
// explicit 'case IceType_i64' shows that memory operands are always
// supposed to be 32-bits.
- llvm::report_fatal_error("Mem pointer should be 32-bit.");
+ llvm::report_fatal_error("Mem pointer should not be a 64-bit GPR.");
case IceType_i32: {
Variable *T64 = makeReg(IceType_i64, RegNum);
auto *Movzx = _movzx(T64, T);
diff --git a/src/IceTargetLoweringX8664Traits.h b/src/IceTargetLoweringX8664Traits.h
index bfa0231..44316b4 100644
--- a/src/IceTargetLoweringX8664Traits.h
+++ b/src/IceTargetLoweringX8664Traits.h
@@ -89,18 +89,15 @@
RexB = RexBase | (1 << 0),
};
- Operand(const Operand &other)
- : fixup_(other.fixup_), rex_(other.rex_), length_(other.length_) {
- memmove(&encoding_[0], &other.encoding_[0], other.length_);
- }
+ protected:
+ // Needed by subclass Address.
+ Operand() = default;
- Operand &operator=(const Operand &other) {
- length_ = other.length_;
- fixup_ = other.fixup_;
- rex_ = other.rex_;
- memmove(&encoding_[0], &other.encoding_[0], other.length_);
- return *this;
- }
+ public:
+ Operand(const Operand &) = default;
+ Operand(Operand &&) = default;
+ Operand &operator=(const Operand &) = default;
+ Operand &operator=(Operand &&) = default;
uint8_t mod() const { return (encoding_at(0) >> 6) & 3; }
@@ -131,19 +128,9 @@
return static_cast<int8_t>(encoding_[length_ - 1]);
}
- int32_t disp32() const {
- assert(length_ >= 5);
- // TODO(stichnot): This method is not currently used. Delete it along
- // with other unused methods, or use a safe version of bitCopy().
- llvm::report_fatal_error("Unexpected call to disp32()");
- // return Utils::bitCopy<int32_t>(encoding_[length_ - 4]);
- }
-
AssemblerFixup *fixup() const { return fixup_; }
protected:
- Operand() : fixup_(nullptr), length_(0) {} // Needed by subclass Address.
-
void SetModRM(int mod, GPRRegister rm) {
assert((mod & ~3) == 0);
encoding_[0] = (mod << 6) | (rm & 0x07);
@@ -175,10 +162,10 @@
void SetFixup(AssemblerFixup *fixup) { fixup_ = fixup; }
private:
- AssemblerFixup *fixup_;
+ AssemblerFixup *fixup_ = nullptr;
uint8_t rex_ = 0;
uint8_t encoding_[6];
- uint8_t length_;
+ uint8_t length_ = 0;
explicit Operand(GPRRegister reg) : fixup_(nullptr) { SetModRM(3, reg); }
@@ -201,31 +188,29 @@
};
class Address : public Operand {
- Address() = delete;
+ Address() = default;
public:
- Address(const Address &other) : Operand(other) {}
-
- Address &operator=(const Address &other) {
- Operand::operator=(other);
- return *this;
- }
+ Address(const Address &) = default;
+ Address(Address &&) = default;
+ Address &operator=(const Address &) = default;
+ Address &operator=(Address &&) = default;
Address(GPRRegister Base, int32_t Disp, AssemblerFixup *Fixup) {
if (Fixup == nullptr && Disp == 0 &&
- (Base & 7) != RegX8664::Encoded_Reg_ebp) {
+ (Base & 7) != RegX8664::Encoded_Reg_rbp) {
SetModRM(0, Base);
- if ((Base & 7) == RegX8664::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, Base);
+ if ((Base & 7) == RegX8664::Encoded_Reg_rsp)
+ SetSIB(TIMES_1, RegX8664::Encoded_Reg_rsp, Base);
} else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
SetModRM(1, Base);
- if ((Base & 7) == RegX8664::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, Base);
+ if ((Base & 7) == RegX8664::Encoded_Reg_rsp)
+ SetSIB(TIMES_1, RegX8664::Encoded_Reg_rsp, Base);
SetDisp8(Disp);
} else {
SetModRM(2, Base);
- if ((Base & 7) == RegX8664::Encoded_Reg_esp)
- SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, Base);
+ if ((Base & 7) == RegX8664::Encoded_Reg_rsp)
+ SetSIB(TIMES_1, RegX8664::Encoded_Reg_rsp, Base);
SetDisp32(Disp);
if (Fixup)
SetFixup(Fixup);
@@ -234,9 +219,9 @@
Address(GPRRegister Index, ScaleFactor Scale, int32_t Disp,
AssemblerFixup *Fixup) {
- assert(Index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
- SetModRM(0, RegX8664::Encoded_Reg_esp);
- SetSIB(Scale, Index, RegX8664::Encoded_Reg_ebp);
+ assert(Index != RegX8664::Encoded_Reg_rsp); // Illegal addressing mode.
+ SetModRM(0, RegX8664::Encoded_Reg_rsp);
+ SetSIB(Scale, Index, RegX8664::Encoded_Reg_rbp);
SetDisp32(Disp);
if (Fixup)
SetFixup(Fixup);
@@ -244,17 +229,17 @@
Address(GPRRegister Base, GPRRegister Index, ScaleFactor Scale,
int32_t Disp, AssemblerFixup *Fixup) {
- assert(Index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
+ assert(Index != RegX8664::Encoded_Reg_rsp); // Illegal addressing mode.
if (Fixup == nullptr && Disp == 0 &&
- (Base & 7) != RegX8664::Encoded_Reg_ebp) {
- SetModRM(0, RegX8664::Encoded_Reg_esp);
+ (Base & 7) != RegX8664::Encoded_Reg_rbp) {
+ SetModRM(0, RegX8664::Encoded_Reg_rsp);
SetSIB(Scale, Index, Base);
} else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
- SetModRM(1, RegX8664::Encoded_Reg_esp);
+ SetModRM(1, RegX8664::Encoded_Reg_rsp);
SetSIB(Scale, Index, Base);
SetDisp8(Disp);
} else {
- SetModRM(2, RegX8664::Encoded_Reg_esp);
+ SetModRM(2, RegX8664::Encoded_Reg_rsp);
SetSIB(Scale, Index, Base);
SetDisp32(Disp);
if (Fixup)
@@ -263,23 +248,37 @@
}
/// Generate a RIP-relative address expression on x86-64.
- Address(RelocOffsetT Offset, AssemblerFixup *Fixup) {
- SetModRM(0x0, RegX8664::Encoded_Reg_esp);
+ static Address RipRelative(RelocOffsetT Offset, AssemblerFixup *Fixup) {
+ assert(Fixup != nullptr);
+ assert(Fixup->kind() == FK_PcRel);
+ Address NewAddress;
+ NewAddress.SetModRM(0x0, RegX8664::Encoded_Reg_rbp);
- static constexpr ScaleFactor Scale = TIMES_1;
- SetSIB(Scale, RegX8664::Encoded_Reg_esp, RegX8664::Encoded_Reg_ebp);
// Use the Offset in the displacement for now. If we decide to process
// fixups later, we'll need to patch up the emitted displacement.
- SetDisp32(Offset);
+ NewAddress.SetDisp32(Offset);
if (Fixup)
- SetFixup(Fixup);
+ NewAddress.SetFixup(Fixup);
+
+ return NewAddress;
+ }
+
+ /// Generate an absolute address.
+ static Address Absolute(RelocOffsetT Addr) {
+ Address NewAddress;
+ NewAddress.SetModRM(0x0, RegX8664::Encoded_Reg_rsp);
+ static constexpr ScaleFactor NoScale = TIMES_1;
+ NewAddress.SetSIB(NoScale, RegX8664::Encoded_Reg_rsp,
+ RegX8664::Encoded_Reg_rbp);
+ NewAddress.SetDisp32(Addr);
+ return NewAddress;
}
static Address ofConstPool(Assembler *Asm, const Constant *Imm) {
// TODO(jpp): ???
AssemblerFixup *Fixup = Asm->createFixup(FK_Abs, Imm);
const RelocOffsetT Offset = 4;
- return Address(Offset, Fixup);
+ return Address::RipRelative(Offset, Fixup);
}
};
diff --git a/unittest/AssemblerX8664/GPRArith.cpp b/unittest/AssemblerX8664/GPRArith.cpp
index 2ec85cc..aad2e77 100644
--- a/unittest/AssemblerX8664/GPRArith.cpp
+++ b/unittest/AssemblerX8664/GPRArith.cpp
@@ -328,7 +328,7 @@
do { \
static constexpr char TestString[] = "(" #Dst ", " #Value ")"; \
__ lea(IceType_i32, GPRRegister::Encoded_Reg_##Dst, \
- Address(Value, AssemblerFixup::NoFixup)); \
+ Address::Absolute(Value)); \
static constexpr uint32_t ByteCount = 8; \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
static constexpr uint8_t Opcode = 0x8D; \
diff --git a/unittest/AssemblerX8664/Locked.cpp b/unittest/AssemblerX8664/Locked.cpp
index 9a0904d..f6dc494 100644
--- a/unittest/AssemblerX8664/Locked.cpp
+++ b/unittest/AssemblerX8664/Locked.cpp
@@ -204,8 +204,8 @@
// Ensures that xadd emits a lock prefix accordingly.
{
- __ xadd(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), NotLocked);
+ __ xadd(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ NotLocked);
static constexpr uint8_t ByteCountNotLocked8 = 10;
ASSERT_EQ(ByteCountNotLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked8>(codeBytes(), 0x67, 0x44, 0x0F,
@@ -213,8 +213,7 @@
0x01, 0x00));
reset();
- __ xadd(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), Locked);
+ __ xadd(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked8 = 1 + ByteCountNotLocked8;
ASSERT_EQ(ByteCountLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked8>(codeBytes(), 0xF0, 0x67, 0x44,
@@ -224,8 +223,8 @@
}
{
- __ xadd(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), NotLocked);
+ __ xadd(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ NotLocked);
static constexpr uint8_t ByteCountNotLocked16 = 11;
ASSERT_EQ(ByteCountNotLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked16>(codeBytes(), 0x66, 0x67, 0x44,
@@ -233,8 +232,7 @@
0xFF, 0x01, 0x00));
reset();
- __ xadd(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), Locked);
+ __ xadd(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked16 = 1 + ByteCountNotLocked16;
ASSERT_EQ(ByteCountLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked16>(codeBytes(), 0x66, 0xF0, 0x67,
@@ -244,8 +242,8 @@
}
{
- __ xadd(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), NotLocked);
+ __ xadd(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ NotLocked);
static constexpr uint8_t ByteCountNotLocked32 = 10;
ASSERT_EQ(ByteCountNotLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked32>(codeBytes(), 0x67, 0x44, 0x0F,
@@ -253,8 +251,7 @@
0x01, 0x00));
reset();
- __ xadd(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), Locked);
+ __ xadd(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(), Locked);
static constexpr uint8_t ByteCountLocked32 = 1 + ByteCountNotLocked32;
ASSERT_EQ(ByteCountLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked32>(codeBytes(), 0xF0, 0x67, 0x44,
@@ -337,14 +334,14 @@
static constexpr bool Locked = true;
// Ensures that cmpxchg8b emits a lock prefix accordingly.
- __ cmpxchg8b(Address(0x1FF00, AssemblerFixup::NoFixup), NotLocked);
+ __ cmpxchg8b(Address::Absolute(0x1FF00), NotLocked);
static constexpr uint8_t ByteCountNotLocked = 9;
ASSERT_EQ(ByteCountNotLocked, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked>(
codeBytes(), 0x67, 0x0F, 0xC7, 0x0C, 0x25, 0x00, 0xFF, 0x01, 0x00));
reset();
- __ cmpxchg8b(Address(0x1FF00, AssemblerFixup::NoFixup), Locked);
+ __ cmpxchg8b(Address::Absolute(0x1FF00), Locked);
static constexpr uint8_t ByteCountLocked = 1 + ByteCountNotLocked;
ASSERT_EQ(ByteCountLocked, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked>(codeBytes(), 0xF0, 0x67, 0x0F, 0xC7,
@@ -444,8 +441,8 @@
// Ensures that cmpxchg emits a lock prefix accordingly.
{
- __ cmpxchg(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), NotLocked);
+ __ cmpxchg(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ NotLocked);
static constexpr uint8_t ByteCountNotLocked8 = 10;
ASSERT_EQ(ByteCountNotLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked8>(codeBytes(), 0x67, 0x44, 0x0F,
@@ -453,8 +450,8 @@
0x01, 0x00));
reset();
- __ cmpxchg(IceType_i8, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), Locked);
+ __ cmpxchg(IceType_i8, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ Locked);
static constexpr uint8_t ByteCountLocked8 = 1 + ByteCountNotLocked8;
ASSERT_EQ(ByteCountLocked8, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked8>(codeBytes(), 0xF0, 0x67, 0x44,
@@ -464,8 +461,8 @@
}
{
- __ cmpxchg(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), NotLocked);
+ __ cmpxchg(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ NotLocked);
static constexpr uint8_t ByteCountNotLocked16 = 11;
ASSERT_EQ(ByteCountNotLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked16>(codeBytes(), 0x66, 0x67, 0x44,
@@ -473,8 +470,8 @@
0xFF, 0x01, 0x00));
reset();
- __ cmpxchg(IceType_i16, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), Locked);
+ __ cmpxchg(IceType_i16, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ Locked);
static constexpr uint8_t ByteCountLocked16 = 1 + ByteCountNotLocked16;
ASSERT_EQ(ByteCountLocked16, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked16>(codeBytes(), 0x66, 0xF0, 0x67,
@@ -484,8 +481,8 @@
}
{
- __ cmpxchg(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), NotLocked);
+ __ cmpxchg(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ NotLocked);
static constexpr uint8_t ByteCountNotLocked32 = 10;
ASSERT_EQ(ByteCountNotLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountNotLocked32>(codeBytes(), 0x67, 0x44, 0x0F,
@@ -493,8 +490,8 @@
0x01, 0x00));
reset();
- __ cmpxchg(IceType_i32, Address(0x1FF00, AssemblerFixup::NoFixup),
- Encoded_GPR_r14(), Locked);
+ __ cmpxchg(IceType_i32, Address::Absolute(0x1FF00), Encoded_GPR_r14(),
+ Locked);
static constexpr uint8_t ByteCountLocked32 = 1 + ByteCountNotLocked32;
ASSERT_EQ(ByteCountLocked32, codeBytesSize());
ASSERT_TRUE(verifyBytes<ByteCountLocked32>(codeBytes(), 0xF0, 0x67, 0x44,
diff --git a/unittest/AssemblerX8664/LowLevel.cpp b/unittest/AssemblerX8664/LowLevel.cpp
index 2281b14..550f51a 100644
--- a/unittest/AssemblerX8664/LowLevel.cpp
+++ b/unittest/AssemblerX8664/LowLevel.cpp
@@ -186,8 +186,7 @@
"(" #Inst ", " #Dst ", " #Disp ", " #OpType ", " #ByteCountUntyped \
", " #__VA_ARGS__ ")"; \
static constexpr uint8_t ByteCount = ByteCountUntyped; \
- __ Inst(IceType_##OpType, Encoded_GPR_##Dst(), \
- Address(Disp, AssemblerFixup::NoFixup)); \
+ __ Inst(IceType_##OpType, Encoded_GPR_##Dst(), Address::Absolute(Disp)); \
ASSERT_EQ(ByteCount, codeBytesSize()) << TestString; \
ASSERT_TRUE(verifyBytes<ByteCount>(codeBytes(), __VA_ARGS__)) \
<< TestString; \