Handle "Mov" which is mov, movss, movsd, and used for nacl.read.tp.
Currently, this checks and emits the segment
override only for GPR instructions, assuming it's mostly
used for nacl.read.tp. The code will assert when used
in other situations.
The lea hack is still tested in some files, but it is not
emitted with emitIAS; instead, the "immediate" operand
now carries a fixup.
There is a more compact encoding for "mov eax, moffs32",
etc., but that isn't used right now.
BUG=none
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/649463002
diff --git a/src/IceDefs.h b/src/IceDefs.h
index 62665e7..c27090b 100644
--- a/src/IceDefs.h
+++ b/src/IceDefs.h
@@ -74,6 +74,9 @@
typedef uint32_t TimerStackIdT;
typedef uint32_t TimerIdT;
+// PNaCl is ILP32, so theoretically we should only need 32-bit offsets.
+typedef int32_t RelocOffsetT;
+
enum LivenessMode {
// Basic version of live-range-end calculation. Marks the last uses
// of variables based on dataflow analysis. Records the set of
diff --git a/src/IceGlobalContext.cpp b/src/IceGlobalContext.cpp
index b724433..951de47 100644
--- a/src/IceGlobalContext.cpp
+++ b/src/IceGlobalContext.cpp
@@ -313,7 +313,7 @@
return ConstPool->Doubles.getOrAdd(this, IceType_f64, ConstantDouble);
}
-Constant *GlobalContext::getConstantSym(Type Ty, int64_t Offset,
+Constant *GlobalContext::getConstantSym(Type Ty, RelocOffsetT Offset,
const IceString &Name,
bool SuppressMangling) {
return ConstPool->Relocatables.getOrAdd(
diff --git a/src/IceGlobalContext.h b/src/IceGlobalContext.h
index 026cb7f..a21392e 100644
--- a/src/IceGlobalContext.h
+++ b/src/IceGlobalContext.h
@@ -110,7 +110,8 @@
Constant *getConstantFloat(float Value);
Constant *getConstantDouble(double Value);
// Returns a symbolic constant.
- Constant *getConstantSym(Type Ty, int64_t Offset, const IceString &Name = "",
+ Constant *getConstantSym(Type Ty, RelocOffsetT Offset,
+ const IceString &Name = "",
bool SuppressMangling = false);
// Returns an undef.
Constant *getConstantUndef(Type Ty);
diff --git a/src/IceInstX8632.cpp b/src/IceInstX8632.cpp
index 274d02c..75c294f 100644
--- a/src/IceInstX8632.cpp
+++ b/src/IceInstX8632.cpp
@@ -62,8 +62,14 @@
};
const char *InstX8632SegmentRegNames[] = {
-#define X(val, name) name,
- SEG_REGX8632_TABLE
+#define X(val, name, prefix) name,
+ SEG_REGX8632_TABLE
+#undef X
+};
+
+uint8_t InstX8632SegmentPrefixes[] = {
+#define X(val, name, prefix) prefix,
+ SEG_REGX8632_TABLE
#undef X
};
@@ -334,8 +340,10 @@
namespace {
-void emitIASBytes(Ostream &Str, const x86::AssemblerX86 *Asm,
+void emitIASBytes(const Cfg *Func, const x86::AssemblerX86 *Asm,
intptr_t StartPosition) {
+ GlobalContext *Ctx = Func->getContext();
+ Ostream &Str = Ctx->getStrEmit();
intptr_t EndPosition = Asm->GetPosition();
intptr_t LastFixupLoc = -1;
AssemblerFixup *LastFixup = NULL;
@@ -360,7 +368,12 @@
Str.write_hex(Asm->LoadBuffer<uint8_t>(i));
Str << "\n";
}
- Str << "\t.long " << LastFixup->value()->getName();
+ Str << "\t.long ";
+ const ConstantRelocatable *Reloc = LastFixup->value();
+ if (Reloc->getSuppressMangling())
+ Str << Reloc->getName();
+ else
+ Str << Ctx->mangleName(Reloc->getName());
if (LastFixup->value()->getOffset()) {
Str << " + " << LastFixup->value()->getOffset();
}
@@ -483,7 +496,7 @@
const x86::AssemblerX86::GPREmitterOneOp &Emitter) {
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
- if (const Variable *Var = llvm::dyn_cast<Variable>(Op)) {
+ if (const auto Var = llvm::dyn_cast<Variable>(Op)) {
if (Var->hasReg()) {
// We cheat a little and use GPRRegister even for byte operations.
RegX8632::GPRRegister VarReg =
@@ -494,13 +507,13 @@
->stackVarToAsmOperand(Var));
(Asm->*(Emitter.Addr))(Ty, StackAddr);
}
- } else if (const OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Op)) {
+ } else if (const auto Mem = llvm::dyn_cast<OperandX8632Mem>(Op)) {
+ Mem->emitSegmentOverride(Asm);
(Asm->*(Emitter.Addr))(Ty, Mem->toAsmAddress(Asm));
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void emitIASRegOpTyGPR(const Cfg *Func, Type Ty, const Variable *Var,
@@ -512,7 +525,7 @@
// We cheat a little and use GPRRegister even for byte operations.
RegX8632::GPRRegister VarReg =
RegX8632::getEncodedByteRegOrGPR(Ty, Var->getRegNum());
- if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
+ if (const auto SrcVar = llvm::dyn_cast<Variable>(Src)) {
if (SrcVar->hasReg()) {
RegX8632::GPRRegister SrcReg =
RegX8632::getEncodedByteRegOrGPR(Ty, SrcVar->getRegNum());
@@ -522,18 +535,22 @@
->stackVarToAsmOperand(SrcVar);
(Asm->*(Emitter.GPRAddr))(Ty, VarReg, SrcStackAddr);
}
- } else if (const OperandX8632Mem *Mem =
- llvm::dyn_cast<OperandX8632Mem>(Src)) {
- x86::Address SrcAddr = Mem->toAsmAddress(Asm);
- (Asm->*(Emitter.GPRAddr))(Ty, VarReg, SrcAddr);
- } else if (const ConstantInteger32 *Imm =
- llvm::dyn_cast<ConstantInteger32>(Src)) {
+ } else if (const auto Mem = llvm::dyn_cast<OperandX8632Mem>(Src)) {
+ Mem->emitSegmentOverride(Asm);
+ (Asm->*(Emitter.GPRAddr))(Ty, VarReg, Mem->toAsmAddress(Asm));
+ } else if (const auto Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
(Asm->*(Emitter.GPRImm))(Ty, VarReg, x86::Immediate(Imm->getValue()));
+ } else if (const auto Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) {
+ AssemblerFixup *Fixup =
+ x86::DisplacementRelocation::create(Asm, FK_Abs_4, Reloc);
+ (Asm->*(Emitter.GPRImm))(Ty, VarReg, x86::Immediate(Fixup));
+ } else if (const auto Split = llvm::dyn_cast<VariableSplit>(Src)) {
+ x86::Address SrcAddr = Split->toAsmAddress(Func);
+ (Asm->*(Emitter.GPRAddr))(Ty, VarReg, SrcAddr);
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void emitIASAddrOpTyGPR(const Cfg *Func, Type Ty, const x86::Address &Addr,
@@ -542,19 +559,21 @@
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
// Src can only be Reg or Immediate.
- if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
+ if (const auto SrcVar = llvm::dyn_cast<Variable>(Src)) {
assert(SrcVar->hasReg());
RegX8632::GPRRegister SrcReg =
RegX8632::getEncodedByteRegOrGPR(Ty, SrcVar->getRegNum());
(Asm->*(Emitter.AddrGPR))(Ty, Addr, SrcReg);
- } else if (const ConstantInteger32 *Imm =
- llvm::dyn_cast<ConstantInteger32>(Src)) {
+ } else if (const auto Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
(Asm->*(Emitter.AddrImm))(Ty, Addr, x86::Immediate(Imm->getValue()));
+ } else if (const auto Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) {
+ AssemblerFixup *Fixup =
+ x86::DisplacementRelocation::create(Asm, FK_Abs_4, Reloc);
+ (Asm->*(Emitter.AddrImm))(Ty, Addr, x86::Immediate(Fixup));
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void emitIASGPRShift(const Cfg *Func, Type Ty, const Variable *Var,
@@ -570,19 +589,17 @@
RegX8632::getEncodedByteRegOrGPR(Ty, Var->getRegNum());
// Src must be reg == ECX or an Imm8.
// This is asserted by the assembler.
- if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
+ if (const auto SrcVar = llvm::dyn_cast<Variable>(Src)) {
assert(SrcVar->hasReg());
RegX8632::GPRRegister SrcReg =
RegX8632::getEncodedByteRegOrGPR(Ty, SrcVar->getRegNum());
(Asm->*(Emitter.GPRGPR))(Ty, VarReg, SrcReg);
- } else if (const ConstantInteger32 *Imm =
- llvm::dyn_cast<ConstantInteger32>(Src)) {
+ } else if (const auto Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
(Asm->*(Emitter.GPRImm))(Ty, VarReg, x86::Immediate(Imm->getValue()));
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void emitIASXmmShift(const Cfg *Func, Type Ty, const Variable *Var,
@@ -592,7 +609,7 @@
intptr_t StartPosition = Asm->GetPosition();
assert(Var->hasReg());
RegX8632::XmmRegister VarReg = RegX8632::getEncodedXmm(Var->getRegNum());
- if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
+ if (const auto SrcVar = llvm::dyn_cast<Variable>(Src)) {
if (SrcVar->hasReg()) {
RegX8632::XmmRegister SrcReg =
RegX8632::getEncodedXmm(SrcVar->getRegNum());
@@ -602,18 +619,15 @@
->stackVarToAsmOperand(SrcVar);
(Asm->*(Emitter.XmmAddr))(Ty, VarReg, SrcStackAddr);
}
- } else if (const OperandX8632Mem *Mem =
- llvm::dyn_cast<OperandX8632Mem>(Src)) {
- x86::Address SrcAddr = Mem->toAsmAddress(Asm);
- (Asm->*(Emitter.XmmAddr))(Ty, VarReg, SrcAddr);
- } else if (const ConstantInteger32 *Imm =
- llvm::dyn_cast<ConstantInteger32>(Src)) {
+ } else if (const auto Mem = llvm::dyn_cast<OperandX8632Mem>(Src)) {
+ assert(Mem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
+ (Asm->*(Emitter.XmmAddr))(Ty, VarReg, Mem->toAsmAddress(Asm));
+ } else if (const auto Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
(Asm->*(Emitter.XmmImm))(Ty, VarReg, x86::Immediate(Imm->getValue()));
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void emitIASRegOpTyXMM(const Cfg *Func, Type Ty, const Variable *Var,
@@ -623,7 +637,7 @@
intptr_t StartPosition = Asm->GetPosition();
assert(Var->hasReg());
RegX8632::XmmRegister VarReg = RegX8632::getEncodedXmm(Var->getRegNum());
- if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
+ if (const auto SrcVar = llvm::dyn_cast<Variable>(Src)) {
if (SrcVar->hasReg()) {
RegX8632::XmmRegister SrcReg =
RegX8632::getEncodedXmm(SrcVar->getRegNum());
@@ -633,18 +647,16 @@
->stackVarToAsmOperand(SrcVar);
(Asm->*(Emitter.XmmAddr))(Ty, VarReg, SrcStackAddr);
}
- } else if (const OperandX8632Mem *Mem =
- llvm::dyn_cast<OperandX8632Mem>(Src)) {
- x86::Address SrcAddr = Mem->toAsmAddress(Asm);
- (Asm->*(Emitter.XmmAddr))(Ty, VarReg, SrcAddr);
- } else if (const Constant *Imm = llvm::dyn_cast<Constant>(Src)) {
+ } else if (const auto Mem = llvm::dyn_cast<OperandX8632Mem>(Src)) {
+ assert(Mem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
+ (Asm->*(Emitter.XmmAddr))(Ty, VarReg, Mem->toAsmAddress(Asm));
+ } else if (const auto Imm = llvm::dyn_cast<Constant>(Src)) {
(Asm->*(Emitter.XmmAddr))(
Ty, VarReg, x86::Address::ofConstPool(Func->getContext(), Asm, Imm));
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
template <typename DReg_t, typename SReg_t, DReg_t (*destEnc)(int32_t),
@@ -656,7 +668,7 @@
intptr_t StartPosition = Asm->GetPosition();
assert(Dest->hasReg());
DReg_t DestReg = destEnc(Dest->getRegNum());
- if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
+ if (const auto SrcVar = llvm::dyn_cast<Variable>(Src)) {
if (SrcVar->hasReg()) {
SReg_t SrcReg = srcEnc(SrcVar->getRegNum());
(Asm->*(Emitter.RegReg))(DispatchTy, DestReg, SrcReg);
@@ -665,15 +677,13 @@
->stackVarToAsmOperand(SrcVar);
(Asm->*(Emitter.RegAddr))(DispatchTy, DestReg, SrcStackAddr);
}
- } else if (const OperandX8632Mem *Mem =
- llvm::dyn_cast<OperandX8632Mem>(Src)) {
- x86::Address SrcAddr = Mem->toAsmAddress(Asm);
- (Asm->*(Emitter.RegAddr))(DispatchTy, DestReg, SrcAddr);
+ } else if (const auto Mem = llvm::dyn_cast<OperandX8632Mem>(Src)) {
+ Mem->emitSegmentOverride(Asm);
+ (Asm->*(Emitter.RegAddr))(DispatchTy, DestReg, Mem->toAsmAddress(Asm));
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void emitIASMovlikeXMM(const Cfg *Func, const Variable *Dest,
@@ -683,7 +693,7 @@
intptr_t StartPosition = Asm->GetPosition();
if (Dest->hasReg()) {
RegX8632::XmmRegister DestReg = RegX8632::getEncodedXmm(Dest->getRegNum());
- if (const Variable *SrcVar = llvm::dyn_cast<Variable>(Src)) {
+ if (const auto SrcVar = llvm::dyn_cast<Variable>(Src)) {
if (SrcVar->hasReg()) {
(Asm->*(Emitter.XmmXmm))(DestReg,
RegX8632::getEncodedXmm(SrcVar->getRegNum()));
@@ -692,8 +702,8 @@
->stackVarToAsmOperand(SrcVar));
(Asm->*(Emitter.XmmAddr))(DestReg, StackAddr);
}
- } else if (const OperandX8632Mem *SrcMem =
- llvm::dyn_cast<OperandX8632Mem>(Src)) {
+ } else if (const auto SrcMem = llvm::dyn_cast<OperandX8632Mem>(Src)) {
+ assert(SrcMem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
(Asm->*(Emitter.XmmAddr))(DestReg, SrcMem->toAsmAddress(Asm));
} else {
llvm_unreachable("Unexpected operand type");
@@ -707,8 +717,7 @@
(Asm->*(Emitter.AddrXmm))(StackAddr,
RegX8632::getEncodedXmm(SrcVar->getRegNum()));
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
bool checkForRedundantAssign(const Variable *Dest, const Operand *Source) {
@@ -1191,7 +1200,6 @@
}
template <> void InstX8632Cbwdq::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
assert(getSrcSize() == 1);
@@ -1215,7 +1223,7 @@
Asm->cdq();
break;
}
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Mul::emit(const Cfg *Func) const {
@@ -1313,8 +1321,6 @@
}
void InstX8632Cmov::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
- Str << "\t";
assert(Condition != CondX86::Br_None);
assert(getDest()->hasReg());
assert(getSrcSize() == 2);
@@ -1326,7 +1332,7 @@
intptr_t StartPosition = Asm->GetPosition();
Asm->cmov(Condition, RegX8632::getEncodedGPR(getDest()->getRegNum()),
RegX8632::getEncodedGPR(Src->getRegNum()));
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Cmov::dump(const Cfg *Func) const {
@@ -1352,7 +1358,6 @@
}
void InstX8632Cmpps::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
assert(getSrcSize() == 2);
@@ -1370,7 +1375,7 @@
Asm->cmpps(RegX8632::getEncodedXmm(getDest()->getRegNum()), SrcStackAddr,
Condition);
}
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Cmpps::dump(const Cfg *Func) const {
@@ -1396,12 +1401,12 @@
}
void InstX8632Cmpxchg::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 3);
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Type Ty = getSrc(0)->getType();
const OperandX8632Mem *Mem = llvm::cast<OperandX8632Mem>(getSrc(0));
+ assert(Mem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
const x86::Address Addr = Mem->toAsmAddress(Asm);
const Variable *VarReg = llvm::cast<Variable>(getSrc(2));
assert(VarReg->hasReg());
@@ -1412,7 +1417,7 @@
} else {
Asm->cmpxchg(Ty, Addr, Reg);
}
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Cmpxchg::dump(const Cfg *Func) const {
@@ -1436,17 +1441,17 @@
}
void InstX8632Cmpxchg8b::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 5);
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
const OperandX8632Mem *Mem = llvm::cast<OperandX8632Mem>(getSrc(0));
+ assert(Mem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
const x86::Address Addr = Mem->toAsmAddress(Asm);
if (Locked) {
Asm->lock();
}
Asm->cmpxchg8b(Addr);
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Cmpxchg8b::dump(const Cfg *Func) const {
@@ -1574,6 +1579,7 @@
} else if (const OperandX8632Mem *SrcMem0 =
llvm::dyn_cast<OperandX8632Mem>(Src0)) {
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
+ SrcMem0->emitSegmentOverride(Asm);
emitIASAddrOpTyGPR(Func, Ty, SrcMem0->toAsmAddress(Asm), Src1, AddrEmitter);
}
}
@@ -1624,8 +1630,7 @@
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Asm->ud2();
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632UD2::dump(const Cfg *Func) const {
@@ -1668,6 +1673,7 @@
llvm::dyn_cast<OperandX8632Mem>(Src0)) {
llvm_unreachable("Nothing actually generates this so it's untested");
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
+ SrcMem0->emitSegmentOverride(Asm);
emitIASAddrOpTyGPR(Func, Ty, SrcMem0->toAsmAddress(Asm), Src1, AddrEmitter);
}
}
@@ -1685,11 +1691,10 @@
}
void InstX8632Mfence::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Asm->mfence();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Mfence::dump(const Cfg *Func) const {
@@ -1732,11 +1737,11 @@
assert(getSrcSize() == 2);
const Variable *Src = llvm::cast<Variable>(getSrc(0));
const OperandX8632Mem *DestMem = llvm::cast<OperandX8632Mem>(getSrc(1));
+ assert(DestMem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
assert(Src->hasReg());
Asm->movups(DestMem->toAsmAddress(Asm),
RegX8632::getEncodedXmm(Src->getRegNum()));
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632StoreP::dump(const Cfg *Func) const {
@@ -1765,11 +1770,11 @@
assert(getSrcSize() == 2);
const Variable *Src = llvm::cast<Variable>(getSrc(0));
const OperandX8632Mem *DestMem = llvm::cast<OperandX8632Mem>(getSrc(1));
+ assert(DestMem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
assert(Src->hasReg());
Asm->movq(DestMem->toAsmAddress(Asm),
RegX8632::getEncodedXmm(Src->getRegNum()));
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632StoreQ::dump(const Cfg *Func) const {
@@ -1849,6 +1854,77 @@
}
}
+template <> void InstX8632Mov::emitIAS(const Cfg *Func) const {
+ assert(getSrcSize() == 1);
+ const Variable *Dest = getDest();
+ const Operand *Src = getSrc(0);
+ Type DestTy = Dest->getType();
+ Type SrcTy = Src->getType();
+ // Mov can be used for GPRs or XMM registers. Also, the type does not
+ // necessarily match (Mov can be used for bitcasts). However, when
+ // the type does not match, one of the operands must be a register.
+ // Thus, the strategy is to find out if Src or Dest are a register,
+ // then use that register's type to decide on which emitter set to use.
+ // The emitter set will include reg-reg movs, but that case should
+ // be unused when the types don't match.
+ static const x86::AssemblerX86::XmmEmitterRegOp XmmRegEmitter = {
+ &x86::AssemblerX86::movss, &x86::AssemblerX86::movss};
+ static const x86::AssemblerX86::GPREmitterRegOp GPRRegEmitter = {
+ &x86::AssemblerX86::mov, &x86::AssemblerX86::mov,
+ &x86::AssemblerX86::mov};
+ static const x86::AssemblerX86::GPREmitterAddrOp GPRAddrEmitter = {
+ &x86::AssemblerX86::mov, &x86::AssemblerX86::mov};
+ // For an integer truncation operation, src is wider than dest.
+ // Ideally, we use a mov instruction whose data width matches the
+ // narrower dest. This is a problem if e.g. src is a register like
+ // esi or si where there is no 8-bit version of the register. To be
+ // safe, we instead widen the dest to match src. This works even
+ // for stack-allocated dest variables because typeWidthOnStack()
+ // pads to a 4-byte boundary even if only a lower portion is used.
+ // TODO: This assert disallows usages such as copying a floating point
+ // value between a vector and a scalar (which movss is used for).
+ // Clean this up.
+ assert(Func->getTarget()->typeWidthInBytesOnStack(getDest()->getType()) ==
+ Func->getTarget()->typeWidthInBytesOnStack(Src->getType()));
+ if (Dest->hasReg()) {
+ if (isScalarFloatingType(DestTy)) {
+ emitIASRegOpTyXMM(Func, DestTy, Dest, Src, XmmRegEmitter);
+ return;
+ } else {
+ assert(isScalarIntegerType(DestTy));
+ // Widen DestTy for truncation (see above note). We should only do this
+ // when both Src and Dest are integer types.
+ if (isScalarIntegerType(SrcTy)) {
+ DestTy = SrcTy;
+ }
+ emitIASRegOpTyGPR(Func, DestTy, Dest, Src, GPRRegEmitter);
+ return;
+ }
+ } else {
+ // Dest must be Stack and Src *could* be a register. Use Src's type
+ // to decide on the emitters.
+ x86::Address StackAddr(static_cast<TargetX8632 *>(Func->getTarget())
+ ->stackVarToAsmOperand(Dest));
+ if (isScalarFloatingType(SrcTy)) {
+ // Src must be a register.
+ const Variable *SrcVar = llvm::cast<Variable>(Src);
+ assert(SrcVar->hasReg());
+ x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
+ intptr_t StartPosition = Asm->GetPosition();
+ Asm->movss(SrcTy, StackAddr,
+ RegX8632::getEncodedXmm(SrcVar->getRegNum()));
+ emitIASBytes(Func, Asm, StartPosition);
+ return;
+ } else {
+ // Src can be a register or immediate.
+ assert(isScalarIntegerType(SrcTy));
+ emitIASAddrOpTyGPR(Func, SrcTy, StackAddr, Src, GPRAddrEmitter);
+ return;
+ }
+ return;
+ }
+}
+
template <> void InstX8632Movd::emitIAS(const Cfg *Func) const {
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
@@ -1881,8 +1957,7 @@
Asm->movd(StackAddr, SrcReg);
}
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
template <> void InstX8632Movp::emit(const Cfg *Func) const {
@@ -1947,8 +2022,7 @@
intptr_t StartPosition = Asm->GetPosition();
Asm->movss(IceType_f32, RegX8632::getEncodedXmm(Dest->getRegNum()),
RegX8632::getEncodedXmm(Src->getRegNum()));
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Movsx::emit(const Cfg *Func) const {
@@ -1996,12 +2070,11 @@
}
void InstX8632Nop::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
// TODO: Emit the right code for the variant.
Asm->nop();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Nop::dump(const Cfg *Func) const {
@@ -2054,14 +2127,14 @@
Asm->fld(Ty, StackAddr);
}
} else if (const auto Mem = llvm::dyn_cast<OperandX8632Mem>(Src)) {
+ assert(Mem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
Asm->fld(Ty, Mem->toAsmAddress(Asm));
} else if (const auto Imm = llvm::dyn_cast<Constant>(Src)) {
Asm->fld(Ty, x86::Address::ofConstPool(Func->getContext(), Asm, Imm));
} else {
llvm_unreachable("Unexpected operand type");
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Fld::dump(const Cfg *Func) const {
@@ -2112,8 +2185,7 @@
// of popping the stack.
if (Dest == NULL) {
Asm->fstp(RegX8632::getEncodedSTReg(0));
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
return;
}
Type Ty = Dest->getType();
@@ -2133,8 +2205,7 @@
Asm->movss(Ty, RegX8632::getEncodedXmm(Dest->getRegNum()), StackSlot);
Asm->add(IceType_i32, RegX8632::Encoded_Reg_esp, Width);
}
- Ostream &Str = Func->getContext()->getStrEmit();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Fstp::dump(const Cfg *Func) const {
@@ -2216,7 +2287,6 @@
}
void InstX8632Pop::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 0);
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
@@ -2226,7 +2296,7 @@
Asm->popl(static_cast<TargetX8632 *>(Func->getTarget())
->stackVarToAsmOperand(getDest()));
}
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Pop::dump(const Cfg *Func) const {
@@ -2242,11 +2312,10 @@
}
void InstX8632AdjustStack::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Asm->sub(IceType_i32, RegX8632::Encoded_Reg_esp, x86::Immediate(Amount));
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
Func->getTarget()->updateStackAdjustment(Amount);
}
@@ -2267,7 +2336,6 @@
}
void InstX8632Push::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 1);
// Push is currently only used for saving GPRs.
Variable *Var = llvm::cast<Variable>(getSrc(0));
@@ -2275,7 +2343,7 @@
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Asm->pushl(RegX8632::getEncodedGPR(Var->getRegNum()));
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Push::dump(const Cfg *Func) const {
@@ -2312,11 +2380,10 @@
}
void InstX8632Ret::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Asm->ret();
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Ret::dump(const Cfg *Func) const {
@@ -2339,12 +2406,12 @@
}
void InstX8632Xadd::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 2);
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Type Ty = getSrc(0)->getType();
const OperandX8632Mem *Mem = llvm::cast<OperandX8632Mem>(getSrc(0));
+ assert(Mem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
const x86::Address Addr = Mem->toAsmAddress(Asm);
const Variable *VarReg = llvm::cast<Variable>(getSrc(1));
assert(VarReg->hasReg());
@@ -2354,7 +2421,7 @@
Asm->lock();
}
Asm->xadd(Ty, Addr, Reg);
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Xadd::dump(const Cfg *Func) const {
@@ -2377,19 +2444,19 @@
}
void InstX8632Xchg::emitIAS(const Cfg *Func) const {
- Ostream &Str = Func->getContext()->getStrEmit();
assert(getSrcSize() == 2);
x86::AssemblerX86 *Asm = Func->getAssembler<x86::AssemblerX86>();
intptr_t StartPosition = Asm->GetPosition();
Type Ty = getSrc(0)->getType();
const OperandX8632Mem *Mem = llvm::cast<OperandX8632Mem>(getSrc(0));
+ assert(Mem->getSegmentRegister() == OperandX8632Mem::DefaultSegment);
const x86::Address Addr = Mem->toAsmAddress(Asm);
const Variable *VarReg = llvm::cast<Variable>(getSrc(1));
assert(VarReg->hasReg());
const RegX8632::GPRRegister Reg =
RegX8632::getEncodedGPR(VarReg->getRegNum());
Asm->xchg(Ty, Addr, Reg);
- emitIASBytes(Str, Asm, StartPosition);
+ emitIASBytes(Func, Asm, StartPosition);
}
void InstX8632Xchg::dump(const Cfg *Func) const {
@@ -2496,6 +2563,13 @@
Str << "]";
}
+void OperandX8632Mem::emitSegmentOverride(x86::AssemblerX86 *Asm) const {
+ if (SegmentReg != DefaultSegment) {
+ assert(SegmentReg >= 0 && SegmentReg < SegReg_NUM);
+ Asm->EmitSegmentOverride(InstX8632SegmentPrefixes[SegmentReg]);
+ }
+}
+
x86::Address OperandX8632Mem::toAsmAddress(Assembler *Asm) const {
int32_t Disp = 0;
AssemblerFixup *Fixup = NULL;
@@ -2506,10 +2580,6 @@
Disp = static_cast<int32_t>(CI->getValue());
} else if (ConstantRelocatable *CR =
llvm::dyn_cast<ConstantRelocatable>(getOffset())) {
- // TODO(jvoung): CR + non-zero-offset isn't really tested yet,
- // since the addressing mode optimization doesn't try to combine
- // ConstantRelocatable with something else.
- assert(CR->getOffset() == 0);
Fixup = x86::DisplacementRelocation::create(Asm, FK_Abs_4, CR);
} else {
llvm_unreachable("Unexpected offset type");
@@ -2526,11 +2596,24 @@
} else if (getIndex()) {
return x86::Address(RegX8632::getEncodedGPR(getIndex()->getRegNum()),
x86::ScaleFactor(getShift()), Disp);
+ } else if (Fixup) {
+ // The fixup itself has an offset, so Disp should still be 0.
+ assert(Disp == 0);
+ return x86::Address::Absolute(Fixup);
} else {
- return x86::Address::Absolute(Disp, Fixup);
+ return x86::Address::Absolute(Disp);
}
}
+x86::Address VariableSplit::toAsmAddress(const Cfg *Func) const {
+ assert(!Var->hasReg());
+ const TargetLowering *Target = Func->getTarget();
+ int32_t Offset =
+ Var->getStackOffset() + Target->getStackAdjustment() + getOffset();
+ return x86::Address(RegX8632::getEncodedGPR(Target->getFrameOrStackReg()),
+ Offset);
+}
+
void VariableSplit::emit(const Cfg *Func) const {
Ostream &Str = Func->getContext()->getStrEmit();
assert(!Var->hasReg());
@@ -2539,9 +2622,8 @@
const Type Ty = IceType_i32;
Str << TypeX8632Attributes[Ty].WidthString << " ["
<< Target->getRegName(Target->getFrameOrStackReg(), Ty);
- int32_t Offset = Var->getStackOffset() + Target->getStackAdjustment();
- if (Part == High)
- Offset += 4;
+ int32_t Offset =
+ Var->getStackOffset() + Target->getStackAdjustment() + getOffset();
if (Offset) {
if (Offset > 0)
Str << "+";
diff --git a/src/IceInstX8632.def b/src/IceInstX8632.def
index e48db41..fcd9e3a 100644
--- a/src/IceInstX8632.def
+++ b/src/IceInstX8632.def
@@ -78,15 +78,15 @@
//#define X(val, encode)
// X86 segment registers.
-#define SEG_REGX8632_TABLE \
- /* enum value, name */ \
- X(SegReg_CS, "cs") \
- X(SegReg_DS, "ds") \
- X(SegReg_ES, "es") \
- X(SegReg_SS, "ss") \
- X(SegReg_FS, "fs") \
- X(SegReg_GS, "gs") \
-//#define X(val, name)
+#define SEG_REGX8632_TABLE \
+ /* enum value, name, prefix */ \
+ X(SegReg_CS, "cs", 0x2E) \
+ X(SegReg_DS, "ds", 0x3E) \
+ X(SegReg_ES, "es", 0x26) \
+ X(SegReg_SS, "ss", 0x36) \
+ X(SegReg_FS, "fs", 0x64) \
+ X(SegReg_GS, "gs", 0x65) \
+//#define X(val, name, prefix)
// X87 ST(n) registers.
#define X87ST_REGX8632_TABLE \
diff --git a/src/IceInstX8632.h b/src/IceInstX8632.h
index 02cec79..604f1a5 100644
--- a/src/IceInstX8632.h
+++ b/src/IceInstX8632.h
@@ -58,7 +58,7 @@
public:
enum SegmentRegisters {
DefaultSegment = -1,
-#define X(val, name) val,
+#define X(val, name, prefix) val,
SEG_REGX8632_TABLE
#undef X
SegReg_NUM
@@ -75,6 +75,7 @@
Variable *getIndex() const { return Index; }
uint16_t getShift() const { return Shift; }
SegmentRegisters getSegmentRegister() const { return SegmentReg; }
+ void emitSegmentOverride(x86::AssemblerX86 *Asm) const;
x86::Address toAsmAddress(Assembler *Asm) const;
void emit(const Cfg *Func) const override;
using OperandX8632::dump;
@@ -112,6 +113,9 @@
static VariableSplit *create(Cfg *Func, Variable *Var, Portion Part) {
return new (Func->allocate<VariableSplit>()) VariableSplit(Func, Var, Part);
}
+ int32_t getOffset() const { return Part == High ? 4 : 0; }
+
+ x86::Address toAsmAddress(const Cfg *Func) const;
void emit(const Cfg *Func) const override;
using OperandX8632::dump;
void dump(const Cfg *Func, Ostream &Str) const override;
@@ -835,7 +839,7 @@
}
bool isSimpleAssign() const override { return true; }
void emit(const Cfg *Func) const override;
- void emitIAS(const Cfg *Func) const override { emit(Func); }
+ void emitIAS(const Cfg *Func) const override;
void dump(const Cfg *Func) const override {
Ostream &Str = Func->getContext()->getStrDump();
Str << Opcode << "." << getDest()->getType() << " ";
@@ -1519,8 +1523,6 @@
template <> void InstX8632Imul::emitIAS(const Cfg *Func) const;
template <> void InstX8632Cbwdq::emitIAS(const Cfg *Func) const;
template <> void InstX8632Movd::emitIAS(const Cfg *Func) const;
-template <> void InstX8632Movp::emitIAS(const Cfg *Func) const;
-template <> void InstX8632Movq::emitIAS(const Cfg *Func) const;
template <> void InstX8632MovssRegs::emitIAS(const Cfg *Func) const;
template <> void InstX8632Pblendvb::emitIAS(const Cfg *Func) const;
template <> void InstX8632Pmull::emitIAS(const Cfg *Func) const;
diff --git a/src/IceOperand.h b/src/IceOperand.h
index cbe0095..95ffb50 100644
--- a/src/IceOperand.h
+++ b/src/IceOperand.h
@@ -18,7 +18,9 @@
#ifndef SUBZERO_SRC_ICEOPERAND_H
#define SUBZERO_SRC_ICEOPERAND_H
+#include "IceCfg.h"
#include "IceDefs.h"
+#include "IceGlobalContext.h"
#include "IceTypes.h"
namespace Ice {
@@ -183,14 +185,14 @@
RelocatableTuple &operator=(const RelocatableTuple &) = delete;
public:
- RelocatableTuple(const int64_t Offset, const IceString &Name,
+ RelocatableTuple(const RelocOffsetT Offset, const IceString &Name,
bool SuppressMangling)
: Offset(Offset), Name(Name), SuppressMangling(SuppressMangling) {}
RelocatableTuple(const RelocatableTuple &Other)
: Offset(Other.Offset), Name(Other.Name),
SuppressMangling(Other.SuppressMangling) {}
- const int64_t Offset;
+ const RelocOffsetT Offset;
const IceString Name;
bool SuppressMangling;
};
@@ -207,7 +209,8 @@
return new (Ctx->allocate<ConstantRelocatable>()) ConstantRelocatable(
Ty, Tuple.Offset, Tuple.Name, Tuple.SuppressMangling, PoolEntryID);
}
- int64_t getOffset() const { return Offset; }
+
+ RelocOffsetT getOffset() const { return Offset; }
IceString getName() const { return Name; }
void setSuppressMangling(bool Value) { SuppressMangling = Value; }
bool getSuppressMangling() const { return SuppressMangling; }
@@ -222,14 +225,14 @@
}
private:
- ConstantRelocatable(Type Ty, int64_t Offset, const IceString &Name,
+ ConstantRelocatable(Type Ty, RelocOffsetT Offset, const IceString &Name,
bool SuppressMangling, uint32_t PoolEntryID)
: Constant(kConstRelocatable, Ty, PoolEntryID), Offset(Offset),
Name(Name), SuppressMangling(SuppressMangling) {}
ConstantRelocatable(const ConstantRelocatable &) = delete;
ConstantRelocatable &operator=(const ConstantRelocatable &) = delete;
~ConstantRelocatable() override {}
- const int64_t Offset; // fixed offset to add
+ const RelocOffsetT Offset; // fixed offset to add
const IceString Name; // optional for debug/dump
bool SuppressMangling;
};
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp
index 4e04186..ab021c6 100644
--- a/src/IceTargetLoweringX8632.cpp
+++ b/src/IceTargetLoweringX8632.cpp
@@ -1058,6 +1058,7 @@
Offset = Ctx->getConstantInt32(IceType_i32, 4 + IntOffset->getValue());
} else if (ConstantRelocatable *SymOffset =
llvm::dyn_cast<ConstantRelocatable>(Offset)) {
+ assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4));
Offset = Ctx->getConstantSym(IceType_i32, 4 + SymOffset->getOffset(),
SymOffset->getName());
}
diff --git a/src/PNaClTranslator.cpp b/src/PNaClTranslator.cpp
index 4f7a745..6bef570 100644
--- a/src/PNaClTranslator.cpp
+++ b/src/PNaClTranslator.cpp
@@ -299,7 +299,7 @@
Error(StrBuf.str());
Name = "??";
}
- const uint64_t Offset = 0;
+ const Ice::RelocOffsetT Offset = 0;
C = getTranslator().getContext()->getConstantSym(
getIcePointerType(), Offset, Name);
ValueIDConstants[ID] = C;
diff --git a/src/assembler_ia32.cpp b/src/assembler_ia32.cpp
index 76ff93f..16d01ce 100644
--- a/src/assembler_ia32.cpp
+++ b/src/assembler_ia32.cpp
@@ -25,8 +25,6 @@
namespace Ice {
namespace x86 {
-const Type BrokenType = IceType_i32;
-
class DirectCallRelocation : public AssemblerFixup {
public:
static DirectCallRelocation *create(Assembler *Asm, FixupKind Kind,
@@ -56,13 +54,13 @@
Type Ty = Imm->getType();
assert(llvm::isa<ConstantFloat>(Imm) || llvm::isa<ConstantDouble>(Imm));
StrBuf << "L$" << Ty << "$" << Imm->getPoolEntryID();
- const int64_t Offset = 0;
+ const RelocOffsetT Offset = 0;
const bool SuppressMangling = true;
Constant *Sym =
Ctx->getConstantSym(Ty, Offset, StrBuf.str(), SuppressMangling);
AssemblerFixup *Fixup = x86::DisplacementRelocation::create(
Asm, FK_Abs_4, llvm::cast<ConstantRelocatable>(Sym));
- return x86::Address::Absolute(Offset, Fixup);
+ return x86::Address::Absolute(Fixup);
}
void AssemblerX86::call(GPRRegister reg) {
@@ -127,35 +125,68 @@
EmitUint8(0xC0 + dst);
}
-void AssemblerX86::movl(GPRRegister dst, const Immediate &imm) {
+void AssemblerX86::mov(Type Ty, GPRRegister dst, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (isByteSizedType(Ty)) {
+ EmitUint8(0xB0 + dst);
+ EmitUint8(imm.value() & 0xFF);
+ return;
+ }
+ if (Ty == IceType_i16)
+ EmitOperandSizeOverride();
EmitUint8(0xB8 + dst);
- EmitImmediate(BrokenType, imm);
+ EmitImmediate(Ty, imm);
}
-void AssemblerX86::movl(GPRRegister dst, GPRRegister src) {
+void AssemblerX86::mov(Type Ty, GPRRegister dst, GPRRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x89);
+ if (Ty == IceType_i16)
+ EmitOperandSizeOverride();
+ if (isByteSizedType(Ty)) {
+ EmitUint8(0x88);
+ } else {
+ EmitUint8(0x89);
+ }
EmitRegisterOperand(src, dst);
}
-void AssemblerX86::movl(GPRRegister dst, const Address &src) {
+void AssemblerX86::mov(Type Ty, GPRRegister dst, const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x8B);
+ if (Ty == IceType_i16)
+ EmitOperandSizeOverride();
+ if (isByteSizedType(Ty)) {
+ EmitUint8(0x8A);
+ } else {
+ EmitUint8(0x8B);
+ }
EmitOperand(dst, src);
}
-void AssemblerX86::movl(const Address &dst, GPRRegister src) {
+void AssemblerX86::mov(Type Ty, const Address &dst, GPRRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x89);
+ if (Ty == IceType_i16)
+ EmitOperandSizeOverride();
+ if (isByteSizedType(Ty)) {
+ EmitUint8(0x88);
+ } else {
+ EmitUint8(0x89);
+ }
EmitOperand(src, dst);
}
-void AssemblerX86::movl(const Address &dst, const Immediate &imm) {
+void AssemblerX86::mov(Type Ty, const Address &dst, const Immediate &imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0xC7);
- EmitOperand(0, dst);
- EmitImmediate(BrokenType, imm);
+ if (Ty == IceType_i16)
+ EmitOperandSizeOverride();
+ if (isByteSizedType(Ty)) {
+ EmitUint8(0xC6);
+ EmitOperand(0, dst);
+ EmitUint8(imm.value() & 0xFF);
+ } else {
+ EmitUint8(0xC7);
+ EmitOperand(0, dst);
+ EmitImmediate(Ty, imm);
+ }
}
void AssemblerX86::movzxb(GPRRegister dst, ByteRegister src) {
@@ -186,27 +217,6 @@
EmitOperand(dst, src);
}
-void AssemblerX86::movb(ByteRegister dst, const Address &src) {
- (void)dst;
- (void)src;
- // FATAL
- llvm_unreachable("Use movzxb or movsxb instead.");
-}
-
-void AssemblerX86::movb(const Address &dst, ByteRegister src) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x88);
- EmitOperand(src, dst);
-}
-
-void AssemblerX86::movb(const Address &dst, const Immediate &imm) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0xC6);
- EmitOperand(RegX8632::Encoded_Reg_eax, dst);
- assert(imm.is_int8());
- EmitUint8(imm.value() & 0xFF);
-}
-
void AssemblerX86::movzxw(GPRRegister dst, GPRRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -235,20 +245,6 @@
EmitOperand(dst, src);
}
-void AssemblerX86::movw(GPRRegister dst, const Address &src) {
- (void)dst;
- (void)src;
- // FATAL
- llvm_unreachable("Use movzxw or movsxw instead.");
-}
-
-void AssemblerX86::movw(const Address &dst, GPRRegister src) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOperandSizeOverride();
- EmitUint8(0x89);
- EmitOperand(src, dst);
-}
-
void AssemblerX86::lea(Type Ty, GPRRegister dst, const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
assert(Ty == IceType_i16 || Ty == IceType_i32);
@@ -927,7 +923,7 @@
void AssemblerX86::set1ps(XmmRegister dst, GPRRegister tmp1,
const Immediate &imm) {
// Load 32-bit immediate value into tmp1.
- movl(tmp1, imm);
+ mov(IceType_i32, tmp1, imm);
// Move value from tmp1 into dst.
movd(dst, tmp1);
// Broadcast low lane into other three lanes.
@@ -2266,10 +2262,15 @@
}
void AssemblerX86::EmitImmediate(Type Ty, const Immediate &imm) {
- if (Ty == IceType_i16)
+ if (Ty == IceType_i16) {
+ assert(!imm.fixup());
EmitInt16(imm.value());
- else
+ } else {
+ if (imm.fixup()) {
+ EmitFixup(imm.fixup());
+ }
EmitInt32(imm.value());
+ }
}
void AssemblerX86::EmitComplexI8(int rm, const Operand &operand,
diff --git a/src/assembler_ia32.h b/src/assembler_ia32.h
index dc600a2..1885e0c 100644
--- a/src/assembler_ia32.h
+++ b/src/assembler_ia32.h
@@ -20,8 +20,9 @@
#ifndef SUBZERO_SRC_ASSEMBLER_IA32_H_
#define SUBZERO_SRC_ASSEMBLER_IA32_H_
-#include "IceDefs.h"
#include "IceConditionCodesX8632.h"
+#include "IceDefs.h"
+#include "IceOperand.h"
#include "IceRegistersX8632.h"
#include "IceTypes.h"
#include "IceUtils.h"
@@ -31,7 +32,6 @@
namespace Ice {
class Assembler;
-class ConstantRelocatable;
using RegX8632::GPRRegister;
using RegX8632::XmmRegister;
@@ -67,18 +67,33 @@
class Immediate {
public:
- explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(int32_t value) : value_(value), fixup_(NULL) {}
- Immediate(const Immediate &other) : value_(other.value_) {}
+ explicit Immediate(const Immediate &other)
+ : value_(other.value_), fixup_(other.fixup_) {}
+
+ explicit Immediate(AssemblerFixup *fixup)
+ : value_(fixup->value()->getOffset()), fixup_(fixup) {
+ // Use the Offset in the "value" for now. If the symbol is part of
+ // ".bss", then the relocation's symbol will be plain ".bss" and
+ // the value will need to be adjusted further to be sym's
+ // bss offset + Offset.
+ }
int32_t value() const { return value_; }
+ AssemblerFixup *fixup() const { return fixup_; }
- bool is_int8() const { return Utils::IsInt(8, value_); }
- bool is_uint8() const { return Utils::IsUint(8, value_); }
- bool is_uint16() const { return Utils::IsUint(16, value_); }
+  bool is_int8() const {
+    // We currently only allow 32-bit fixups, and they usually have value = 0,
+    // so if fixup_ != NULL, it shouldn't be classified as int8/uint8/uint16.
+    return fixup_ == NULL && Utils::IsInt(8, value_);
+  }
+ bool is_uint8() const { return fixup_ == NULL && Utils::IsUint(8, value_); }
+ bool is_uint16() const { return fixup_ == NULL && Utils::IsUint(16, value_); }
private:
const int32_t value_;
+ AssemblerFixup *fixup_;
};
class Operand {
@@ -228,10 +243,21 @@
return *this;
}
- static Address Absolute(const uintptr_t addr, AssemblerFixup *fixup) {
+ static Address Absolute(const uintptr_t addr) {
Address result;
result.SetModRM(0, RegX8632::Encoded_Reg_ebp);
result.SetDisp32(addr);
+ return result;
+ }
+
+ static Address Absolute(AssemblerFixup *fixup) {
+ Address result;
+ result.SetModRM(0, RegX8632::Encoded_Reg_ebp);
+ // Use the Offset in the displacement for now. If the symbol is part of
+ // ".bss", then the relocation's symbol will be plain .bss and the
+ // displacement will need to be adjusted further to be sym's
+ // bss offset + Offset.
+ result.SetDisp32(fixup->value()->getOffset());
result.SetFixup(fixup);
return result;
}
@@ -427,28 +453,22 @@
void setcc(CondX86::BrCond condition, ByteRegister dst);
- void movl(GPRRegister dst, const Immediate &src);
- void movl(GPRRegister dst, GPRRegister src);
+ void mov(Type Ty, GPRRegister dst, const Immediate &src);
+ void mov(Type Ty, GPRRegister dst, GPRRegister src);
- void movl(GPRRegister dst, const Address &src);
- void movl(const Address &dst, GPRRegister src);
- void movl(const Address &dst, const Immediate &imm);
+ void mov(Type Ty, GPRRegister dst, const Address &src);
+ void mov(Type Ty, const Address &dst, GPRRegister src);
+ void mov(Type Ty, const Address &dst, const Immediate &imm);
void movzxb(GPRRegister dst, ByteRegister src);
void movzxb(GPRRegister dst, const Address &src);
void movsxb(GPRRegister dst, ByteRegister src);
void movsxb(GPRRegister dst, const Address &src);
- void movb(ByteRegister dst, const Address &src);
- void movb(const Address &dst, ByteRegister src);
- void movb(const Address &dst, const Immediate &imm);
-
void movzxw(GPRRegister dst, GPRRegister src);
void movzxw(GPRRegister dst, const Address &src);
void movsxw(GPRRegister dst, GPRRegister src);
void movsxw(GPRRegister dst, const Address &src);
- void movw(GPRRegister dst, const Address &src);
- void movw(const Address &dst, GPRRegister src);
void lea(Type Ty, GPRRegister dst, const Address &src);
@@ -744,6 +764,8 @@
cmpxchg(Ty, address, reg);
}
+ void EmitSegmentOverride(uint8_t prefix) { EmitUint8(prefix); }
+
intptr_t PreferredLoopAlignment() { return 16; }
void Align(intptr_t alignment, intptr_t offset);
void Bind(Label *label);
diff --git a/tests_lit/assembler/x86/opcode_register_encodings.ll b/tests_lit/assembler/x86/opcode_register_encodings.ll
index ca58336..f937868 100644
--- a/tests_lit/assembler/x86/opcode_register_encodings.ll
+++ b/tests_lit/assembler/x86/opcode_register_encodings.ll
@@ -2,7 +2,7 @@
; those for pmull vary more wildly depending on operand size (rather than
; follow a usual pattern).
-; RUN: %p2i -i %s --args -O2 -mattr=sse4.1 --verbose none \
+; RUN: %p2i -i %s --args -O2 -mattr=sse4.1 -sandbox --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
; RUN: %p2i -i %s --args --verbose none | FileCheck --check-prefix=ERRORS %s
@@ -127,4 +127,38 @@
; CHECK-LABEL: load_v16xI8
; CHECK: 0f 10 0{{.*}} movups xmm0, xmmword ptr [e{{.*}}]
+; Test segment override prefix. This happens w/ nacl.read.tp.
+declare i8* @llvm.nacl.read.tp()
+
+; Also test more address complex operands via address-mode-optimization.
+define i32 @test_nacl_read_tp_more_addressing() {
+entry:
+ %ptr = call i8* @llvm.nacl.read.tp()
+ %__1 = ptrtoint i8* %ptr to i32
+ %x = add i32 %__1, %__1
+ %__3 = inttoptr i32 %x to i32*
+ %v = load i32* %__3, align 1
+ %v_add = add i32 %v, 1
+
+ %ptr2 = call i8* @llvm.nacl.read.tp()
+ %__6 = ptrtoint i8* %ptr2 to i32
+ %y = add i32 %__6, -128
+ %__8 = inttoptr i32 %y to i32*
+ %v_add2 = add i32 %v, 4
+ store i32 %v_add2, i32* %__8, align 1
+
+ %z = add i32 %__6, 256
+ %__9 = inttoptr i32 %z to i32*
+ %v_add3 = add i32 %v, 91
+ store i32 %v_add2, i32* %__9, align 1
+
+ ret i32 %v
+}
+; CHECK-LABEL: test_nacl_read_tp_more_addressing
+; CHECK: 65 8b 05 00 00 00 00 mov eax, dword ptr gs:[0]
+; CHECK: 8b 04 00 mov eax, dword ptr [eax + eax]
+; CHECK: 65 8b 0d 00 00 00 00 mov ecx, dword ptr gs:[0]
+; CHECK: 89 51 80 mov dword ptr [ecx - 128], edx
+; CHECK: 89 91 00 01 00 00 mov dword ptr [ecx + 256], edx
+
; ERRORS-NOT: ICE translation error
diff --git a/tests_lit/llvm2ice_tests/8bit.pnacl.ll b/tests_lit/llvm2ice_tests/8bit.pnacl.ll
index 2b31eb8..6e98f0e 100644
--- a/tests_lit/llvm2ice_tests/8bit.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/8bit.pnacl.ll
@@ -276,5 +276,44 @@
; CHECK-LABEL: icmp8BitMemSwapped
; CHECK: cmp {{[abcd]l|byte ptr}}
+define internal i32 @testPhi8(i32 %arg, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) {
+entry:
+ %trunc = trunc i32 %arg to i8
+ %trunc2 = trunc i32 %arg2 to i8
+ %trunc3 = trunc i32 %arg3 to i8
+ %trunc4 = trunc i32 %arg4 to i8
+ %trunc5 = trunc i32 %arg5 to i8
+ %cmp1 = icmp sgt i32 %arg, 0
+ br i1 %cmp1, label %next, label %target
+next:
+ %trunc6_16 = trunc i32 %arg6 to i16
+ %trunc7_16 = trunc i32 %arg7 to i16
+ %trunc8_16 = trunc i32 %arg8 to i16
+ %trunc9 = trunc i32 %arg9 to i8
+ %trunc10 = trunc i32 %arg10 to i8
+ %trunc7_8 = trunc i16 %trunc7_16 to i8
+ %trunc6_8 = trunc i16 %trunc6_16 to i8
+ %trunc8_8 = trunc i16 %trunc8_16 to i8
+ br label %target
+target:
+ %merge1 = phi i1 [ %cmp1, %entry ], [ false, %next ]
+ %merge2 = phi i8 [ %trunc, %entry ], [ %trunc6_8, %next ]
+ %merge3 = phi i8 [ %trunc2, %entry ], [ %trunc7_8, %next ]
+ %merge5 = phi i8 [ %trunc4, %entry ], [ %trunc9, %next ]
+ %merge6 = phi i8 [ %trunc5, %entry ], [ %trunc10, %next ]
+ %merge4 = phi i8 [ %trunc3, %entry ], [ %trunc8_8, %next ]
+ %res1 = select i1 %merge1, i8 %merge2, i8 %merge3
+ %res2 = select i1 %merge1, i8 %merge4, i8 %merge5
+ %res1_2 = select i1 %merge1, i8 %res1, i8 %res2
+ %res123 = select i1 %merge1, i8 %merge6, i8 %res1_2
+ %result = zext i8 %res123 to i32
+ ret i32 %result
+}
+; CHECK-LABEL: testPhi8
+; This assumes there will be some copy from an 8-bit register / stack slot.
+; CHECK-DAG: mov {{.*}}, {{[a-d]}}l
+; CHECK-DAG: mov {{.*}}, byte ptr
+; CHECK-DAG: mov byte ptr {{.*}}
+
; ERRORS-NOT: ICE translation error
; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/convert.ll b/tests_lit/llvm2ice_tests/convert.ll
index 1bc8bdb..c349b5d 100644
--- a/tests_lit/llvm2ice_tests/convert.ll
+++ b/tests_lit/llvm2ice_tests/convert.ll
@@ -43,7 +43,10 @@
; CHECK: mov dword ptr [
; CHECK: movsx
; CHECK: sar {{.*}}, 31
-; CHECK: [8]
+; This appears to be a bug in llvm-mc. It should be [8] and [12] to represent
+; i64v and i64v+4.
+; CHECK-DAG: [8]
+; CHECK-DAG: [8]
define void @from_int16() {
entry:
diff --git a/tests_lit/llvm2ice_tests/globalinit.pnacl.ll b/tests_lit/llvm2ice_tests/globalinit.pnacl.ll
index ba612e3..64af43e 100644
--- a/tests_lit/llvm2ice_tests/globalinit.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/globalinit.pnacl.ll
@@ -1,8 +1,18 @@
; Test of global initializers.
-; RUN: %p2i -i %s --args --verbose inst | FileCheck %s
+; Test -ias=0 to test the lea "hack" until we are fully confident in -ias=1
+; RUN: %p2i -i %s --args --verbose none -ias=0 | FileCheck %s
+
+; Test -ias=1 and try to cross reference instructions w/ the symbol table.
; RUN: %p2i -i %s --args --verbose none \
-; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj
+; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
+; RUN: | llvm-objdump -d -r --symbolize -x86-asm-syntax=intel - \
+; RUN: | FileCheck --check-prefix=IAS %s
+; RUN: %p2i -i %s --args --verbose none \
+; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
+; RUN: | llvm-objdump -d -t --symbolize -x86-asm-syntax=intel - \
+; RUN: | FileCheck --check-prefix=SYMTAB %s
+
; RUN: %p2i -i %s --args --verbose none | FileCheck --check-prefix=ERRORS %s
@PrimitiveInit = internal global [4 x i8] c"\1B\00\00\00", align 4
@@ -114,6 +124,49 @@
; CHECK: leal ArrayUninit,
; CHECK: .intel_syntax
+; llvm-objdump does not indicate which symbol the mov/relocation applies to,
+; so we grep for "mov {{.*}}, OFFSET", along with "OFFSET {{.*}} symbol" in
+; the symbol table as a sanity check. NOTE: The symbol table sorting has no
+; relation to the code's references.
+; IAS-LABEL: main
+; SYMTAB-LABEL: SYMBOL TABLE
+
+; SYMTAB-DAG: 00000000 {{.*}} .data {{.*}} PrimitiveInit
+; IAS: mov {{.*}}, 0
+; IAS-NEXT: R_386_32
+; IAS: call
+
+; SYMTAB-DAG: 00000000 {{.*}} .rodata {{.*}} PrimitiveInitConst
+; IAS: mov {{.*}}, 0
+; IAS-NEXT: R_386_32
+; IAS: call
+
+; SYMTAB-DAG: 00000000 {{.*}} .bss {{.*}} PrimitiveInitStatic
+; IAS: mov {{.*}}, 0
+; IAS-NEXT: R_386_32
+; IAS: call
+
+; SYMTAB-DAG: 00000004 {{.*}} .bss {{.*}} PrimitiveUninit
+; IAS: mov {{.*}}, 4
+; IAS-NEXT: R_386_32
+; IAS: call
+
+; SYMTAB-DAG: 00000004{{.*}}.data{{.*}}ArrayInit
+; IAS: mov {{.*}}, 4
+; IAS-NEXT: R_386_32
+; IAS: call
+
+; SYMTAB-DAG: 00000018 {{.*}} .data {{.*}} ArrayInitPartial
+; IAS: mov {{.*}}, 24
+; IAS-NEXT: R_386_32
+; IAS: call
+
+; SYMTAB-DAG: 00000008 {{.*}} .bss {{.*}} ArrayUninit
+; IAS: mov {{.*}}, 8
+; IAS-NEXT: R_386_32
+; IAS: call
+
+
declare void @use(i32)
define internal i32 @nacl_tp_tdb_offset(i32 %__0) {
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
index 08d9a3b..d29acf3 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
@@ -6,11 +6,11 @@
; TODO(kschimpf) Find out why lc2i is needed.
; RUN: %lc2i -i %s --args -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
-; RUN: | llvm-objdump -d -symbolize -x86-asm-syntax=intel - | FileCheck %s
+; RUN: | llvm-objdump -d -r -symbolize -x86-asm-syntax=intel - | FileCheck %s
; TODO(jvoung): llvm-objdump doesn't symbolize global symbols well, so we
-; have [0] == g32_a, [4] == g32_b, [8] == g32_c.
-; g32_d is also [0] because it's in the .data section instead of .bss.
+; have 0 == g32_a, 4 == g32_b, 8 == g32_c.
+; g32_d is also 0 because it's in the .data section instead of .bss.
declare void @llvm.nacl.atomic.fence.all()
declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
@@ -50,15 +50,18 @@
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; The load + add are optimized into one everywhere.
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: dword ptr [4]
+; CHECK: mov {{.*}}, 4
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
-; CHECK: dword ptr [8]
+; CHECK: mov {{.*}}, 8
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mfence
; CHECK: mov dword ptr
@@ -93,14 +96,17 @@
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: dword ptr [4]
+; CHECK: mov {{.*}}, 4
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
-; CHECK: dword ptr [8]
+; CHECK: mov {{.*}}, 8
+; CHECK-NEXT: R_386_32
; CHECK: mfence
; Load + add can still be optimized into one instruction
; because it is not separated by a fence.
@@ -137,11 +143,13 @@
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: dword ptr [4]
+; CHECK: mov {{.*}}, 4
+; CHECK-NEXT: R_386_32
; This load + add are no longer optimized into one,
; though perhaps it should be legal as long as
; the load stays on the same side of the fence.
@@ -149,7 +157,8 @@
; CHECK: mfence
; CHECK: add {{.*}}, 1
; CHECK: mov dword ptr
-; CHECK: dword ptr [8]
+; CHECK: mov {{.*}}, 8
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
@@ -189,7 +198,8 @@
ret i32 %b1234
}
; CHECK-LABEL: could_have_fused_loads
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
@@ -213,7 +223,8 @@
ret i32 %z
}
; CHECK-LABEL: could_have_hoisted_loads
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: jne {{.*}}
; CHECK: mov {{.*}}, dword ptr
; CHECK: ret
diff --git a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
index f1fc459..1fdefe4 100644
--- a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
+++ b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
@@ -74,11 +74,14 @@
%x = add i32 %__1, %__1
%__3 = inttoptr i32 %x to i32*
%v = load i32* %__3, align 1
+ %v_add = add i32 %v, 1
+
%ptr2 = call i8* @llvm.nacl.read.tp()
%__6 = ptrtoint i8* %ptr2 to i32
%y = add i32 %__6, 4
%__8 = inttoptr i32 %y to i32*
- store i32 %v, i32* %__8, align 1
+ %v_add2 = add i32 %v, 4
+ store i32 %v_add2, i32* %__8, align 1
ret i32 %v
}
; CHECK-LABEL: test_nacl_read_tp_more_addressing