Factor out legalization of undef, and handle more cases for ARM.
By factoring out legalizeUndef(), we can use the same
logic in prelowerPhis, which may help if we ever change the
value used (though if we switch from zeroing out regs to
using uninitialized regs, it'll take more work -- e.g., we
can't return a 64-bit reg).
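For reference, the factored-out helper reduces to the
following (condensed from the ARM32 and x86 versions in the
diff below; RegNum is only used in the vector case):

    Operand *legalizeUndef(Operand *From, int32_t RegNum) {
      Type Ty = From->getType();
      if (llvm::isa<ConstantUndef>(From)) {
        // Lower undefs to zero; an uninitialized register would
        // make the generated code less predictable.
        if (isVectorType(Ty))
          return makeVectorOfZeros(Ty, RegNum);
        return Ctx->getConstantZero(Ty);
      }
      return From;
    }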
For x86, use legalizeUndef() where it's clear that the value
is immediately fed to loOperand()/hiOperand() and then another
legalize() call. Otherwise, leave the general
X = legalize(X); alone where the code is counting on that
being the sole legalization.
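E.g., the x86 store lowering for i64 (see the hunk below)
now reads:

    Value = legalizeUndef(Value);
    Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm);
    Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm);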
For x86, legalize(const64) is a pass-through, which can then
be passed to loOperand()/hiOperand() nicely. However, for ARM,
legalize(const64) may end up trying to copy the const64 to
a register, but we don't have 64-bit registers. Instead, do
legalizeUndef(X) where x86 would have just done
legalize(X). This happens to work because legalizeUndef()
doesn't try to copy to a register, and we immediately pass the
result to loOperand()/hiOperand(), which then passes the
result to a real legalization call.
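E.g., the ARM32 switch lowering for an i64 comparison (see
the hunk below) becomes:

    Src0 = legalizeUndef(Src0);
    Variable *Src0Lo = legalizeToVar(loOperand(Src0));
    Variable *Src0Hi = legalizeToVar(hiOperand(Src0));

where a plain legalize(Src0) could have tried to materialize
the whole 64-bit constant in a single register.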
Add a few more undef tests.
BUG= https://code.google.com/p/nativeclient/issues/detail?id=4076
R=stichnot@chromium.org
Review URL: https://codereview.chromium.org/1233903002 .
diff --git a/src/IceTargetLoweringARM32.cpp b/src/IceTargetLoweringARM32.cpp
index 10fdfe1..0dbcfb1 100644
--- a/src/IceTargetLoweringARM32.cpp
+++ b/src/IceTargetLoweringARM32.cpp
@@ -863,14 +863,14 @@
assert(Operand->getType() == IceType_i64);
if (Operand->getType() != IceType_i64)
return Operand;
- if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
+ if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
split64(Var);
return Var->getLo();
}
- if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
+ if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
return Ctx->getConstantInt32(static_cast<uint32_t>(Const->getValue()));
}
- if (OperandARM32Mem *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand)) {
+ if (auto *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand)) {
// Conservatively disallow memory operands with side-effects (pre/post
// increment) in case of duplication.
assert(Mem->getAddrMode() == OperandARM32Mem::Offset ||
@@ -892,15 +892,15 @@
assert(Operand->getType() == IceType_i64);
if (Operand->getType() != IceType_i64)
return Operand;
- if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
+ if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
split64(Var);
return Var->getHi();
}
- if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
+ if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
return Ctx->getConstantInt32(
static_cast<uint32_t>(Const->getValue() >> 32));
}
- if (OperandARM32Mem *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand)) {
+ if (auto *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand)) {
// Conservatively disallow memory operands with side-effects
// in case of duplication.
assert(Mem->getAddrMode() == OperandARM32Mem::Offset ||
@@ -1012,7 +1012,7 @@
} else {
// Non-constant sizes need to be adjusted to the next highest
// multiple of the required alignment at runtime.
- TotalSize = legalize(TotalSize);
+ TotalSize = legalize(TotalSize, Legal_Reg | Legal_Flex);
Variable *T = makeReg(IceType_i32);
_mov(T, TotalSize);
Operand *AddAmount = legalize(Ctx->getConstantInt32(Alignment - 1));
@@ -1101,8 +1101,8 @@
// Or it may be the case that the operands aren't swapped, but the
// bits can be flipped and a different operation applied.
// E.g., use BIC (bit clear) instead of AND for some masks.
- Operand *Src0 = Inst->getSrc(0);
- Operand *Src1 = Inst->getSrc(1);
+ Operand *Src0 = legalizeUndef(Inst->getSrc(0));
+ Operand *Src1 = legalizeUndef(Inst->getSrc(1));
if (Dest->getType() == IceType_i64) {
// These helper-call-involved instructions are lowered in this
// separate switch. This is because we would otherwise assume that
@@ -1458,9 +1458,9 @@
Operand *Src0 = Inst->getSrc(0);
assert(Dest->getType() == Src0->getType());
if (Dest->getType() == IceType_i64) {
- Src0 = legalize(Src0);
- Operand *Src0Lo = loOperand(Src0);
- Operand *Src0Hi = hiOperand(Src0);
+ Src0 = legalizeUndef(Src0);
+ Operand *Src0Lo = legalize(loOperand(Src0), Legal_Reg | Legal_Flex);
+ Operand *Src0Hi = legalize(hiOperand(Src0), Legal_Reg | Legal_Flex);
Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
Variable *T_Lo = nullptr, *T_Hi = nullptr;
@@ -1523,7 +1523,7 @@
// Classify each argument operand according to the location where the
// argument is passed.
for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) {
- Operand *Arg = Instr->getArg(i);
+ Operand *Arg = legalizeUndef(Instr->getArg(i));
Type Ty = Arg->getType();
bool InRegs = false;
if (isVectorType(Ty)) {
@@ -1703,7 +1703,7 @@
void TargetARM32::lowerCast(const InstCast *Inst) {
InstCast::OpKind CastKind = Inst->getCastKind();
Variable *Dest = Inst->getDest();
- Operand *Src0 = Inst->getSrc(0);
+ Operand *Src0 = legalizeUndef(Inst->getSrc(0));
switch (CastKind) {
default:
Func->setError("Cast type not supported");
@@ -1808,7 +1808,6 @@
if (isVectorType(Dest->getType())) {
UnimplementedError(Func->getContext()->getFlags());
} else {
- Operand *Src0 = Inst->getSrc(0);
if (Src0->getType() == IceType_i64)
Src0 = loOperand(Src0);
Operand *Src0RF = legalize(Src0, Legal_Reg | Legal_Flex);
@@ -1866,8 +1865,8 @@
void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
Variable *Dest = Inst->getDest();
- Operand *Src0 = Inst->getSrc(0);
- Operand *Src1 = Inst->getSrc(1);
+ Operand *Src0 = legalizeUndef(Inst->getSrc(0));
+ Operand *Src1 = legalizeUndef(Inst->getSrc(1));
if (isVectorType(Dest->getType())) {
UnimplementedError(Func->getContext()->getFlags());
@@ -2036,6 +2035,7 @@
Operand *Val = Instr->getArg(0);
Type Ty = Val->getType();
if (Ty == IceType_i64) {
+ Val = legalizeUndef(Val);
Variable *Val_Lo = legalizeToVar(loOperand(Val));
Variable *Val_Hi = legalizeToVar(hiOperand(Val));
Variable *T_Lo = makeReg(IceType_i32);
@@ -2088,6 +2088,7 @@
Variable *ValLoR;
Variable *ValHiR = nullptr;
if (Val->getType() == IceType_i64) {
+ Val = legalizeUndef(Val);
ValLoR = legalizeToVar(loOperand(Val));
ValHiR = legalizeToVar(hiOperand(Val));
} else {
@@ -2102,6 +2103,7 @@
Variable *ValLoR;
Variable *ValHiR = nullptr;
if (Val->getType() == IceType_i64) {
+ Val = legalizeUndef(Val);
ValLoR = legalizeToVar(loOperand(Val));
ValHiR = legalizeToVar(hiOperand(Val));
Variable *TLo = makeReg(IceType_i32);
@@ -2268,6 +2270,7 @@
if (Inst->hasRetValue()) {
Operand *Src0 = Inst->getRetValue();
if (Src0->getType() == IceType_i64) {
+ Src0 = legalizeUndef(Src0);
Variable *R0 = legalizeToVar(loOperand(Src0), RegARM32::Reg_r0);
Variable *R1 = legalizeToVar(hiOperand(Src0), RegARM32::Reg_r1);
Reg = R0;
@@ -2318,6 +2321,8 @@
_cmp(CmpOpnd0, CmpOpnd1);
CondARM32::Cond Cond = CondARM32::NE;
if (DestTy == IceType_i64) {
+ SrcT = legalizeUndef(SrcT);
+ SrcF = legalizeUndef(SrcF);
// Set the low portion.
Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
Variable *TLo = nullptr;
@@ -2351,6 +2356,7 @@
Type Ty = NewAddr->getType();
if (Ty == IceType_i64) {
+ Value = legalizeUndef(Value);
Variable *ValueHi = legalizeToVar(hiOperand(Value));
Variable *ValueLo = legalizeToVar(loOperand(Value));
_str(ValueHi, llvm::cast<OperandARM32Mem>(hiOperand(NewAddr)));
@@ -2373,7 +2379,7 @@
Operand *Src0 = Inst->getComparison();
SizeT NumCases = Inst->getNumCases();
if (Src0->getType() == IceType_i64) {
- // TODO(jvoung): handle and test undef for Src0
+ Src0 = legalizeUndef(Src0);
Variable *Src0Lo = legalizeToVar(loOperand(Src0));
Variable *Src0Hi = legalizeToVar(hiOperand(Src0));
for (SizeT I = 0; I < NumCases; ++I) {
@@ -2444,6 +2450,7 @@
Operand *TargetARM32::legalize(Operand *From, LegalMask Allowed,
int32_t RegNum) {
+ Type Ty = From->getType();
// Assert that a physical register is allowed. To date, all calls
// to legalize() allow a physical register. Legal_Flex converts
// registers to the right type OperandARM32FlexReg as needed.
@@ -2471,16 +2478,15 @@
// There is only a reg +/- reg or reg + imm form.
// Figure out which to re-create.
if (Mem->isRegReg()) {
- Mem = OperandARM32Mem::create(Func, Mem->getType(), RegBase, RegIndex,
+ Mem = OperandARM32Mem::create(Func, Ty, RegBase, RegIndex,
Mem->getShiftOp(), Mem->getShiftAmt(),
Mem->getAddrMode());
} else {
- Mem = OperandARM32Mem::create(Func, Mem->getType(), RegBase,
- Mem->getOffset(), Mem->getAddrMode());
+ Mem = OperandARM32Mem::create(Func, Ty, RegBase, Mem->getOffset(),
+ Mem->getAddrMode());
}
}
if (!(Allowed & Legal_Mem)) {
- Type Ty = Mem->getType();
Variable *Reg = makeReg(Ty, RegNum);
_ldr(Reg, Mem);
From = Reg;
@@ -2510,17 +2516,14 @@
if (llvm::isa<Constant>(From)) {
if (llvm::isa<ConstantUndef>(From)) {
- // Lower undefs to zero. Another option is to lower undefs to an
- // uninitialized register; however, using an uninitialized register
- // results in less predictable code.
- if (isVectorType(From->getType()))
- return makeVectorOfZeros(From->getType(), RegNum);
- From = Ctx->getConstantZero(From->getType());
+ From = legalizeUndef(From, RegNum);
+ if (isVectorType(Ty))
+ return From;
}
// There should be no constants of vector type (other than undef).
- assert(!isVectorType(From->getType()));
+ assert(!isVectorType(Ty));
bool CanBeFlex = Allowed & Legal_Flex;
- if (auto C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
+ if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
uint32_t RotateAmt;
uint32_t Immed_8;
uint32_t Value = static_cast<uint32_t>(C32->getValue());
@@ -2530,19 +2533,16 @@
// Also try the inverse and use MVN if possible.
if (CanBeFlex &&
OperandARM32FlexImm::canHoldImm(Value, &RotateAmt, &Immed_8)) {
- return OperandARM32FlexImm::create(Func, From->getType(), Immed_8,
- RotateAmt);
+ return OperandARM32FlexImm::create(Func, Ty, Immed_8, RotateAmt);
} else if (CanBeFlex && OperandARM32FlexImm::canHoldImm(
~Value, &RotateAmt, &Immed_8)) {
- auto InvertedFlex = OperandARM32FlexImm::create(Func, From->getType(),
- Immed_8, RotateAmt);
- Type Ty = From->getType();
+ auto InvertedFlex =
+ OperandARM32FlexImm::create(Func, Ty, Immed_8, RotateAmt);
Variable *Reg = makeReg(Ty, RegNum);
_mvn(Reg, InvertedFlex);
return Reg;
} else {
// Do a movw/movt to a register.
- Type Ty = From->getType();
Variable *Reg = makeReg(Ty, RegNum);
uint32_t UpperBits = (Value >> 16) & 0xFFFF;
_movw(Reg,
@@ -2552,8 +2552,7 @@
}
return Reg;
}
- } else if (auto C = llvm::dyn_cast<ConstantRelocatable>(From)) {
- Type Ty = From->getType();
+ } else if (auto *C = llvm::dyn_cast<ConstantRelocatable>(From)) {
Variable *Reg = makeReg(Ty, RegNum);
_movw(Reg, C);
_movt(Reg, C);
@@ -2586,11 +2585,33 @@
return From;
}
-// Provide a trivial wrapper to legalize() for this common usage.
+/// Provide a trivial wrapper to legalize() for this common usage.
Variable *TargetARM32::legalizeToVar(Operand *From, int32_t RegNum) {
return llvm::cast<Variable>(legalize(From, Legal_Reg, RegNum));
}
+/// Legalize undef values to concrete values.
+Operand *TargetARM32::legalizeUndef(Operand *From, int32_t RegNum) {
+ Type Ty = From->getType();
+ if (llvm::isa<ConstantUndef>(From)) {
+ // Lower undefs to zero. Another option is to lower undefs to an
+ // uninitialized register; however, using an uninitialized register
+ // results in less predictable code.
+ //
+ // If in the future the implementation is changed to lower undef
+ // values to uninitialized registers, a FakeDef will be needed:
+ // Context.insert(InstFakeDef::create(Func, Reg));
+ // This is in order to ensure that the live range of Reg is not
+ // overestimated. If the constant being lowered is a 64 bit value,
+ // then the result should be split and the lo and hi components will
+ // need to go in uninitialized registers.
+ if (isVectorType(Ty))
+ return makeVectorOfZeros(Ty, RegNum);
+ return Ctx->getConstantZero(Ty);
+ }
+ return From;
+}
+
OperandARM32Mem *TargetARM32::formMemoryOperand(Operand *Operand, Type Ty) {
OperandARM32Mem *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand);
// It may be the case that address mode optimization already creates
diff --git a/src/IceTargetLoweringARM32.h b/src/IceTargetLoweringARM32.h
index becb615..4c95d3c 100644
--- a/src/IceTargetLoweringARM32.h
+++ b/src/IceTargetLoweringARM32.h
@@ -146,6 +146,7 @@
Operand *legalize(Operand *From, LegalMask Allowed = Legal_All,
int32_t RegNum = Variable::NoRegister);
Variable *legalizeToVar(Operand *From, int32_t RegNum = Variable::NoRegister);
+ Operand *legalizeUndef(Operand *From, int32_t RegNum = Variable::NoRegister);
OperandARM32Mem *formMemoryOperand(Operand *Ptr, Type Ty);
Variable *makeReg(Type Ty, int32_t RegNum = Variable::NoRegister);
diff --git a/src/IceTargetLoweringX86Base.h b/src/IceTargetLoweringX86Base.h
index c00f0b0..69b88ee 100644
--- a/src/IceTargetLoweringX86Base.h
+++ b/src/IceTargetLoweringX86Base.h
@@ -230,6 +230,7 @@
Operand *legalize(Operand *From, LegalMask Allowed = Legal_All,
int32_t RegNum = Variable::NoRegister);
Variable *legalizeToVar(Operand *From, int32_t RegNum = Variable::NoRegister);
+ Operand *legalizeUndef(Operand *From, int32_t RegNum = Variable::NoRegister);
/// Legalize the first source operand for use in the cmp instruction.
Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1);
/// Turn a pointer operand into a memory operand that can be
diff --git a/src/IceTargetLoweringX86BaseImpl.h b/src/IceTargetLoweringX86BaseImpl.h
index a277db2..0ac63b8 100644
--- a/src/IceTargetLoweringX86BaseImpl.h
+++ b/src/IceTargetLoweringX86BaseImpl.h
@@ -1164,13 +1164,14 @@
Operand->getType() == IceType_f64);
if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
return Operand;
- if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
+ if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
split64(Var);
return Var->getLo();
}
- if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
- ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>(
+ if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
+ auto *ConstInt = llvm::dyn_cast<ConstantInteger32>(
Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue())));
+ // Check if we need to blind/pool the constant.
return legalize(ConstInt);
}
if (auto *Mem = llvm::dyn_cast<typename Traits::X86OperandMem>(Operand)) {
@@ -1192,25 +1193,23 @@
Operand->getType() == IceType_f64);
if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
return Operand;
- if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
+ if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
split64(Var);
return Var->getHi();
}
- if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
- ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>(
+ if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
+ auto *ConstInt = llvm::dyn_cast<ConstantInteger32>(
Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue() >> 32)));
- // check if we need to blind/pool the constant
+ // Check if we need to blind/pool the constant.
return legalize(ConstInt);
}
if (auto *Mem = llvm::dyn_cast<typename Traits::X86OperandMem>(Operand)) {
Constant *Offset = Mem->getOffset();
if (Offset == nullptr) {
Offset = Ctx->getConstantInt32(4);
- } else if (ConstantInteger32 *IntOffset =
- llvm::dyn_cast<ConstantInteger32>(Offset)) {
+ } else if (auto *IntOffset = llvm::dyn_cast<ConstantInteger32>(Offset)) {
Offset = Ctx->getConstantInt32(4 + IntOffset->getValue());
- } else if (ConstantRelocatable *SymOffset =
- llvm::dyn_cast<ConstantRelocatable>(Offset)) {
+ } else if (auto *SymOffset = llvm::dyn_cast<ConstantRelocatable>(Offset)) {
assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4));
Offset =
Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName(),
@@ -2453,7 +2452,7 @@
_pand(T, OneMask);
_movp(Dest, T);
} else {
- Operand *Src0 = Inst->getSrc(0);
+ Operand *Src0 = legalizeUndef(Inst->getSrc(0));
if (Src0->getType() == IceType_i64)
Src0 = loOperand(Src0);
Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
@@ -3261,9 +3260,9 @@
return;
}
Variable *DestPrev = Instr->getDest();
- Operand *PtrToMem = Instr->getArg(0);
- Operand *Expected = Instr->getArg(1);
- Operand *Desired = Instr->getArg(2);
+ Operand *PtrToMem = legalize(Instr->getArg(0));
+ Operand *Expected = legalize(Instr->getArg(1));
+ Operand *Desired = legalize(Instr->getArg(2));
if (tryOptimizedCmpxchgCmpBr(DestPrev, PtrToMem, Expected, Desired))
return;
lowerAtomicCmpxchg(DestPrev, PtrToMem, Expected, Desired);
@@ -3397,6 +3396,7 @@
// In 32-bit mode, bswap only works on 32-bit arguments, and the
// argument must be a register. Use rotate left for 16-bit bswap.
if (Val->getType() == IceType_i64) {
+ Val = legalizeUndef(Val);
Variable *T_Lo = legalizeToVar(loOperand(Val));
Variable *T_Hi = legalizeToVar(hiOperand(Val));
Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
@@ -3411,9 +3411,9 @@
_mov(Dest, T);
} else {
assert(Val->getType() == IceType_i16);
- Val = legalize(Val);
Constant *Eight = Ctx->getConstantInt16(8);
Variable *T = nullptr;
+ Val = legalize(Val);
_mov(T, Val);
_rol(T, Eight);
_mov(Dest, T);
@@ -4411,6 +4411,8 @@
Cond = InstX86Base<Machine>::getOppositeCondition(Cond);
}
if (DestTy == IceType_i64) {
+ SrcT = legalizeUndef(SrcT);
+ SrcF = legalizeUndef(SrcF);
// Set the low portion.
Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
Variable *TLo = nullptr;
@@ -4448,7 +4450,7 @@
Type Ty = NewAddr->getType();
if (Ty == IceType_i64) {
- Value = legalize(Value);
+ Value = legalizeUndef(Value);
Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm);
Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm);
_store(ValueHi,
@@ -4497,7 +4499,7 @@
Operand *Src0 = Inst->getComparison();
SizeT NumCases = Inst->getNumCases();
if (Src0->getType() == IceType_i64) {
- Src0 = legalize(Src0); // get Base/Index into physical registers
+ Src0 = legalizeUndef(Src0);
Operand *Src0Lo = loOperand(Src0);
Operand *Src0Hi = hiOperand(Src0);
if (NumCases >= 2) {
@@ -4613,6 +4615,7 @@
Type Ty = Src->getType();
typename Traits::X86OperandMem *Addr = formMemoryOperand(RMW->getAddr(), Ty);
if (Ty == IceType_i64) {
+ Src = legalizeUndef(Src);
Operand *SrcLo = legalize(loOperand(Src), Legal_Reg | Legal_Imm);
Operand *SrcHi = legalize(hiOperand(Src), Legal_Reg | Legal_Imm);
typename Traits::X86OperandMem *AddrLo =
@@ -4708,8 +4711,7 @@
for (SizeT I = 0; I < Phi->getSrcSize(); ++I) {
Operand *Src = Phi->getSrc(I);
CfgNode *Label = Phi->getLabel(I);
- if (llvm::isa<ConstantUndef>(Src))
- Src = Ctx->getConstantZero(Dest->getType());
+ Src = legalizeUndef(Src);
PhiLo->addArgument(loOperand(Src), Label);
PhiHi->addArgument(hiOperand(Src), Label);
}
@@ -4791,22 +4793,18 @@
auto Assign = llvm::dyn_cast<InstAssign>(&I);
Variable *Dest = Assign->getDest();
- // If the source operand is ConstantUndef, do not legalize it.
- // In function test_split_undef_int_vec, the advanced phi
- // lowering process will find an assignment of undefined
- // vector. This vector, as the Src here, will crash if it
- // go through legalize(). legalize() will create new variable
- // with makeVectorOfZeros(), but this new variable will be
- // assigned a stack slot. This will fail the assertion in
- // IceInstX8632.cpp:789, as XmmEmitterRegOp() complain:
- // Var->hasReg() fails. Note this failure is irrelevant to
- // randomization or pooling of constants.
- // So, we do not call legalize() to add pool label for the
- // src operands of phi assignment instructions.
- // Instead, we manually add pool label for constant float and
- // constant double values here.
- // Note going through legalize() does not affect the testing
- // results of SPEC2K and xtests.
+ // If the source operand is ConstantUndef, do not legalize it. In function
+ // test_split_undef_int_vec, the advanced phi lowering process will find an
+ // assignment of an undefined vector. This vector, as the Src, will crash
+ // if it goes through legalize(). legalize() will create a new variable
+ // with makeVectorOfZeros(), but this new variable will be assigned a stack
+ // slot. This will fail with pxor(Var, Var) because it is an illegal
+ // instruction form. Note this failure is irrelevant to randomization or
+ // pooling of constants. So, we do not call legalize() to add a pool label
+ // for the src operands of phi assignment instructions. Instead, we
+ // manually add pool labels for constant float and constant double values
+ // here. Note going through legalize() does not affect the testing results
+ // of SPEC2K and xtests.
Operand *Src = Assign->getSrc(0);
if (!llvm::isa<ConstantUndef>(Assign->getSrc(0))) {
Src = legalize(Src);
@@ -5029,21 +5027,10 @@
}
if (auto *Const = llvm::dyn_cast<Constant>(From)) {
if (llvm::isa<ConstantUndef>(Const)) {
- // Lower undefs to zero. Another option is to lower undefs to an
- // uninitialized register; however, using an uninitialized register
- // results in less predictable code.
- //
- // If in the future the implementation is changed to lower undef
- // values to uninitialized registers, a FakeDef will be needed:
- // Context.insert(InstFakeDef::create(Func, Reg));
- // This is in order to ensure that the live range of Reg is not
- // overestimated. If the constant being lowered is a 64 bit value,
- // then the result should be split and the lo and hi components will
- // need to go in uninitialized registers.
+ From = legalizeUndef(Const, RegNum);
if (isVectorType(Ty))
- return makeVectorOfZeros(Ty, RegNum);
- Const = Ctx->getConstantZero(Ty);
- From = Const;
+ return From;
+ Const = llvm::cast<Constant>(From);
}
// There should be no constants of vector type (other than undef).
assert(!isVectorType(Ty));
@@ -5105,6 +5092,29 @@
return llvm::cast<Variable>(legalize(From, Legal_Reg, RegNum));
}
+/// Legalize undef values to concrete values.
+template <class Machine>
+Operand *TargetX86Base<Machine>::legalizeUndef(Operand *From, int32_t RegNum) {
+ Type Ty = From->getType();
+ if (llvm::isa<ConstantUndef>(From)) {
+ // Lower undefs to zero. Another option is to lower undefs to an
+ // uninitialized register; however, using an uninitialized register
+ // results in less predictable code.
+ //
+ // If in the future the implementation is changed to lower undef
+ // values to uninitialized registers, a FakeDef will be needed:
+ // Context.insert(InstFakeDef::create(Func, Reg));
+ // This is in order to ensure that the live range of Reg is not
+ // overestimated. If the constant being lowered is a 64 bit value,
+ // then the result should be split and the lo and hi components will
+ // need to go in uninitialized registers.
+ if (isVectorType(Ty))
+ return makeVectorOfZeros(Ty, RegNum);
+ return Ctx->getConstantZero(Ty);
+ }
+ return From;
+}
+
/// For the cmp instruction, if Src1 is an immediate, or known to be a
/// physical register, we can allow Src0 to be a memory operand.
/// Otherwise, Src0 must be copied into a physical register.
@@ -5117,7 +5127,7 @@
bool IsSrc1ImmOrReg = false;
if (llvm::isa<Constant>(Src1)) {
IsSrc1ImmOrReg = true;
- } else if (Variable *Var = llvm::dyn_cast<Variable>(Src1)) {
+ } else if (auto *Var = llvm::dyn_cast<Variable>(Src1)) {
if (Var->hasReg())
IsSrc1ImmOrReg = true;
}
@@ -5141,7 +5151,7 @@
// the constant offset, we will work on the whole memory
// operand later as one entity later, this save one instruction.
// By turning blinding and pooling off, we guarantee
- // legalize(Offset) will return a constant*.
+ // legalize(Offset) will return a Constant*.
{
BoolFlagSaver B(RandomizationPoolingPaused, true);
@@ -5357,8 +5367,8 @@
MemOperand->getShift(),
MemOperand->getSegmentRegister());
- // Label this memory operand as randomize, so we won't randomize it
- // again in case we call legalize() mutiple times on this memory
+ // Label this memory operand as randomized, so we won't randomize it
+ // again in case we call legalize() multiple times on this memory
// operand.
NewMemOperand->setRandomized(true);
return NewMemOperand;
diff --git a/tests_lit/llvm2ice_tests/bool-folding.ll b/tests_lit/llvm2ice_tests/bool-folding.ll
index 13a56b6..13ee8c4 100644
--- a/tests_lit/llvm2ice_tests/bool-folding.ll
+++ b/tests_lit/llvm2ice_tests/bool-folding.ll
@@ -109,6 +109,17 @@
; CHECK: cmovl
; CHECK: cmovl
+define i64 @fold_cmp_select_64_undef(i64 %arg1) {
+entry:
+ %arg1_trunc = trunc i64 %arg1 to i32
+ %cmp1 = icmp slt i32 undef, %arg1_trunc
+ %result = select i1 %cmp1, i64 %arg1, i64 undef
+ ret i64 %result
+}
+; CHECK-LABEL: fold_cmp_select_64_undef
+; CHECK: cmp
+; CHECK: cmovl
+; CHECK: cmovl
; Cmp/select folding with intervening instructions.
define i32 @fold_cmp_select_intervening_insts(i32 %arg1, i32 %arg2) {
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll b/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll
index ee81a09..4b8d520 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll
@@ -771,6 +771,16 @@
; need to be reshuffled via movs. The next test stores the result
; somewhere, so in that case they do need to be mov'ed.
+define i64 @test_atomic_cmpxchg_64_undef(i32 %iptr, i64 %desired) {
+entry:
+ %ptr = inttoptr i32 %iptr to i64*
+ %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 undef,
+ i64 %desired, i32 6, i32 6)
+ ret i64 %old
+}
+; CHECK-LABEL: test_atomic_cmpxchg_64_undef
+; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}}+0x0]
+
; Test a case where %old really does need to be copied out of edx:eax.
define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) {
entry:
diff --git a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
index ef72d6e..ebc39c2 100644
--- a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
+++ b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
@@ -416,6 +416,18 @@
; ARM32: rev
; ARM32: rev
+define i64 @test_bswap_64_undef() {
+entry:
+ %r = call i64 @llvm.bswap.i64(i64 undef)
+ ret i64 %r
+}
+; CHECK-LABEL: test_bswap_64_undef
+; CHECK: bswap e{{.*}}
+; CHECK: bswap e{{.*}}
+; ARM32-LABEL: test_bswap_64_undef
+; ARM32: rev
+; ARM32: rev
+
define i32 @test_ctlz_32(i32 %x) {
entry:
%r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
diff --git a/tests_lit/llvm2ice_tests/rmw.ll b/tests_lit/llvm2ice_tests/rmw.ll
index 12d365d..cd78977 100644
--- a/tests_lit/llvm2ice_tests/rmw.ll
+++ b/tests_lit/llvm2ice_tests/rmw.ll
@@ -130,3 +130,15 @@
}
; CHECK-LABEL: no_rmw_sub_i32_var
; CHECK: sub e{{ax|bx|cx|dx|bp|di|si}},DWORD PTR [e{{ax|bx|cx|dx|bp|di|si}}]
+
+define internal void @rmw_add_i64_undef(i32 %addr_arg) {
+entry:
+ %addr = inttoptr i32 %addr_arg to i64*
+ %val = load i64, i64* %addr, align 1
+ %rmw = add i64 %val, undef
+ store i64 %rmw, i64* %addr, align 1
+ ret void
+}
+; CHECK-LABEL: rmw_add_i64_undef
+; CHECK: add DWORD PTR [e{{ax|bx|cx|dx|bp|di|si}}],0x0
+; CHECK: adc DWORD PTR [e{{ax|bx|cx|dx|bp|di|si}}+0x4],0x0
diff --git a/tests_lit/llvm2ice_tests/switch-opt.ll b/tests_lit/llvm2ice_tests/switch-opt.ll
index 3cf0daf..5202b8d 100644
--- a/tests_lit/llvm2ice_tests/switch-opt.ll
+++ b/tests_lit/llvm2ice_tests/switch-opt.ll
@@ -147,3 +147,18 @@
; ARM32-NEXT: beq [[ADDR:[0-9a-f]+]]
; ARM32-NEXT: b [[ADDR]]
+define i32 @testSwitchUndef64() {
+entry:
+ switch i64 undef, label %sw.default [
+ i64 1, label %sw.default
+ ]
+
+sw.default:
+ ret i32 20
+}
+; CHECK-LABEL: testSwitchUndef64
+; CHECK: mov {{.*}},0x0
+; CHECK: mov {{.*}},0x0
+; ARM32-LABEL: testSwitchUndef64
+; ARM32: movw {{.*}}, #0
+; ARM32: movw {{.*}}, #0
diff --git a/tests_lit/llvm2ice_tests/undef.ll b/tests_lit/llvm2ice_tests/undef.ll
index 9d9014a..bf06b05 100644
--- a/tests_lit/llvm2ice_tests/undef.ll
+++ b/tests_lit/llvm2ice_tests/undef.ll
@@ -25,6 +25,15 @@
; CHECK: ret
}
+define i32 @trunc_undef_i64() {
+entry:
+ %ret = trunc i64 undef to i32
+ ret i32 %ret
+; CHECK-LABEL: trunc_undef_i64
+; CHECK: mov eax,0x0
+; CHECK: ret
+}
+
define float @undef_float() {
entry:
ret float undef