blob: 89fc2032c6d95afd46d69a544a3e2c211ff533d8 [file] [log] [blame]
John Porto2fea26c2015-07-28 16:28:07 -07001//===- subzero/src/IceTargetLoweringX8664Traits.h - x86-64 traits -*- C++ -*-=//
2//
3// The Subzero Code Generator
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9///
10/// \file
11/// This file declares the X8664 Target Lowering Traits.
12///
13//===----------------------------------------------------------------------===//
14
15#ifndef SUBZERO_SRC_ICETARGETLOWERINGX8664TRAITS_H
16#define SUBZERO_SRC_ICETARGETLOWERINGX8664TRAITS_H
17
18#include "IceAssembler.h"
19#include "IceConditionCodesX8664.h"
20#include "IceDefs.h"
21#include "IceInst.h"
22#include "IceInstX8664.def"
23#include "IceOperand.h"
24#include "IceRegistersX8664.h"
25#include "IceTargetLowering.h"
John Porto453660f2015-07-31 14:52:52 -070026#include "IceTargetLoweringX8664.def"
John Porto2fea26c2015-07-28 16:28:07 -070027
28namespace Ice {
29
30class TargetX8664;
31
32namespace X8664 {
33class AssemblerX8664;
34} // end of namespace X8664
35
36namespace X86Internal {
37
38template <class Machine> struct Insts;
39template <class Machine> struct MachineTraits;
John Porto453660f2015-07-31 14:52:52 -070040template <class Machine> class TargetX86Base;
John Porto2fea26c2015-07-28 16:28:07 -070041
template <> struct MachineTraits<TargetX8664> {
  //----------------------------------------------------------------------------
  //     ______  ______  __    __
  //    /\  __ \/\  ___\/\ "-./  \
  //    \ \  __ \ \___  \ \ \-./\ \
  //     \ \_\ \_\/\_____\ \_\ \ \_\
  //      \/_/\/_/\/_____/\/_/  \/_/
  //
  //----------------------------------------------------------------------------
  // Architecture traits consulted by the shared x86 templates
  // (TargetX86Base / AssemblerX86Base): x86-64 has no pusha/popa, and this
  // backend does not use the x87 FPU.
  static constexpr bool Is64Bit = true;
  static constexpr bool HasPopa = false;
  static constexpr bool HasPusha = false;
  static constexpr bool UsesX87 = false;
  // Highest register usable as an 8-bit GPR operand; with a REX prefix all
  // 16 GPRs have 8-bit forms, so this is r15.
  static constexpr ::Ice::RegX8664::GPRRegister Last8BitGPR =
      ::Ice::RegX8664::GPRRegister::Encoded_Reg_r15d;

  /// SIB-byte scale factors (the 2-bit "scale" field encodings).
  enum ScaleFactor { TIMES_1 = 0, TIMES_2 = 1, TIMES_4 = 2, TIMES_8 = 3 };

  using GPRRegister = ::Ice::RegX8664::GPRRegister;
  using XmmRegister = ::Ice::RegX8664::XmmRegister;
  using ByteRegister = ::Ice::RegX8664::ByteRegister;

  using Cond = ::Ice::CondX8664;

  using RegisterSet = ::Ice::RegX8664;
  static const GPRRegister Encoded_Reg_Accumulator = RegX8664::Encoded_Reg_eax;
  static const GPRRegister Encoded_Reg_Counter = RegX8664::Encoded_Reg_ecx;
  // NOTE(review): R_386_PC32 is an x86-32 relocation type; for an x86-64
  // target this should presumably be llvm::ELF::R_X86_64_PC32 -- confirm
  // against the ELF emitter (the author's TODO marks it as a placeholder).
  static const FixupKind PcRelFixup = llvm::ELF::R_386_PC32; // TODO(jpp): ???
70
  /// Operand is the low-level x86-64 operand encoder. It accumulates the
  /// ModRM byte, an optional SIB byte, and an optional 8- or 32-bit
  /// displacement in encoding_ (at most 1 + 1 + 4 = 6 bytes), while tracking
  /// which REX prefix bits (REX.B / REX.X) the chosen registers require.
  class Operand {
  public:
    /// REX prefix bits. RexBase (0x40) is the fixed REX marker; W/R/X/B are
    /// the four payload bits of the prefix byte.
    enum RexBits {
      RexNone = 0x00,
      RexBase = 0x40,
      RexW = RexBase | (1 << 3),
      RexR = RexBase | (1 << 2),
      RexX = RexBase | (1 << 1),
      RexB = RexBase | (1 << 0),
    };

    // Copy only the length_ valid bytes of the encoding buffer.
    Operand(const Operand &other)
        : fixup_(other.fixup_), rex_(other.rex_), length_(other.length_) {
      memmove(&encoding_[0], &other.encoding_[0], other.length_);
    }

    Operand &operator=(const Operand &other) {
      length_ = other.length_;
      fixup_ = other.fixup_;
      rex_ = other.rex_;
      memmove(&encoding_[0], &other.encoding_[0], other.length_);
      return *this;
    }

    /// The mod field (bits 7:6) of the ModRM byte.
    uint8_t mod() const { return (encoding_at(0) >> 6) & 3; }

    /// Return RexX/RexB when the corresponding REX bit is required, else
    /// RexNone.
    uint8_t rexX() const { return (rex_ & RexX) != RexX ? RexNone : RexX; }
    uint8_t rexB() const { return (rex_ & RexB) != RexB ? RexNone : RexB; }

    /// The rm field of the ModRM byte, widened to 4 bits with REX.B.
    GPRRegister rm() const {
      return static_cast<GPRRegister>((rexB() != 0 ? 0x08 : 0) |
                                      (encoding_at(0) & 7));
    }

    /// The scale field (bits 7:6) of the SIB byte.
    ScaleFactor scale() const {
      return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
    }

    /// The index field of the SIB byte, widened to 4 bits with REX.X.
    GPRRegister index() const {
      return static_cast<GPRRegister>((rexX() != 0 ? 0x08 : 0) |
                                      ((encoding_at(1) >> 3) & 7));
    }

    /// The base field of the SIB byte, widened to 4 bits with REX.B.
    GPRRegister base() const {
      return static_cast<GPRRegister>((rexB() != 0 ? 0x08 : 0) |
                                      (encoding_at(1) & 7));
    }

    /// The trailing 8-bit displacement; valid only after SetDisp8().
    int8_t disp8() const {
      assert(length_ >= 2);
      return static_cast<int8_t>(encoding_[length_ - 1]);
    }

    /// The trailing 32-bit displacement; valid only after SetDisp32().
    int32_t disp32() const {
      assert(length_ >= 5);
      return bit_copy<int32_t>(encoding_[length_ - 4]);
    }

    AssemblerFixup *fixup() const { return fixup_; }

  protected:
    Operand() : fixup_(nullptr), length_(0) {} // Needed by subclass Address.

    /// Encode the ModRM byte and record whether rm needs REX.B.
    void SetModRM(int mod, GPRRegister rm) {
      assert((mod & ~3) == 0);
      encoding_[0] = (mod << 6) | (rm & 0x07);
      rex_ = (rm & 0x08) ? RexB : RexNone;
      length_ = 1;
    }

    /// Encode the SIB byte; must immediately follow SetModRM. Note that rex_
    /// is recomputed (not OR'ed) from base/index; callers pass the same base
    /// register again (or the esp SIB escape), so no REX bit is lost.
    void SetSIB(ScaleFactor scale, GPRRegister index, GPRRegister base) {
      assert(length_ == 1);
      assert((scale & ~3) == 0);
      encoding_[1] = (scale << 6) | ((index & 0x07) << 3) | (base & 0x07);
      rex_ =
          ((base & 0x08) ? RexB : RexNone) | ((index & 0x08) ? RexX : RexNone);
      length_ = 2;
    }

    /// Append an 8-bit displacement after the ModRM (and optional SIB) byte.
    void SetDisp8(int8_t disp) {
      assert(length_ == 1 || length_ == 2);
      encoding_[length_++] = static_cast<uint8_t>(disp);
    }

    /// Append a 32-bit displacement (raw byte copy of disp).
    void SetDisp32(int32_t disp) {
      assert(length_ == 1 || length_ == 2);
      intptr_t disp_size = sizeof(disp);
      memmove(&encoding_[length_], &disp, disp_size);
      length_ += disp_size;
    }

    void SetFixup(AssemblerFixup *fixup) { fixup_ = fixup; }

  private:
    AssemblerFixup *fixup_;
    uint8_t rex_ = 0;
    uint8_t encoding_[6]; // ModRM + optional SIB + up to 4 displacement bytes.
    uint8_t length_;      // Number of valid bytes in encoding_.

    // Register-direct operand (mod == 3).
    explicit Operand(GPRRegister reg) : fixup_(nullptr) { SetModRM(3, reg); }

    /// Get the operand encoding byte at the given index.
    uint8_t encoding_at(intptr_t index) const {
      assert(index >= 0 && index < length_);
      return encoding_[index];
    }

    /// Returns whether or not this operand is really the given register in
    /// disguise. Used from the assembler to generate better encodings.
    bool IsRegister(GPRRegister reg) const {
      return ((encoding_[0] & 0xF8) ==
              0xC0) // Addressing mode is register only.
             &&
             (rm() == reg); // Register codes match.
    }

    template <class> friend class AssemblerX86Base;
  };
189
  /// Address builds on Operand to encode the concrete memory addressing
  /// modes: [base+disp], [index*scale+disp], [base+index*scale+disp], and a
  /// (deprecated, see TODOs) absolute form.
  class Address : public Operand {
    Address() = delete;

  public:
    Address(const Address &other) : Operand(other) {}

    Address &operator=(const Address &other) {
      Operand::operator=(other);
      return *this;
    }

    /// [base + disp]. A base whose low 3 encoding bits equal ebp/rbp cannot
    /// use the disp-less mod=0 form, and a base encoding like esp/rsp always
    /// needs a SIB byte.
    Address(GPRRegister base, int32_t disp) {
      if (disp == 0 && (base & 7) != RegX8664::Encoded_Reg_ebp) {
        SetModRM(0, base);
        if ((base & 7) == RegX8664::Encoded_Reg_esp)
          SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, base);
      } else if (Utils::IsInt(8, disp)) {
        SetModRM(1, base);
        if ((base & 7) == RegX8664::Encoded_Reg_esp)
          SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, base);
        SetDisp8(disp);
      } else {
        SetModRM(2, base);
        if ((base & 7) == RegX8664::Encoded_Reg_esp)
          SetSIB(TIMES_1, RegX8664::Encoded_Reg_esp, base);
        SetDisp32(disp);
      }
    }

    /// [index*scale + disp] with no base register: mod=0 plus a SIB whose
    /// base field is ebp selects the disp32-only form.
    Address(GPRRegister index, ScaleFactor scale, int32_t disp) {
      assert(index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
      SetModRM(0, RegX8664::Encoded_Reg_esp);
      SetSIB(scale, index, RegX8664::Encoded_Reg_ebp);
      SetDisp32(disp);
    }

    /// [base + index*scale + disp], choosing the shortest displacement form.
    Address(GPRRegister base, GPRRegister index, ScaleFactor scale,
            int32_t disp) {
      assert(index != RegX8664::Encoded_Reg_esp); // Illegal addressing mode.
      if (disp == 0 && (base & 7) != RegX8664::Encoded_Reg_ebp) {
        SetModRM(0, RegX8664::Encoded_Reg_esp);
        SetSIB(scale, index, base);
      } else if (Utils::IsInt(8, disp)) {
        SetModRM(1, RegX8664::Encoded_Reg_esp);
        SetSIB(scale, index, base);
        SetDisp8(disp);
      } else {
        SetModRM(2, RegX8664::Encoded_Reg_esp);
        SetSIB(scale, index, base);
        SetDisp32(disp);
      }
    }

    // AbsoluteTag requests an absolute (disp32-only) address. NOTE(review):
    // the original comment here described a "PcRelTag" that does not exist;
    // also, in 64-bit mode mod=0/rm=101 without a SIB encodes rip-relative,
    // not absolute, addressing -- which is likely why the author flagged it.
    // TODO(jpp): this is bogus. remove.
    enum AbsoluteTag { ABSOLUTE };

    Address(AbsoluteTag, const uintptr_t Addr) {
      SetModRM(0, RegX8664::Encoded_Reg_ebp);
      SetDisp32(Addr);
    }

    // TODO(jpp): remove this.
    static Address Absolute(const uintptr_t Addr) {
      return Address(ABSOLUTE, Addr);
    }

    /// Absolute address carrying a relocation fixup; Offset is emitted as
    /// the provisional displacement.
    Address(AbsoluteTag, RelocOffsetT Offset, AssemblerFixup *Fixup) {
      SetModRM(0, RegX8664::Encoded_Reg_ebp);
      // Use the Offset in the displacement for now. If we decide to process
      // fixups later, we'll need to patch up the emitted displacement.
      SetDisp32(Offset);
      SetFixup(Fixup);
    }

    // TODO(jpp): remove this.
    static Address Absolute(RelocOffsetT Offset, AssemblerFixup *Fixup) {
      return Address(ABSOLUTE, Offset, Fixup);
    }

    /// Address of a constant-pool entry: zero displacement plus a fixup
    /// against Imm. NOTE(review): R_386_32 is a 32-bit ELF relocation --
    /// presumably a placeholder here, as the TODO indicates.
    static Address ofConstPool(Assembler *Asm, const Constant *Imm) {
      // TODO(jpp): ???
      AssemblerFixup *Fixup = Asm->createFixup(llvm::ELF::R_386_32, Imm);
      const RelocOffsetT Offset = 0;
      return Address(ABSOLUTE, Offset, Fixup);
    }
  };
278
279 //----------------------------------------------------------------------------
280 // __ ______ __ __ ______ ______ __ __ __ ______
281 // /\ \ /\ __ \/\ \ _ \ \/\ ___\/\ == \/\ \/\ "-.\ \/\ ___\
282 // \ \ \___\ \ \/\ \ \ \/ ".\ \ \ __\\ \ __<\ \ \ \ \-. \ \ \__ \
283 // \ \_____\ \_____\ \__/".~\_\ \_____\ \_\ \_\ \_\ \_\\"\_\ \_____\
284 // \/_____/\/_____/\/_/ \/_/\/_____/\/_/ /_/\/_/\/_/ \/_/\/_____/
285 //
286 //----------------------------------------------------------------------------
  /// The x86 instruction-set extensions the lowering may target, ordered so
  /// that a simple comparison selects "at least this level".
  enum InstructionSet {
    Begin,
    // SSE2 is the PNaCl baseline instruction set.
    SSE2 = Begin,
    SSE4_1,
    End
  };

  // Human-readable target name; defined out of line.
  static const char *TargetName;
296
297 static IceString getRegName(SizeT RegNum, Type Ty) {
298 assert(RegNum < RegisterSet::Reg_NUM);
299 static const struct {
300 const char *const Name8;
301 const char *const Name16;
302 const char *const Name /*32*/;
303 const char *const Name64;
304 } RegNames[] = {
305#define X(val, encode, name64, name32, name16, name8, scratch, preserved, \
306 stackptr, frameptr, isInt, isFP) \
307 { name8, name16, name32, name64 } \
308 ,
309 REGX8664_TABLE
310#undef X
311 };
312
313 switch (Ty) {
314 case IceType_i1:
315 case IceType_i8:
316 return RegNames[RegNum].Name8;
317 case IceType_i16:
318 return RegNames[RegNum].Name16;
319 case IceType_i64:
320 return RegNames[RegNum].Name64;
321 default:
322 return RegNames[RegNum].Name;
323 }
324 }
325
  /// Populates the per-register property bit vectors from REGX8664_TABLE.
  /// Each vector is indexed by RegisterSet::val and must already be sized to
  /// RegisterSet::Reg_NUM by the caller.
  /// NOTE(review): IntegerRegistersI8 is set to 1 for *every* table entry --
  /// with a REX prefix all 16 GPRs do have 8-bit forms, but this also sets
  /// the bit for non-integer (XMM) entries; confirm callers only consult it
  /// for GPRs.
  static void initRegisterSet(llvm::SmallBitVector *IntegerRegisters,
                              llvm::SmallBitVector *IntegerRegistersI8,
                              llvm::SmallBitVector *FloatRegisters,
                              llvm::SmallBitVector *VectorRegisters,
                              llvm::SmallBitVector *ScratchRegs) {
#define X(val, encode, name64, name32, name16, name8, scratch, preserved,      \
          stackptr, frameptr, isInt, isFP)                                     \
  (*IntegerRegisters)[RegisterSet::val] = isInt;                               \
  (*IntegerRegistersI8)[RegisterSet::val] = 1;                                 \
  (*FloatRegisters)[RegisterSet::val] = isFP;                                  \
  (*VectorRegisters)[RegisterSet::val] = isFP;                                 \
  (*ScratchRegs)[RegisterSet::val] = scratch;
    REGX8664_TABLE;
#undef X
  }
341
  /// Builds the register set selected by Include and not by Exclude. Each
  /// register's table properties (scratch/preserved/stackptr/frameptr) are
  /// matched against the RegSetMask categories; exclusion is applied after
  /// inclusion, so Exclude always wins for a register in both masks.
  static llvm::SmallBitVector
  getRegisterSet(TargetLowering::RegSetMask Include,
                 TargetLowering::RegSetMask Exclude) {
    llvm::SmallBitVector Registers(RegisterSet::Reg_NUM);

#define X(val, encode, name64, name32, name16, name8, scratch, preserved,      \
          stackptr, frameptr, isInt, isFP)                                     \
  if (scratch && (Include & ::Ice::TargetLowering::RegSet_CallerSave))         \
    Registers[RegisterSet::val] = true;                                       \
  if (preserved && (Include & ::Ice::TargetLowering::RegSet_CalleeSave))       \
    Registers[RegisterSet::val] = true;                                       \
  if (stackptr && (Include & ::Ice::TargetLowering::RegSet_StackPointer))      \
    Registers[RegisterSet::val] = true;                                       \
  if (frameptr && (Include & ::Ice::TargetLowering::RegSet_FramePointer))      \
    Registers[RegisterSet::val] = true;                                       \
  if (scratch && (Exclude & ::Ice::TargetLowering::RegSet_CallerSave))         \
    Registers[RegisterSet::val] = false;                                      \
  if (preserved && (Exclude & ::Ice::TargetLowering::RegSet_CalleeSave))       \
    Registers[RegisterSet::val] = false;                                      \
  if (stackptr && (Exclude & ::Ice::TargetLowering::RegSet_StackPointer))      \
    Registers[RegisterSet::val] = false;                                      \
  if (frameptr && (Exclude & ::Ice::TargetLowering::RegSet_FramePointer))      \
    Registers[RegisterSet::val] = false;

    REGX8664_TABLE

#undef X

    return Registers;
  }
372
373 static void
374 makeRandomRegisterPermutation(GlobalContext *Ctx, Cfg *Func,
375 llvm::SmallVectorImpl<int32_t> &Permutation,
376 const llvm::SmallBitVector &ExcludeRegisters) {
377 // TODO(stichnot): Declaring Permutation this way loses type/size
378 // information. Fix this in conjunction with the caller-side TODO.
379 assert(Permutation.size() >= RegisterSet::Reg_NUM);
380 // Expected upper bound on the number of registers in a single equivalence
381 // class. For x86-64, this would comprise the 16 XMM registers. This is
382 // for performance, not correctness.
383 static const unsigned MaxEquivalenceClassSize = 8;
384 typedef llvm::SmallVector<int32_t, MaxEquivalenceClassSize> RegisterList;
385 typedef std::map<uint32_t, RegisterList> EquivalenceClassMap;
386 EquivalenceClassMap EquivalenceClasses;
387 SizeT NumShuffled = 0, NumPreserved = 0;
388
389// Build up the equivalence classes of registers by looking at the register
390// properties as well as whether the registers should be explicitly excluded
391// from shuffling.
392#define X(val, encode, name64, name32, name16, name8, scratch, preserved, \
393 stackptr, frameptr, isInt, isFP) \
394 if (ExcludeRegisters[RegisterSet::val]) { \
395 /* val stays the same in the resulting permutation. */ \
396 Permutation[RegisterSet::val] = RegisterSet::val; \
397 ++NumPreserved; \
398 } else { \
399 const uint32_t Index = (scratch << 0) | (preserved << 1) | \
400 (/*isI8=*/1 << 2) | (isInt << 3) | (isFP << 4); \
401 /* val is assigned to an equivalence class based on its properties. */ \
402 EquivalenceClasses[Index].push_back(RegisterSet::val); \
403 }
404 REGX8664_TABLE
405#undef X
406
407 RandomNumberGeneratorWrapper RNG(Ctx->getRNG());
408
409 // Shuffle the resulting equivalence classes.
410 for (auto I : EquivalenceClasses) {
411 const RegisterList &List = I.second;
412 RegisterList Shuffled(List);
413 RandomShuffle(Shuffled.begin(), Shuffled.end(), RNG);
414 for (size_t SI = 0, SE = Shuffled.size(); SI < SE; ++SI) {
415 Permutation[List[SI]] = Shuffled[SI];
416 ++NumShuffled;
417 }
418 }
419
420 assert(NumShuffled + NumPreserved == RegisterSet::Reg_NUM);
421
422 if (Func->isVerbose(IceV_Random)) {
423 OstreamLocker L(Func->getContext());
424 Ostream &Str = Func->getContext()->getStrDump();
425 Str << "Register equivalence classes:\n";
426 for (auto I : EquivalenceClasses) {
427 Str << "{";
428 const RegisterList &List = I.second;
429 bool First = true;
430 for (int32_t Register : List) {
431 if (!First)
432 Str << " ";
433 First = false;
434 Str << getRegName(Register, IceType_i32);
435 }
436 Str << "}\n";
437 }
438 }
439 }
440
441 /// The maximum number of arguments to pass in XMM registers
John Portoe0d9afa2015-08-05 10:13:44 -0700442 static const uint32_t X86_MAX_XMM_ARGS = 8;
443 /// The maximum number of arguments to pass in GPR registers
444 static const uint32_t X86_MAX_GPR_ARGS = 6;
John Porto453660f2015-07-31 14:52:52 -0700445 /// The number of bits in a byte
446 static const uint32_t X86_CHAR_BIT = 8;
447 /// Stack alignment. This is defined in IceTargetLoweringX8664.cpp because it
448 /// is used as an argument to std::max(), and the default std::less<T> has an
449 /// operator(T const&, T const&) which requires this member to have an
450 /// address.
451 static const uint32_t X86_STACK_ALIGNMENT_BYTES;
452 /// Size of the return address on the stack
453 static const uint32_t X86_RET_IP_SIZE_BYTES = 4;
454 /// The number of different NOP instructions
455 static const uint32_t X86_NUM_NOP_VARIANTS = 5;
456
  /// Value is in bytes. Return Value adjusted to the next highest multiple
  /// of the stack alignment (X86_STACK_ALIGNMENT_BYTES, defined in
  /// IceTargetLoweringX8664.cpp).
  static uint32_t applyStackAlignment(uint32_t Value) {
    return Utils::applyAlignment(Value, X86_STACK_ALIGNMENT_BYTES);
  }
462
463 /// Return the type which the elements of the vector have in the X86
464 /// representation of the vector.
465 static Type getInVectorElementType(Type Ty) {
466 assert(isVectorType(Ty));
467 size_t Index = static_cast<size_t>(Ty);
468 (void)Index;
469 assert(Index < TableTypeX8664AttributesSize);
470 return TableTypeX8664Attributes[Ty].InVectorElementType;
471 }
472
473 // Note: The following data structures are defined in
474 // IceTargetLoweringX8664.cpp.
475
476 /// The following table summarizes the logic for lowering the fcmp
477 /// instruction. There is one table entry for each of the 16 conditions.
478 ///
479 /// The first four columns describe the case when the operands are floating
480 /// point scalar values. A comment in lowerFcmp() describes the lowering
481 /// template. In the most general case, there is a compare followed by two
482 /// conditional branches, because some fcmp conditions don't map to a single
483 /// x86 conditional branch. However, in many cases it is possible to swap the
484 /// operands in the comparison and have a single conditional branch. Since
485 /// it's quite tedious to validate the table by hand, good execution tests are
486 /// helpful.
487 ///
488 /// The last two columns describe the case when the operands are vectors of
489 /// floating point values. For most fcmp conditions, there is a clear mapping
490 /// to a single x86 cmpps instruction variant. Some fcmp conditions require
491 /// special code to handle and these are marked in the table with a
492 /// Cmpps_Invalid predicate.
493 /// {@
494 static const struct TableFcmpType {
495 uint32_t Default;
496 bool SwapScalarOperands;
497 Cond::BrCond C1, C2;
498 bool SwapVectorOperands;
499 Cond::CmppsCond Predicate;
500 } TableFcmp[];
501 static const size_t TableFcmpSize;
502 /// @}
503
504 /// The following table summarizes the logic for lowering the icmp instruction
505 /// for i32 and narrower types. Each icmp condition has a clear mapping to an
506 /// x86 conditional branch instruction.
507 /// {@
508 static const struct TableIcmp32Type { Cond::BrCond Mapping; } TableIcmp32[];
509 static const size_t TableIcmp32Size;
510 /// @}
511
512 /// The following table summarizes the logic for lowering the icmp instruction
513 /// for the i64 type. For Eq and Ne, two separate 32-bit comparisons and
514 /// conditional branches are needed. For the other conditions, three separate
515 /// conditional branches are needed.
516 /// {@
517 static const struct TableIcmp64Type {
518 Cond::BrCond C1, C2, C3;
519 } TableIcmp64[];
520 static const size_t TableIcmp64Size;
521 /// @}
522
523 static Cond::BrCond getIcmp32Mapping(InstIcmp::ICond Cond) {
524 size_t Index = static_cast<size_t>(Cond);
525 assert(Index < TableIcmp32Size);
526 return TableIcmp32[Index].Mapping;
527 }
528
529 static const struct TableTypeX8664AttributesType {
530 Type InVectorElementType;
531 } TableTypeX8664Attributes[];
532 static const size_t TableTypeX8664AttributesSize;
533
534 //----------------------------------------------------------------------------
535 // __ __ __ ______ ______
536 // /\ \/\ "-.\ \/\ ___\/\__ _\
537 // \ \ \ \ \-. \ \___ \/_/\ \/
538 // \ \_\ \_\\"\_\/\_____\ \ \_\
539 // \/_/\/_/ \/_/\/_____/ \/_/
540 //
541 //----------------------------------------------------------------------------
542 using Insts = ::Ice::X86Internal::Insts<TargetX8664>;
543
544 using TargetLowering = ::Ice::X86Internal::TargetX86Base<TargetX8664>;
John Porto2fea26c2015-07-28 16:28:07 -0700545 using Assembler = X8664::AssemblerX8664;
John Porto453660f2015-07-31 14:52:52 -0700546
  /// X86Operand extends the Operand hierarchy. Its subclasses are
  /// X86OperandMem and VariableSplit.
  class X86Operand : public ::Ice::Operand {
    X86Operand() = delete;
    X86Operand(const X86Operand &) = delete;
    X86Operand &operator=(const X86Operand &) = delete;

  public:
    /// Target-specific operand kinds, numbered starting at the generic
    /// Operand::kTarget so they do not collide with target-independent kinds.
    enum OperandKindX8664 { k__Start = ::Ice::Operand::kTarget, kMem, kSplit };
    using ::Ice::Operand::dump;

    void dump(const Cfg *, Ostream &Str) const override;

  protected:
    X86Operand(OperandKindX8664 Kind, Type Ty)
        : Operand(static_cast<::Ice::Operand::OperandKind>(Kind), Ty) {}
  };
564
  /// X86OperandMem represents the m64 addressing mode, with optional base and
  /// index registers, a constant offset, and a fixed shift value for the index
  /// register.
  class X86OperandMem : public X86Operand {
    X86OperandMem() = delete;
    X86OperandMem(const X86OperandMem &) = delete;
    X86OperandMem &operator=(const X86OperandMem &) = delete;

  public:
    enum SegmentRegisters { DefaultSegment = -1, SegReg_NUM };
    /// Factory (arena-allocated on Func). This x86-64 lowering does not use
    /// segment-register overrides, so only DefaultSegment is accepted.
    static X86OperandMem *
    create(Cfg *Func, Type Ty, Variable *Base, Constant *Offset,
           Variable *Index = nullptr, uint16_t Shift = 0,
           SegmentRegisters SegmentRegister = DefaultSegment) {
      assert(SegmentRegister == DefaultSegment);
      (void)SegmentRegister;
      return new (Func->allocate<X86OperandMem>())
          X86OperandMem(Func, Ty, Base, Offset, Index, Shift);
    }
    Variable *getBase() const { return Base; }
    Constant *getOffset() const { return Offset; }
    Variable *getIndex() const { return Index; }
    uint16_t getShift() const { return Shift; }
    // Always DefaultSegment here (no segment overrides on this target).
    SegmentRegisters getSegmentRegister() const { return DefaultSegment; }
    void emitSegmentOverride(Assembler *) const {}
    Address toAsmAddress(Assembler *Asm) const;

    void emit(const Cfg *Func) const override;
    using X86Operand::dump;
    void dump(const Cfg *Func, Ostream &Str) const override;

    static bool classof(const Operand *Operand) {
      return Operand->getKind() == static_cast<OperandKind>(kMem);
    }

    void setRandomized(bool R) { Randomized = R; }

    bool getRandomized() const { return Randomized; }

  private:
    X86OperandMem(Cfg *Func, Type Ty, Variable *Base, Constant *Offset,
                  Variable *Index, uint16_t Shift);

    Variable *Base;
    Constant *Offset;
    Variable *Index;
    uint16_t Shift;
    /// A flag to show if this memory operand is a randomized one. Randomized
    /// memory operands are generated in
    /// TargetX86Base::randomizeOrPoolImmediate()
    bool Randomized = false;
  };
617
  /// VariableSplit is a way to treat an f64 memory location as a pair of i32
  /// locations (Low and High). This is needed for some cases of the Bitcast
  /// instruction. Since it's not possible for integer registers to access the
  /// XMM registers and vice versa, the lowering forces the f64 to be spilled to
  /// the stack and then accesses through the VariableSplit.
  // TODO(jpp): remove references to VariableSplit from IceInstX86Base as 64bit
  // targets can natively handle these.
  class VariableSplit : public X86Operand {
    VariableSplit() = delete;
    VariableSplit(const VariableSplit &) = delete;
    VariableSplit &operator=(const VariableSplit &) = delete;

  public:
    enum Portion { Low, High };
    /// Factory (arena-allocated on Func); Var must be an f64 (asserted in
    /// the constructor).
    static VariableSplit *create(Cfg *Func, Variable *Var, Portion Part) {
      return new (Func->allocate<VariableSplit>())
          VariableSplit(Func, Var, Part);
    }
    /// Byte offset of the selected i32 half within the spilled f64 slot.
    int32_t getOffset() const { return Part == High ? 4 : 0; }

    Address toAsmAddress(const Cfg *Func) const;
    void emit(const Cfg *Func) const override;
    using X86Operand::dump;
    void dump(const Cfg *Func, Ostream &Str) const override;

    static bool classof(const Operand *Operand) {
      return Operand->getKind() == static_cast<OperandKind>(kSplit);
    }

  private:
    VariableSplit(Cfg *Func, Variable *Var, Portion Part)
        : X86Operand(kSplit, IceType_i32), Var(Var), Part(Part) {
      assert(Var->getType() == IceType_f64);
      // Expose Var through the base Operand's Vars/NumVars array.
      Vars = Func->allocateArrayOf<Variable *>(1);
      Vars[0] = Var;
      NumVars = 1;
    }

    Variable *Var;
    Portion Part;
  };
659
  /// SpillVariable decorates a Variable by linking it to another Variable.
  /// When stack frame offsets are computed, the SpillVariable is given a
  /// distinct stack slot only if its linked Variable has a register. If the
  /// linked Variable has a stack slot, then the Variable and SpillVariable
  /// share that slot.
  class SpillVariable : public Variable {
    SpillVariable() = delete;
    SpillVariable(const SpillVariable &) = delete;
    SpillVariable &operator=(const SpillVariable &) = delete;

  public:
    static SpillVariable *create(Cfg *Func, Type Ty, SizeT Index) {
      return new (Func->allocate<SpillVariable>()) SpillVariable(Ty, Index);
    }
    // Operand kind used by classof() for LLVM-style RTTI.
    const static OperandKind SpillVariableKind =
        static_cast<OperandKind>(kVariable_Target);
    static bool classof(const Operand *Operand) {
      return Operand->getKind() == SpillVariableKind;
    }
    void setLinkedTo(Variable *Var) { LinkedTo = Var; }
    Variable *getLinkedTo() const { return LinkedTo; }
    // Inherit dump() and emit() from Variable.

  private:
    SpillVariable(Type Ty, SizeT Index)
        : Variable(SpillVariableKind, Ty, Index), LinkedTo(nullptr) {}
    Variable *LinkedTo;
  };
688
689 // Note: The following data structures are defined in IceInstX8664.cpp.
690
691 static const struct InstBrAttributesType {
692 Cond::BrCond Opposite;
693 const char *DisplayString;
694 const char *EmitString;
695 } InstBrAttributes[];
696
697 static const struct InstCmppsAttributesType {
698 const char *EmitString;
699 } InstCmppsAttributes[];
700
701 static const struct TypeAttributesType {
702 const char *CvtString; // i (integer), s (single FP), d (double FP)
703 const char *SdSsString; // ss, sd, or <blank>
704 const char *PackString; // b, w, d, or <blank>
705 const char *WidthString; // b, w, l, q, or <blank>
706 const char *FldString; // s, l, or <blank>
707 } TypeAttributes[];
John Porto2fea26c2015-07-28 16:28:07 -0700708};
709
710} // end of namespace X86Internal
711
712namespace X8664 {
713using Traits = ::Ice::X86Internal::MachineTraits<TargetX8664>;
714} // end of namespace X8664
715
716} // end of namespace Ice
717
718#endif // SUBZERO_SRC_ICETARGETLOWERINGX8664TRAITS_H