//===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the TargetLoweringX8632 class, which
/// consists almost entirely of the lowering sequence for each
/// high-level instruction.
///
//===----------------------------------------------------------------------===//

#include "IceTargetLoweringX8632.h"

#include "IceTargetLoweringX8632Traits.h"
#include "IceTargetLoweringX86Base.h"

namespace Ice {

//------------------------------------------------------------------------------
//  ______   ______     ______     __     ______   ______
// /\__  _\ /\  == \   /\  __ \   /\ \   /\__  _\ /\  ___\
// \/_/\ \/ \ \  __<   \ \  __ \  \ \ \  \/_/\ \/ \ \___  \
//    \ \_\  \ \_\ \_\  \ \_\ \_\  \ \_\    \ \_\  \/\_____\
//     \/_/   \/_/ /_/   \/_/\/_/   \/_/     \/_/   \/_____/
//
//------------------------------------------------------------------------------
namespace X86Internal {
const MachineTraits<TargetX8632>::TableFcmpType
    MachineTraits<TargetX8632>::TableFcmp[] = {
#define X(val, dflt, swapS, C1, C2, swapV, pred) \
  { \
    dflt, swapS, X8632::Traits::Cond::C1, X8632::Traits::Cond::C2, swapV, \
        X8632::Traits::Cond::pred \
  } \
  ,
    FCMPX8632_TABLE
#undef X
};

const size_t MachineTraits<TargetX8632>::TableFcmpSize =
    llvm::array_lengthof(TableFcmp);

const MachineTraits<TargetX8632>::TableIcmp32Type
    MachineTraits<TargetX8632>::TableIcmp32[] = {
#define X(val, C_32, C1_64, C2_64, C3_64) \
  { X8632::Traits::Cond::C_32 } \
  ,
    ICMPX8632_TABLE
#undef X
};

const size_t MachineTraits<TargetX8632>::TableIcmp32Size =
    llvm::array_lengthof(TableIcmp32);

const MachineTraits<TargetX8632>::TableIcmp64Type
    MachineTraits<TargetX8632>::TableIcmp64[] = {
#define X(val, C_32, C1_64, C2_64, C3_64) \
  { \
    X8632::Traits::Cond::C1_64, X8632::Traits::Cond::C2_64, \
        X8632::Traits::Cond::C3_64 \
  } \
  ,
    ICMPX8632_TABLE
#undef X
};

const size_t MachineTraits<TargetX8632>::TableIcmp64Size =
    llvm::array_lengthof(TableIcmp64);

const MachineTraits<TargetX8632>::TableTypeX8632AttributesType
    MachineTraits<TargetX8632>::TableTypeX8632Attributes[] = {
#define X(tag, elementty, cvt, sdss, pack, width, fld) \
  { elementty } \
  ,
    ICETYPEX8632_TABLE
#undef X
};

const size_t MachineTraits<TargetX8632>::TableTypeX8632AttributesSize =
    llvm::array_lengthof(TableTypeX8632Attributes);

const uint32_t MachineTraits<TargetX8632>::X86_STACK_ALIGNMENT_BYTES = 16;
const char *MachineTraits<TargetX8632>::TargetName = "X8632";

} // end of namespace X86Internal

//------------------------------------------------------------------------------
// __      ______  __     __  ______  ______  __  __   __  ______
// /\ \    /\  __ \/\ \  _ \ \/\  ___\/\  == \/\ \/\ "-.\ \/\  ___\
// \ \ \___\ \ \/\ \ \ \/ ".\ \ \  __\\ \  __<\ \ \ \ \-.  \ \ \__ \
//  \ \_____\ \_____\ \__/".~\_\ \_____\ \_\ \_\ \_\ \_\\"\_\ \_____\
//   \/_____/\/_____/\/_/   \/_/\/_____/\/_/ /_/\/_/\/_/ \/_/\/_____/
//
//------------------------------------------------------------------------------
void TargetX8632::lowerCall(const InstCall *Instr) {
  // x86-32 calling convention:
  //
  // * At the point before the call, the stack must be aligned to 16
  //   bytes.
  //
  // * The first four arguments of vector type, regardless of their
  //   position relative to the other arguments in the argument list, are
  //   placed in registers xmm0 - xmm3.
  //
  // * Other arguments are pushed onto the stack in right-to-left order,
  //   such that the left-most argument ends up on the top of the stack at
  //   the lowest memory address.
  //
  // * Stack arguments of vector type are aligned to start at the next
  //   highest multiple of 16 bytes. Other stack arguments are aligned to
  //   4 bytes.
  //
  // This intends to match the section "IA-32 Function Calling
  // Convention" of the document "OS X ABI Function Call Guide" by
  // Apple.
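  //
  // As a purely illustrative sketch (the call below is hypothetical): for a
  // call f(<4 x float> %v, i32 %a, double %b), %v would be passed in xmm0,
  // %a would be stored at [esp+0] and %b at [esp+4], and the resulting
  // 12-byte parameter area would be rounded up to 16 bytes before the call.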
  NeedsStackAlignment = true;

  using OperandList = std::vector<Operand *>;
  OperandList XmmArgs;
  OperandList StackArgs, StackArgLocations;
  uint32_t ParameterAreaSizeBytes = 0;

  // Classify each argument operand according to the location where the
  // argument is passed.
  for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) {
    Operand *Arg = Instr->getArg(i);
    Type Ty = Arg->getType();
    // The PNaCl ABI requires the width of arguments to be at least 32 bits.
    assert(typeWidthInBytes(Ty) >= 4);
    if (isVectorType(Ty) && XmmArgs.size() < Traits::X86_MAX_XMM_ARGS) {
      XmmArgs.push_back(Arg);
    } else {
      StackArgs.push_back(Arg);
      if (isVectorType(Arg->getType())) {
        ParameterAreaSizeBytes =
            Traits::applyStackAlignment(ParameterAreaSizeBytes);
      }
      Variable *esp =
          Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
      Constant *Loc = Ctx->getConstantInt32(ParameterAreaSizeBytes);
      StackArgLocations.push_back(
          Traits::X86OperandMem::create(Func, Ty, esp, Loc));
      ParameterAreaSizeBytes += typeWidthInBytesOnStack(Arg->getType());
    }
  }

  // Adjust the parameter area so that the stack is aligned. It is
  // assumed that the stack is already aligned at the start of the
  // calling sequence.
  ParameterAreaSizeBytes = Traits::applyStackAlignment(ParameterAreaSizeBytes);

  // Subtract the appropriate amount for the argument area. This also
  // takes care of setting the stack adjustment during emission.
  //
  // TODO: If for some reason the call instruction gets dead-code
  // eliminated after lowering, we would need to ensure that the
  // pre-call and the post-call esp adjustment get eliminated as well.
  if (ParameterAreaSizeBytes) {
    _adjust_stack(ParameterAreaSizeBytes);
  }

  // Copy arguments that are passed on the stack to the appropriate
  // stack locations.
  for (SizeT i = 0, e = StackArgs.size(); i < e; ++i) {
    lowerStore(InstStore::create(Func, StackArgs[i], StackArgLocations[i]));
  }

  // Copy arguments to be passed in registers to the appropriate
  // registers.
  // TODO: Investigate the impact of lowering arguments passed in
  // registers after lowering stack arguments as opposed to the other
  // way around. Lowering register arguments after stack arguments may
  // reduce register pressure. On the other hand, lowering register
  // arguments first (before stack arguments) may result in more compact
  // code, as the memory operand displacements may end up being smaller
  // before any stack adjustment is done.
  for (SizeT i = 0, NumXmmArgs = XmmArgs.size(); i < NumXmmArgs; ++i) {
    Variable *Reg =
        legalizeToReg(XmmArgs[i], Traits::RegisterSet::Reg_xmm0 + i);
    // Generate a FakeUse of register arguments so that they do not get
    // dead code eliminated as a result of the FakeKill of scratch
    // registers after the call.
    Context.insert(InstFakeUse::create(Func, Reg));
  }
  // Generate the call instruction. Assign its result to a temporary
  // with high register allocation weight.
  Variable *Dest = Instr->getDest();
  // ReturnReg doubles as ReturnRegLo as necessary.
  Variable *ReturnReg = nullptr;
  Variable *ReturnRegHi = nullptr;
  if (Dest) {
    switch (Dest->getType()) {
    case IceType_NUM:
    case IceType_void:
      llvm::report_fatal_error("Invalid Call dest type");
      break;
    case IceType_i1:
    case IceType_i8:
    case IceType_i16:
    case IceType_i32:
      ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_eax);
      break;
    case IceType_i64:
      ReturnReg = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
      ReturnRegHi = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
      break;
    case IceType_f32:
    case IceType_f64:
      // Leave ReturnReg==ReturnRegHi==nullptr, and capture the result with
      // the fstp instruction.
      break;
    case IceType_v4i1:
    case IceType_v8i1:
    case IceType_v16i1:
    case IceType_v16i8:
    case IceType_v8i16:
    case IceType_v4i32:
    case IceType_v4f32:
      ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_xmm0);
      break;
    }
  }
  Operand *CallTarget = legalize(Instr->getCallTarget());
  const bool NeedSandboxing = Ctx->getFlags().getUseSandboxing();
  if (NeedSandboxing) {
    if (llvm::isa<Constant>(CallTarget)) {
      _bundle_lock(InstBundleLock::Opt_AlignToEnd);
    } else {
      Variable *CallTargetVar = nullptr;
      _mov(CallTargetVar, CallTarget);
      _bundle_lock(InstBundleLock::Opt_AlignToEnd);
      const SizeT BundleSize =
          1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
      _and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1)));
      CallTarget = CallTargetVar;
    }
  }
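  // For illustration only, the non-constant (indirect) sandboxed case above
  // ends up producing a sequence along these lines; the register name and the
  // 32-byte bundle size are assumptions for the example, not guaranteed:
  //   mov   <reg>, <call target>
  //   bundle_lock align_to_end
  //   and   <reg>, 0xFFFFFFE0    ; clear low bits so the target is
  //                              ; bundle-aligned
  //   call  <reg>
  //   bundle_unlock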
  Inst *NewCall = Traits::Insts::Call::create(Func, ReturnReg, CallTarget);
  Context.insert(NewCall);
  if (NeedSandboxing)
    _bundle_unlock();
  if (ReturnRegHi)
    Context.insert(InstFakeDef::create(Func, ReturnRegHi));

  // Add the appropriate offset to esp. The call instruction takes care
  // of resetting the stack offset during emission.
  if (ParameterAreaSizeBytes) {
    Variable *esp =
        Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
    _add(esp, Ctx->getConstantInt32(ParameterAreaSizeBytes));
  }

  // Insert a register-kill pseudo instruction.
  Context.insert(InstFakeKill::create(Func, NewCall));

  // Generate a FakeUse to keep the call live if necessary.
  if (Instr->hasSideEffects() && ReturnReg) {
    Inst *FakeUse = InstFakeUse::create(Func, ReturnReg);
    Context.insert(FakeUse);
  }

  if (!Dest)
    return;

  // Assign the result of the call to Dest.
  if (ReturnReg) {
    if (ReturnRegHi) {
      assert(Dest->getType() == IceType_i64);
      split64(Dest);
      Variable *DestLo = Dest->getLo();
      Variable *DestHi = Dest->getHi();
      _mov(DestLo, ReturnReg);
      _mov(DestHi, ReturnRegHi);
    } else {
      assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i16 ||
             Dest->getType() == IceType_i8 || Dest->getType() == IceType_i1 ||
             isVectorType(Dest->getType()));
      if (isVectorType(Dest->getType())) {
        _movp(Dest, ReturnReg);
      } else {
        _mov(Dest, ReturnReg);
      }
    }
  } else if (isScalarFloatingType(Dest->getType())) {
    // Special treatment for an FP function which returns its result in
    // st(0).
    // If Dest ends up being a physical xmm register, the fstp emit code
    // will route st(0) through a temporary stack slot.
    _fstp(Dest);
    // Create a fake use of Dest in case it actually isn't used,
    // because st(0) still needs to be popped.
    Context.insert(InstFakeUse::create(Func, Dest));
  }
}

void TargetX8632::lowerArguments() {
  VarList &Args = Func->getArgs();
  // The first four arguments of vector type, regardless of their
  // position relative to the other arguments in the argument list, are
  // passed in registers xmm0 - xmm3.
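  //
  // Hypothetical example: for "void f(i32 %a, <4 x i32> %b)", %b gets home
  // register xmm0; a new xmm0-pinned variable replaces it in the argument
  // list, and the assignment inserted below copies that register into
  // whatever location is later chosen for %b.  %a is left alone and is
  // handled as a stack argument by finishArgumentLowering() in addProlog().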
  unsigned NumXmmArgs = 0;

  Context.init(Func->getEntryNode());
  Context.setInsertPoint(Context.getCur());

  for (SizeT I = 0, E = Args.size();
       I < E && NumXmmArgs < Traits::X86_MAX_XMM_ARGS; ++I) {
    Variable *Arg = Args[I];
    Type Ty = Arg->getType();
    if (!isVectorType(Ty))
      continue;
    // Replace Arg in the argument list with the home register. Then
    // generate an instruction in the prolog to copy the home register
    // to the assigned location of Arg.
    int32_t RegNum = Traits::RegisterSet::Reg_xmm0 + NumXmmArgs;
    ++NumXmmArgs;
    Variable *RegisterArg = Func->makeVariable(Ty);
    if (BuildDefs::dump())
      RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func));
    RegisterArg->setRegNum(RegNum);
    RegisterArg->setIsArg();
    Arg->setIsArg(false);

    Args[I] = RegisterArg;
    Context.insert(InstAssign::create(Func, Arg, RegisterArg));
  }
}

void TargetX8632::lowerRet(const InstRet *Inst) {
  Variable *Reg = nullptr;
  if (Inst->hasRetValue()) {
    Operand *Src0 = legalize(Inst->getRetValue());
    // TODO(jpp): this is not needed.
    if (Src0->getType() == IceType_i64) {
      Variable *eax =
          legalizeToReg(loOperand(Src0), Traits::RegisterSet::Reg_eax);
      Variable *edx =
          legalizeToReg(hiOperand(Src0), Traits::RegisterSet::Reg_edx);
      Reg = eax;
      Context.insert(InstFakeUse::create(Func, edx));
    } else if (isScalarFloatingType(Src0->getType())) {
      _fld(Src0);
    } else if (isVectorType(Src0->getType())) {
      Reg = legalizeToReg(Src0, Traits::RegisterSet::Reg_xmm0);
    } else {
      _mov(Reg, Src0, Traits::RegisterSet::Reg_eax);
    }
  }
  // Add a ret instruction even if sandboxing is enabled, because
  // addEpilog explicitly looks for a ret instruction as a marker for
  // where to insert the frame removal instructions.
  _ret(Reg);
  // Add a fake use of esp to make sure esp stays alive for the entire
  // function. Otherwise post-call esp adjustments get dead-code
  // eliminated. TODO: Are there more places where the fake use
  // should be inserted? E.g. "void f(int n){while(1) g(n);}" may not
  // have a ret instruction.
  Variable *esp =
      Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
  Context.insert(InstFakeUse::create(Func, esp));
}

void TargetX8632::addProlog(CfgNode *Node) {
  // Stack frame layout:
  //
  // +------------------------+
  // | 1. return address      |
  // +------------------------+
  // | 2. preserved registers |
  // +------------------------+
  // | 3. padding             |
  // +------------------------+
  // | 4. global spill area   |
  // +------------------------+
  // | 5. padding             |
  // +------------------------+
  // | 6. local spill area    |
  // +------------------------+
  // | 7. padding             |
  // +------------------------+
  // | 8. allocas             |
  // +------------------------+
  //
  // The following variables record the size in bytes of the given areas:
  // * X86_RET_IP_SIZE_BYTES: area 1
  // * PreservedRegsSizeBytes: area 2
  // * SpillAreaPaddingBytes: area 3
  // * GlobalsSize: area 4
  // * GlobalsAndSubsequentPaddingSize: areas 4 - 5
  // * LocalsSpillAreaSize: area 6
  // * SpillAreaSizeBytes: areas 3 - 7
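  //
  // A worked example, purely illustrative: with ebp pushed as the only
  // preserved register, 16 bytes of globals and 8 bytes of locals, the
  // numbers would be X86_RET_IP_SIZE_BYTES = 4, PreservedRegsSizeBytes = 4,
  // GlobalsSize = 16, LocalsSpillAreaSize = 8, and SpillAreaSizeBytes = 24
  // plus whatever padding is needed to keep esp 16-byte aligned at call
  // sites.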

  // Determine stack frame offsets for each Variable without a
  // register assignment. This can be done as one variable per stack
  // slot. Or, do coalescing by running the register allocator again
  // with an infinite set of registers (as a side effect, this gives
  // variables a second chance at physical register assignment).
  //
  // A middle ground approach is to leverage sparsity and allocate one
  // block of space on the frame for globals (variables with
  // multi-block lifetime), and one block to share for locals
  // (single-block lifetime).

  Context.init(Node);
  Context.setInsertPoint(Context.getCur());

  llvm::SmallBitVector CalleeSaves =
      getRegisterSet(RegSet_CalleeSave, RegSet_None);
  RegsUsed = llvm::SmallBitVector(CalleeSaves.size());
  VarList SortedSpilledVariables, VariablesLinkedToSpillSlots;
  size_t GlobalsSize = 0;
  // If there is a separate locals area, this represents that area.
  // Otherwise it counts any variable not counted by GlobalsSize.
  SpillAreaSizeBytes = 0;
  // If there is a separate locals area, this specifies the alignment
  // for it.
  uint32_t LocalsSlotsAlignmentBytes = 0;
  // The entire spill locations area gets aligned to largest natural
  // alignment of the variables that have a spill slot.
  uint32_t SpillAreaAlignmentBytes = 0;
  // A spill slot linked to a variable with a stack slot should reuse
  // that stack slot.
  std::function<bool(Variable *)> TargetVarHook =
      [&VariablesLinkedToSpillSlots](Variable *Var) {
        if (auto *SpillVar =
                llvm::dyn_cast<typename Traits::SpillVariable>(Var)) {
          assert(Var->mustNotHaveReg());
          if (SpillVar->getLinkedTo() && !SpillVar->getLinkedTo()->hasReg()) {
            VariablesLinkedToSpillSlots.push_back(Var);
            return true;
          }
        }
        return false;
      };

  // Compute the list of spilled variables and bounds for GlobalsSize, etc.
  getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize,
                        &SpillAreaSizeBytes, &SpillAreaAlignmentBytes,
                        &LocalsSlotsAlignmentBytes, TargetVarHook);
  uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes;
  SpillAreaSizeBytes += GlobalsSize;

  // Add push instructions for preserved registers.
  uint32_t NumCallee = 0;
  size_t PreservedRegsSizeBytes = 0;
  for (SizeT i = 0; i < CalleeSaves.size(); ++i) {
    if (CalleeSaves[i] && RegsUsed[i]) {
      ++NumCallee;
      PreservedRegsSizeBytes += typeWidthInBytes(IceType_i32);
      _push(getPhysicalRegister(i));
    }
  }
  Ctx->statsUpdateRegistersSaved(NumCallee);

  // Generate "push ebp; mov ebp, esp"
  if (IsEbpBasedFrame) {
    assert((RegsUsed & getRegisterSet(RegSet_FramePointer, RegSet_None))
               .count() == 0);
    PreservedRegsSizeBytes += typeWidthInBytes(IceType_i32);
    Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp);
    Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
    _push(ebp);
    _mov(ebp, esp);
    // Keep ebp live for late-stage liveness analysis
    // (e.g. asm-verbose mode).
    Context.insert(InstFakeUse::create(Func, ebp));
  }

  // Align the variables area. SpillAreaPaddingBytes is the size of
  // the region after the preserved registers and before the spill areas.
  // LocalsSlotsPaddingBytes is the amount of padding between the globals
  // and locals area if they are separate.
  assert(SpillAreaAlignmentBytes <= Traits::X86_STACK_ALIGNMENT_BYTES);
  assert(LocalsSlotsAlignmentBytes <= SpillAreaAlignmentBytes);
  uint32_t SpillAreaPaddingBytes = 0;
  uint32_t LocalsSlotsPaddingBytes = 0;
  alignStackSpillAreas(Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes,
                       SpillAreaAlignmentBytes, GlobalsSize,
                       LocalsSlotsAlignmentBytes, &SpillAreaPaddingBytes,
                       &LocalsSlotsPaddingBytes);
  SpillAreaSizeBytes += SpillAreaPaddingBytes + LocalsSlotsPaddingBytes;
  uint32_t GlobalsAndSubsequentPaddingSize =
      GlobalsSize + LocalsSlotsPaddingBytes;

  // Align esp if necessary.
  if (NeedsStackAlignment) {
    uint32_t StackOffset =
        Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes;
    uint32_t StackSize =
        Traits::applyStackAlignment(StackOffset + SpillAreaSizeBytes);
    SpillAreaSizeBytes = StackSize - StackOffset;
  }

  // Generate "sub esp, SpillAreaSizeBytes"
  if (SpillAreaSizeBytes)
    _sub(getPhysicalRegister(Traits::RegisterSet::Reg_esp),
         Ctx->getConstantInt32(SpillAreaSizeBytes));
  Ctx->statsUpdateFrameBytes(SpillAreaSizeBytes);

  resetStackAdjustment();

  // Fill in stack offsets for stack args, and copy args into registers
  // for those that were register-allocated. Args are pushed right to
  // left, so Arg[0] is closest to the stack/frame pointer.
  Variable *FramePtr = getPhysicalRegister(getFrameOrStackReg());
  size_t BasicFrameOffset =
      PreservedRegsSizeBytes + Traits::X86_RET_IP_SIZE_BYTES;
  if (!IsEbpBasedFrame)
    BasicFrameOffset += SpillAreaSizeBytes;

  const VarList &Args = Func->getArgs();
  size_t InArgsSizeBytes = 0;
  unsigned NumXmmArgs = 0;
  for (Variable *Arg : Args) {
    // Skip arguments passed in registers.
    if (isVectorType(Arg->getType()) && NumXmmArgs < Traits::X86_MAX_XMM_ARGS) {
      ++NumXmmArgs;
      continue;
    }
    finishArgumentLowering(Arg, FramePtr, BasicFrameOffset, InArgsSizeBytes);
  }

  // Fill in stack offsets for locals.
  assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes,
                      SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize,
                      IsEbpBasedFrame);
  // Assign stack offsets to variables that have been linked to spilled
  // variables.
  for (Variable *Var : VariablesLinkedToSpillSlots) {
    Variable *Linked =
        (llvm::cast<typename Traits::SpillVariable>(Var))->getLinkedTo();
    Var->setStackOffset(Linked->getStackOffset());
  }
  this->HasComputedFrame = true;

  if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) {
    OstreamLocker L(Func->getContext());
    Ostream &Str = Func->getContext()->getStrDump();

    Str << "Stack layout:\n";
    uint32_t EspAdjustmentPaddingSize =
        SpillAreaSizeBytes - LocalsSpillAreaSize -
        GlobalsAndSubsequentPaddingSize - SpillAreaPaddingBytes;
    Str << " in-args = " << InArgsSizeBytes << " bytes\n"
        << " return address = " << Traits::X86_RET_IP_SIZE_BYTES << " bytes\n"
        << " preserved registers = " << PreservedRegsSizeBytes << " bytes\n"
        << " spill area padding = " << SpillAreaPaddingBytes << " bytes\n"
        << " globals spill area = " << GlobalsSize << " bytes\n"
        << " globals-locals spill areas intermediate padding = "
        << GlobalsAndSubsequentPaddingSize - GlobalsSize << " bytes\n"
        << " locals spill area = " << LocalsSpillAreaSize << " bytes\n"
        << " esp alignment padding = " << EspAdjustmentPaddingSize
        << " bytes\n";

    Str << "Stack details:\n"
        << " esp adjustment = " << SpillAreaSizeBytes << " bytes\n"
        << " spill area alignment = " << SpillAreaAlignmentBytes << " bytes\n"
        << " locals spill area alignment = " << LocalsSlotsAlignmentBytes
        << " bytes\n"
        << " is ebp based = " << IsEbpBasedFrame << "\n";
  }
}

void TargetX8632::addEpilog(CfgNode *Node) {
  InstList &Insts = Node->getInsts();
  InstList::reverse_iterator RI, E;
  for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) {
    if (llvm::isa<typename Traits::Insts::Ret>(*RI))
      break;
  }
  if (RI == E)
    return;

  // Convert the reverse_iterator position into its corresponding
  // (forward) iterator position.
  InstList::iterator InsertPoint = RI.base();
  --InsertPoint;
  Context.init(Node);
  Context.setInsertPoint(InsertPoint);

  Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
  if (IsEbpBasedFrame) {
    Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp);
    // For late-stage liveness analysis (e.g. asm-verbose mode),
    // adding a fake use of esp before the assignment of esp=ebp keeps
    // previous esp adjustments from being dead-code eliminated.
    Context.insert(InstFakeUse::create(Func, esp));
    _mov(esp, ebp);
    _pop(ebp);
  } else {
    // add esp, SpillAreaSizeBytes
    if (SpillAreaSizeBytes)
      _add(esp, Ctx->getConstantInt32(SpillAreaSizeBytes));
  }

  // Add pop instructions for preserved registers.
  llvm::SmallBitVector CalleeSaves =
      getRegisterSet(RegSet_CalleeSave, RegSet_None);
  for (SizeT i = 0; i < CalleeSaves.size(); ++i) {
    SizeT j = CalleeSaves.size() - i - 1;
    if (j == Traits::RegisterSet::Reg_ebp && IsEbpBasedFrame)
      continue;
    if (CalleeSaves[j] && RegsUsed[j]) {
      _pop(getPhysicalRegister(j));
    }
  }

  if (!Ctx->getFlags().getUseSandboxing())
    return;
  // Change the original ret instruction into a sandboxed return sequence.
  // t:ecx = pop
  // bundle_lock
  // and t, ~31
  // jmp *t
  // bundle_unlock
  // FakeUse <original_ret_operand>
  Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx);
  _pop(T_ecx);
  lowerIndirectJump(T_ecx);
  if (RI->getSrcSize()) {
    Variable *RetValue = llvm::cast<Variable>(RI->getSrc(0));
    Context.insert(InstFakeUse::create(Func, RetValue));
  }
  RI->setDeleted();
}

void TargetX8632::emitJumpTable(const Cfg *Func,
                                const InstJumpTable *JumpTable) const {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  IceString MangledName = Ctx->mangleName(Func->getFunctionName());
  Str << "\t.section\t.rodata." << MangledName
      << "$jumptable,\"a\",@progbits\n";
  Str << "\t.align\t" << typeWidthInBytes(getPointerType()) << "\n";
  Str << InstJumpTable::makeName(MangledName, JumpTable->getId()) << ":";

  // On X8632 pointers are 32-bit hence the use of .long
  for (SizeT I = 0; I < JumpTable->getNumTargets(); ++I)
    Str << "\n\t.long\t" << JumpTable->getTarget(I)->getAsmName();
  Str << "\n";
}
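
// As a rough illustration of emitJumpTable()'s output for a hypothetical
// function "foo" with three targets (the label and target names below are
// made up; the real ones come from InstJumpTable::makeName() and the target
// nodes' getAsmName()):
//   .section  .rodata.foo$jumptable,"a",@progbits
//   .align    4
//   <table label>:
//   .long     <target 0 label>
//   .long     <target 1 label>
//   .long     <target 2 label>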

TargetDataX8632::TargetDataX8632(GlobalContext *Ctx)
    : TargetDataLowering(Ctx) {}

namespace {
template <typename T> struct PoolTypeConverter {};

template <> struct PoolTypeConverter<float> {
  using PrimitiveIntType = uint32_t;
  using IceType = ConstantFloat;
  static const Type Ty = IceType_f32;
  static const char *TypeName;
  static const char *AsmTag;
  static const char *PrintfString;
};
const char *PoolTypeConverter<float>::TypeName = "float";
const char *PoolTypeConverter<float>::AsmTag = ".long";
const char *PoolTypeConverter<float>::PrintfString = "0x%x";

template <> struct PoolTypeConverter<double> {
  using PrimitiveIntType = uint64_t;
  using IceType = ConstantDouble;
  static const Type Ty = IceType_f64;
  static const char *TypeName;
  static const char *AsmTag;
  static const char *PrintfString;
};
const char *PoolTypeConverter<double>::TypeName = "double";
const char *PoolTypeConverter<double>::AsmTag = ".quad";
const char *PoolTypeConverter<double>::PrintfString = "0x%llx";

// Add converter for int type constant pooling
template <> struct PoolTypeConverter<uint32_t> {
  using PrimitiveIntType = uint32_t;
  using IceType = ConstantInteger32;
  static const Type Ty = IceType_i32;
  static const char *TypeName;
  static const char *AsmTag;
  static const char *PrintfString;
};
const char *PoolTypeConverter<uint32_t>::TypeName = "i32";
const char *PoolTypeConverter<uint32_t>::AsmTag = ".long";
const char *PoolTypeConverter<uint32_t>::PrintfString = "0x%x";

// Add converter for int type constant pooling
template <> struct PoolTypeConverter<uint16_t> {
  using PrimitiveIntType = uint32_t;
  using IceType = ConstantInteger32;
  static const Type Ty = IceType_i16;
  static const char *TypeName;
  static const char *AsmTag;
  static const char *PrintfString;
};
const char *PoolTypeConverter<uint16_t>::TypeName = "i16";
const char *PoolTypeConverter<uint16_t>::AsmTag = ".short";
const char *PoolTypeConverter<uint16_t>::PrintfString = "0x%x";

// Add converter for int type constant pooling
template <> struct PoolTypeConverter<uint8_t> {
  using PrimitiveIntType = uint32_t;
  using IceType = ConstantInteger32;
  static const Type Ty = IceType_i8;
  static const char *TypeName;
  static const char *AsmTag;
  static const char *PrintfString;
};
const char *PoolTypeConverter<uint8_t>::TypeName = "i8";
const char *PoolTypeConverter<uint8_t>::AsmTag = ".byte";
const char *PoolTypeConverter<uint8_t>::PrintfString = "0x%x";
} // end of anonymous namespace

template <typename T>
void TargetDataX8632::emitConstantPool(GlobalContext *Ctx) {
  if (!BuildDefs::dump())
    return;
  Ostream &Str = Ctx->getStrEmit();
  Type Ty = T::Ty;
  SizeT Align = typeAlignInBytes(Ty);
  ConstantList Pool = Ctx->getConstantPool(Ty);

  Str << "\t.section\t.rodata.cst" << Align << ",\"aM\",@progbits," << Align
      << "\n";
  Str << "\t.align\t" << Align << "\n";

  // If reorder-pooled-constants option is set to true, we need to shuffle the
  // constant pool before emitting it.
  if (Ctx->getFlags().shouldReorderPooledConstants() && !Pool.empty()) {
    // Use the constant's kind value as the salt for creating random number
    // generator.
    Operand::OperandKind K = (*Pool.begin())->getKind();

    RandomNumberGenerator RNG(Ctx->getFlags().getRandomSeed(),
                              RPE_PooledConstantReordering, K);
    RandomShuffle(Pool.begin(), Pool.end(),
                  [&RNG](uint64_t N) { return (uint32_t)RNG.next(N); });
  }

  for (Constant *C : Pool) {
    if (!C->getShouldBePooled())
      continue;
    typename T::IceType *Const = llvm::cast<typename T::IceType>(C);
    typename T::IceType::PrimType Value = Const->getValue();
    // Use memcpy() to copy bits from Value into RawValue in a way
    // that avoids breaking strict-aliasing rules.
    typename T::PrimitiveIntType RawValue;
    memcpy(&RawValue, &Value, sizeof(Value));
    char buf[30];
    int CharsPrinted =
        snprintf(buf, llvm::array_lengthof(buf), T::PrintfString, RawValue);
    assert(CharsPrinted >= 0 &&
           (size_t)CharsPrinted < llvm::array_lengthof(buf));
    (void)CharsPrinted; // avoid warnings if asserts are disabled
    Const->emitPoolLabel(Str);
    Str << ":\n\t" << T::AsmTag << "\t" << buf << "\t# " << T::TypeName << " "
        << Value << "\n";
  }
}
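
// For illustration, a pooled f32 constant with value 1.0 would be emitted by
// the loop above roughly as follows (the label text is hypothetical; it is
// whatever emitPoolLabel() prints):
//   <pool label>:
//     .long 0x3f800000  # float 1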

void TargetDataX8632::lowerConstants() {
  if (Ctx->getFlags().getDisableTranslation())
    return;
  // No need to emit constants from the int pool since (for x86) they
  // are embedded as immediates in the instructions, just emit float/double.
  switch (Ctx->getFlags().getOutFileType()) {
  case FT_Elf: {
    ELFObjectWriter *Writer = Ctx->getObjectWriter();

    Writer->writeConstantPool<ConstantInteger32>(IceType_i8);
    Writer->writeConstantPool<ConstantInteger32>(IceType_i16);
    Writer->writeConstantPool<ConstantInteger32>(IceType_i32);

    Writer->writeConstantPool<ConstantFloat>(IceType_f32);
    Writer->writeConstantPool<ConstantDouble>(IceType_f64);
  } break;
  case FT_Asm:
  case FT_Iasm: {
    OstreamLocker L(Ctx);

    emitConstantPool<PoolTypeConverter<uint8_t>>(Ctx);
    emitConstantPool<PoolTypeConverter<uint16_t>>(Ctx);
    emitConstantPool<PoolTypeConverter<uint32_t>>(Ctx);

    emitConstantPool<PoolTypeConverter<float>>(Ctx);
    emitConstantPool<PoolTypeConverter<double>>(Ctx);
  } break;
  }
}

void TargetDataX8632::lowerJumpTables() {
  switch (Ctx->getFlags().getOutFileType()) {
  case FT_Elf: {
    ELFObjectWriter *Writer = Ctx->getObjectWriter();
    for (const JumpTableData &JT : Ctx->getJumpTables())
      Writer->writeJumpTable(JT, TargetX8632::Traits::RelFixup);
  } break;
  case FT_Asm:
    // Already emitted from Cfg
    break;
  case FT_Iasm: {
    if (!BuildDefs::dump())
      return;
    Ostream &Str = Ctx->getStrEmit();
    for (const JumpTableData &JT : Ctx->getJumpTables()) {
      Str << "\t.section\t.rodata." << JT.getFunctionName()
          << "$jumptable,\"a\",@progbits\n";
      Str << "\t.align\t" << typeWidthInBytes(getPointerType()) << "\n";
      Str << InstJumpTable::makeName(JT.getFunctionName(), JT.getId()) << ":";

      // On X8632 pointers are 32-bit hence the use of .long
      for (intptr_t TargetOffset : JT.getTargetOffsets())
        Str << "\n\t.long\t" << JT.getFunctionName() << "+" << TargetOffset;
      Str << "\n";
    }
  } break;
  }
}

void TargetDataX8632::lowerGlobals(const VariableDeclarationList &Vars,
                                   const IceString &SectionSuffix) {
  switch (Ctx->getFlags().getOutFileType()) {
  case FT_Elf: {
    ELFObjectWriter *Writer = Ctx->getObjectWriter();
    Writer->writeDataSection(Vars, TargetX8632::Traits::RelFixup,
                             SectionSuffix);
  } break;
  case FT_Asm:
  case FT_Iasm: {
    const IceString &TranslateOnly = Ctx->getFlags().getTranslateOnly();
    OstreamLocker L(Ctx);
    for (const VariableDeclaration *Var : Vars) {
      if (GlobalContext::matchSymbolName(Var->getName(), TranslateOnly)) {
        emitGlobal(*Var, SectionSuffix);
      }
    }
  } break;
  }
}

TargetHeaderX8632::TargetHeaderX8632(GlobalContext *Ctx)
    : TargetHeaderLowering(Ctx) {}

// In some cases, there are x-macros tables for both high-level and
// low-level instructions/operands that use the same enum key value.
// The tables are kept separate to maintain a proper separation
// between abstraction layers. There is a risk that the tables could
// get out of sync if enum values are reordered or if entries are
// added or deleted. The following dummy namespaces use
// static_asserts to ensure everything is kept in sync.
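//
// For example (names and values here are only illustrative): if the
// high-level ICEINSTFCMP_TABLE gives some condition tag the enum value 4 but
// the entries of FCMPX8632_TABLE are reordered so that the same tag lands at
// position 5, the corresponding static_assert below fails at compile time
// instead of the mismatch silently corrupting the lowering tables.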

namespace {
// Validate the enum values in FCMPX8632_TABLE.
namespace dummy1 {
// Define a temporary set of enum values based on low-level table
// entries.
enum _tmp_enum {
#define X(val, dflt, swapS, C1, C2, swapV, pred) _tmp_##val,
  FCMPX8632_TABLE
#undef X
  _num
};
// Define a set of constants based on high-level table entries.
#define X(tag, str) static const int _table1_##tag = InstFcmp::tag;
ICEINSTFCMP_TABLE
#undef X
// Define a set of constants based on low-level table entries, and
// ensure the table entry keys are consistent.
#define X(val, dflt, swapS, C1, C2, swapV, pred) \
  static const int _table2_##val = _tmp_##val; \
  static_assert( \
      _table1_##val == _table2_##val, \
      "Inconsistency between FCMPX8632_TABLE and ICEINSTFCMP_TABLE");
FCMPX8632_TABLE
#undef X
// Repeat the static asserts with respect to the high-level table
// entries in case the high-level table has extra entries.
#define X(tag, str) \
  static_assert( \
      _table1_##tag == _table2_##tag, \
      "Inconsistency between FCMPX8632_TABLE and ICEINSTFCMP_TABLE");
ICEINSTFCMP_TABLE
#undef X
} // end of namespace dummy1

// Validate the enum values in ICMPX8632_TABLE.
namespace dummy2 {
// Define a temporary set of enum values based on low-level table
// entries.
enum _tmp_enum {
#define X(val, C_32, C1_64, C2_64, C3_64) _tmp_##val,
  ICMPX8632_TABLE
#undef X
  _num
};
// Define a set of constants based on high-level table entries.
#define X(tag, str) static const int _table1_##tag = InstIcmp::tag;
ICEINSTICMP_TABLE
#undef X
// Define a set of constants based on low-level table entries, and
// ensure the table entry keys are consistent.
#define X(val, C_32, C1_64, C2_64, C3_64) \
  static const int _table2_##val = _tmp_##val; \
  static_assert( \
      _table1_##val == _table2_##val, \
      "Inconsistency between ICMPX8632_TABLE and ICEINSTICMP_TABLE");
ICMPX8632_TABLE
#undef X
// Repeat the static asserts with respect to the high-level table
// entries in case the high-level table has extra entries.
#define X(tag, str) \
  static_assert( \
      _table1_##tag == _table2_##tag, \
      "Inconsistency between ICMPX8632_TABLE and ICEINSTICMP_TABLE");
ICEINSTICMP_TABLE
#undef X
} // end of namespace dummy2

// Validate the enum values in ICETYPEX8632_TABLE.
namespace dummy3 {
// Define a temporary set of enum values based on low-level table
// entries.
enum _tmp_enum {
#define X(tag, elementty, cvt, sdss, pack, width, fld) _tmp_##tag,
  ICETYPEX8632_TABLE
#undef X
  _num
};
// Define a set of constants based on high-level table entries.
#define X(tag, sizeLog2, align, elts, elty, str) \
  static const int _table1_##tag = tag;
ICETYPE_TABLE
#undef X
// Define a set of constants based on low-level table entries, and
// ensure the table entry keys are consistent.
#define X(tag, elementty, cvt, sdss, pack, width, fld) \
  static const int _table2_##tag = _tmp_##tag; \
  static_assert(_table1_##tag == _table2_##tag, \
                "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE");
ICETYPEX8632_TABLE
#undef X
// Repeat the static asserts with respect to the high-level table
// entries in case the high-level table has extra entries.
#define X(tag, sizeLog2, align, elts, elty, str) \
  static_assert(_table1_##tag == _table2_##tag, \
                "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE");
ICETYPE_TABLE
#undef X
} // end of namespace dummy3
} // end of anonymous namespace

} // end of namespace Ice