Initial skeleton of Subzero.
This includes just enough code to build the high-level ICE IR and dump it back out again. There is a script szdiff.py that does a fuzzy diff of the input and output for verification. See the comment in szdiff.py for a description of the fuzziness.
Building llvm2ice requires LLVM headers, libs, and tools (e.g. FileCheck) to be present. The paths default to something like llvm_i686_linux_work/Release+Asserts/ based on the checked-out and built pnacl-llvm code; I'll try to figure out how to detect the build configuration more automatically.
"make check" runs the lit tests.
This CL has under 2000 lines of "interesting" Ice*.{h,cpp} code, plus 600 lines of llvm2ice.cpp driver code, and the rest is tests.
Here is the high-level mapping of source files to functionality:
IceDefs.h, IceTypes.h, IceTypes.cpp:
Commonly used types and utilities.
IceCfg.h, IceCfg.cpp:
Operations at the function level.
IceCfgNode.h, IceCfgNode.cpp:
Operations on basic blocks (nodes).
IceInst.h, IceInst.cpp:
Operations on instructions.
IceOperand.h, IceOperand.cpp:
Operations on operands, such as stack locations, physical registers, and constants.
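A rough sketch (illustrative only, not code from this CL) of how these pieces
compose to build and dump a trivial function. IceType_i32 is assumed to be one
of the types in IceTypes.h, and InstRet::create() is assumed to follow the same
create() pattern as the other Inst subclasses:

  #include "IceCfg.h"
  #include "IceCfgNode.h"
  #include "IceGlobalContext.h"
  #include "IceInst.h"
  #include "IceOperand.h"

  void buildAndDumpTrivialFunction() {
    // Dump everything to stdout.
    Ice::GlobalContext Ctx(&llvm::outs(), &llvm::outs(), Ice::IceV_All, "");
    Ice::Cfg Func(&Ctx);
    Func.setFunctionName("trivial");
    Func.setReturnType(Ice::IceType_i32); // assumed type tag
    Ice::CfgNode *Entry = Func.makeNode("entry");
    Func.setEntryNode(Entry);
    // result = 42; ret result -- using the pooled constant 42.
    Ice::Variable *Result = Func.makeVariable(Ice::IceType_i32, Entry, "result");
    Ice::Constant *FortyTwo = Ctx.getConstantInt(Ice::IceType_i32, 42);
    Entry->appendInst(Ice::InstAssign::create(&Func, Result, FortyTwo));
    Entry->appendInst(Ice::InstRet::create(&Func, Result)); // assumed signature
    Func.computePredecessors();
    Func.dump();
  }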
BUG= none
R=jfb@chromium.org
Review URL: https://codereview.chromium.org/205613002
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a19ccde
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,12 @@
+# Ignore filename patterns wherever they appear
+*~
+*.o
+*.orig
+*.pyc
+*.swp
+.#*
+\#*
+
+# Ignore specific patterns at the top-level directory
+/llvm2ice
+/build/
diff --git a/LICENSE.TXT b/LICENSE.TXT
new file mode 100644
index 0000000..954387d
--- /dev/null
+++ b/LICENSE.TXT
@@ -0,0 +1,42 @@
+==============================================================================
+Subzero Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2014 Google Inc.
+All rights reserved.
+
+Developed by:
+
+ Native Client Team
+
+ Google Inc.
+
+ http://www.chromium.org/nativeclient
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the Native Client Team, Google Inc., nor the names of
+ its contributors may be used to endorse or promote products derived from
+ this Software without specific prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e32ae89
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,74 @@
+# The following variables will likely need to be modified, depending on where
+# and how you built LLVM & Clang. They can be overridden in a command-line
+# invocation of make, like:
+#
+# make LLVM_SRC_PATH=<path> LLVM_BIN_PATH=<path> ...
+#
+
+# LLVM_SRC_PATH is the path to the root of the checked out source code. This
+# directory should contain the configure script, the include/ and lib/
+# directories of LLVM, Clang in tools/clang/, etc.
+# Alternatively, if you're building vs. a binary download of LLVM, then
+# LLVM_SRC_PATH can point to the main untarred directory.
+LLVM_SRC_PATH ?= ../llvm
+
+# LLVM_BIN_PATH is the directory where binaries are placed by the LLVM build
+# process. It should contain tools like opt, llc and clang. The default
+# points at the checked-out and built pnacl-llvm tree (Release+Asserts).
+LLVM_BIN_PATH ?= $(shell readlink -e \
+ ../../out/llvm_i686_linux_work/Release+Asserts/bin)
+
+$(info -----------------------------------------------)
+$(info Using LLVM_SRC_PATH = $(LLVM_SRC_PATH))
+$(info Using LLVM_BIN_PATH = $(LLVM_BIN_PATH))
+$(info -----------------------------------------------)
+
+LLVM_CXXFLAGS := `$(LLVM_BIN_PATH)/llvm-config --cxxflags`
+LLVM_LDFLAGS := `$(LLVM_BIN_PATH)/llvm-config --ldflags --libs`
+
+# It's recommended that CXX match the compiler you used to build LLVM itself.
+OPTLEVEL := -O0
+CXX := g++
+CXXFLAGS := -Wall -Werror -fno-rtti -fno-exceptions \
+ $(OPTLEVEL) -g $(LLVM_CXXFLAGS) -m32
+LDFLAGS := -m32
+
+SRCS= \
+ IceCfg.cpp \
+ IceCfgNode.cpp \
+ IceGlobalContext.cpp \
+ IceInst.cpp \
+ IceOperand.cpp \
+ IceTypes.cpp \
+ llvm2ice.cpp
+
+OBJS=$(patsubst %.cpp, build/%.o, $(SRCS))
+
+# Keep 'all' as the first target so it's the default.
+all: llvm2ice
+
+.PHONY: all
+
+llvm2ice: $(OBJS)
+ $(CXX) $(LDFLAGS) -o $@ $^ $(LLVM_LDFLAGS) -ldl
+
+# TODO: Be more precise than "*.h" here and elsewhere.
+$(OBJS): build/%.o: src/%.cpp src/*.h src/*.def
+ $(CXX) -c $(CXXFLAGS) $< -o $@
+
+$(OBJS): | build
+
+build:
+ @mkdir -p $@
+
+check: llvm2ice
+ LLVM_BIN_PATH=$(LLVM_BIN_PATH) \
+ $(LLVM_SRC_PATH)/utils/lit/lit.py -sv tests_lit
+
+# TODO: Fix the use of wildcards.
+format:
+ $(LLVM_BIN_PATH)/clang-format -style=LLVM -i \
+ src/Ice*.h src/Ice*.cpp src/llvm2ice.cpp
+
+clean:
+ rm -rf llvm2ice *.o build/
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..9de4602
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,71 @@
+Subzero - Fast code generator for PNaCl bitcode
+===============================================
+
+Building
+--------
+
+You must have LLVM trunk source code available and built. See
+http://llvm.org/docs/GettingStarted.html#getting-started-quickly-a-summary for
+guidance.
+
+Set variables ``LLVM_SRC_PATH`` and ``LLVM_BIN_PATH`` to point to the
+appropriate directories in the LLVM source and build directories. These can be
+set as environment variables, or you can modify the top-level Makefile.
+
+Run ``make`` at the top level to build the main target ``llvm2ice``.
+
+``llvm2ice``
+------------
+
+The ``llvm2ice`` program uses the LLVM infrastructure to parse an LLVM bitcode
+file and translate it into ICE. It then invokes ICE's translate method to lower
+it to target-specific machine code, dumping the IR at various stages of the
+translation.
+
+The program can be run as follows::
+
+ ../llvm2ice ./ir_samples/<file>.ll
+ ../llvm2ice ./tests_lit/llvm2ice_tests/<file>.ll
+
+At this time, ``llvm2ice`` accepts a few arguments:
+
+ ``-help`` -- Show available arguments and possible values.
+
+ ``-notranslate`` -- Suppress the ICE translation phase, which is useful if
+ ICE is missing some support.
+
+ ``-target=<TARGET>`` -- Set the target architecture. The default is x8632,
+ and x8632fast (generate x8632 code as fast as possible at the cost of code
+ quality) is also available. Future targets include x8664, arm32, and arm64.
+
+ ``-verbose=<list>`` -- Set verbosity flags. This argument allows a
+ comma-separated list of values. The default is ``none``, and the value
+ ``inst,pred`` roughly matches the contents of the input ``.ll`` file. Of
+ particular use are ``all`` and ``none``; see the example invocation below.
+
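+For example (illustrative), to dump instructions and predecessor lists for a
+sample input::
+
+  ../llvm2ice -verbose=inst,pred ./ir_samples/<file>.ll
+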
+See ir_samples/README.rst for more details.
+
+Running the test suite
+----------------------
+
+Subzero uses the LLVM ``lit`` testing tool for its test suite, which lives in
+``tests_lit``. To execute the test suite, first build Subzero, and then run::
+
+ python <path_to_lit.py> -sv tests_lit
+
+``<path_to_lit.py>`` is the path to the ``lit.py`` script in the LLVM source
+tree (``$LLVM_SRC_PATH/utils/lit/lit.py``).
+
+The above ``lit`` invocation also needs the LLVM binary path in the
+``LLVM_BIN_PATH`` environment variable.
+
+Assuming the LLVM paths are set up, ``make check`` is a convenient way to run
+the test suite.
+
+Assembling ``llvm2ice`` output
+------------------------------
+
+``llvm2ice`` currently produces textual assembly code in a form suitable for
+input to ``llvm-mc``, using "intel" assembly syntax. The first line of output
+is a convenient comment indicating how to pipe the output to ``llvm-mc`` to
+produce object code.
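+
+For example, a pipeline along these lines should work (the exact ``llvm-mc``
+flags here are an illustration and may differ from the emitted comment)::
+
+  ../llvm2ice ./ir_samples/<file>.ll | $LLVM_BIN_PATH/llvm-mc -arch=x86 \
+    -x86-asm-syntax=intel -filetype=obj -o <file>.o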
diff --git a/src/IceCfg.cpp b/src/IceCfg.cpp
new file mode 100644
index 0000000..f2b1cc9
--- /dev/null
+++ b/src/IceCfg.cpp
@@ -0,0 +1,93 @@
+//===- subzero/src/IceCfg.cpp - Control flow graph implementation ---------===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Cfg class, including constant pool
+// management.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceCfg.h"
+#include "IceCfgNode.h"
+#include "IceDefs.h"
+#include "IceInst.h"
+#include "IceOperand.h"
+
+namespace Ice {
+
+Cfg::Cfg(GlobalContext *Ctx)
+ : Ctx(Ctx), FunctionName(""), ReturnType(IceType_void),
+ IsInternalLinkage(false), HasError(false), ErrorMessage(""), Entry(NULL),
+ NextInstNumber(1), CurrentNode(NULL) {}
+
+Cfg::~Cfg() {}
+
+void Cfg::setError(const IceString &Message) {
+ HasError = true;
+ ErrorMessage = Message;
+ Ctx->getStrDump() << "ICE translation error: " << ErrorMessage << "\n";
+}
+
+CfgNode *Cfg::makeNode(const IceString &Name) {
+ SizeT LabelIndex = Nodes.size();
+ CfgNode *Node = CfgNode::create(this, LabelIndex, Name);
+ Nodes.push_back(Node);
+ return Node;
+}
+
+// Create a new Variable with a particular type and an optional
+// name. The Node argument is the node where the variable is defined.
+Variable *Cfg::makeVariable(Type Ty, const CfgNode *Node,
+ const IceString &Name) {
+ SizeT Index = Variables.size();
+ Variables.push_back(Variable::create(this, Ty, Node, Index, Name));
+ return Variables[Index];
+}
+
+void Cfg::addArg(Variable *Arg) {
+ Arg->setIsArg(this);
+ Args.push_back(Arg);
+}
+
+void Cfg::computePredecessors() {
+ for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
+ (*I)->computePredecessors();
+ }
+}
+
+// ======================== Dump routines ======================== //
+
+void Cfg::dump() {
+ Ostream &Str = Ctx->getStrDump();
+ setCurrentNode(getEntryNode());
+ // Print function name+args
+ if (getContext()->isVerbose(IceV_Instructions)) {
+ Str << "define ";
+ if (getInternal())
+ Str << "internal ";
+ Str << ReturnType << " @" << getFunctionName() << "(";
+ for (SizeT i = 0; i < Args.size(); ++i) {
+ if (i > 0)
+ Str << ", ";
+ Str << Args[i]->getType() << " ";
+ Args[i]->dump(this);
+ }
+ Str << ") {\n";
+ }
+ setCurrentNode(NULL);
+ // Print each basic block
+ for (NodeList::const_iterator I = Nodes.begin(), E = Nodes.end(); I != E;
+ ++I) {
+ (*I)->dump(this);
+ }
+ if (getContext()->isVerbose(IceV_Instructions)) {
+ Str << "}\n";
+ }
+}
+
+} // end of namespace Ice
diff --git a/src/IceCfg.h b/src/IceCfg.h
new file mode 100644
index 0000000..05e1e3b
--- /dev/null
+++ b/src/IceCfg.h
@@ -0,0 +1,144 @@
+//===- subzero/src/IceCfg.h - Control flow graph ----------------*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Cfg class, which represents the control flow
+// graph and the overall per-function compilation context.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICECFG_H
+#define SUBZERO_SRC_ICECFG_H
+
+#include "IceDefs.h"
+#include "IceTypes.h"
+#include "IceGlobalContext.h"
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/Allocator.h"
+
+namespace Ice {
+
+class Cfg {
+public:
+ Cfg(GlobalContext *Ctx);
+ ~Cfg();
+
+ GlobalContext *getContext() const { return Ctx; }
+
+ // Manage the name and return type of the function being translated.
+ void setFunctionName(const IceString &Name) { FunctionName = Name; }
+ IceString getFunctionName() const { return FunctionName; }
+ void setReturnType(Type Ty) { ReturnType = Ty; }
+
+ // Manage the "internal" attribute of the function.
+ void setInternal(bool Internal) { IsInternalLinkage = Internal; }
+ bool getInternal() const { return IsInternalLinkage; }
+
+ // Translation error flagging. If support for some construct is
+ // known to be missing, instead of an assertion failure, setError()
+ // should be called and the error should be propagated back up.
+ // This way, we can gracefully fail to translate and let a fallback
+ // translator handle the function.
+ void setError(const IceString &Message);
+ bool hasError() const { return HasError; }
+ IceString getError() const { return ErrorMessage; }
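+
+ // Illustrative usage sketch (not part of this CL; isSupported() and the
+ // fallback hook are hypothetical names):
+ //   if (!isSupported(Instr)) {
+ //     Func->setError("unsupported instruction");
+ //     return;
+ //   }
+ //   ...and later, in the driver:
+ //   if (Func->hasError())
+ //     handOffToFallbackTranslator(Func);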
+
+ // Manage nodes (a.k.a. basic blocks, CfgNodes).
+ void setEntryNode(CfgNode *EntryNode) { Entry = EntryNode; }
+ CfgNode *getEntryNode() const { return Entry; }
+ // Create a node and append it to the end of the linearized list.
+ CfgNode *makeNode(const IceString &Name = "");
+ SizeT getNumNodes() const { return Nodes.size(); }
+ const NodeList &getNodes() const { return Nodes; }
+
+ // Manage instruction numbering.
+ int newInstNumber() { return NextInstNumber++; }
+
+ // Manage Variables.
+ Variable *makeVariable(Type Ty, const CfgNode *Node,
+ const IceString &Name = "");
+ SizeT getNumVariables() const { return Variables.size(); }
+ const VarList &getVariables() const { return Variables; }
+
+ // Manage arguments to the function.
+ void addArg(Variable *Arg);
+ const VarList &getArgs() const { return Args; }
+
+ // After the CFG is fully constructed, iterate over the nodes and
+ // compute the predecessor edges, in the form of
+ // CfgNode::InEdges[].
+ void computePredecessors();
+
+ // Manage the CurrentNode field, which is used for validating the
+ // Variable::DefNode field during dumping/emitting.
+ void setCurrentNode(const CfgNode *Node) { CurrentNode = Node; }
+ const CfgNode *getCurrentNode() const { return CurrentNode; }
+
+ void dump();
+
+ // Allocate data of type T using the per-Cfg allocator.
+ template <typename T> T *allocate() { return Allocator.Allocate<T>(); }
+
+ // Allocate an instruction of type T using the per-Cfg instruction allocator.
+ template <typename T> T *allocateInst() { return Allocator.Allocate<T>(); }
+
+ // Allocate an array of data of type T using the per-Cfg allocator.
+ template <typename T> T *allocateArrayOf(size_t NumElems) {
+ return Allocator.Allocate<T>(NumElems);
+ }
+
+ // Deallocate data that was allocated via allocate<T>().
+ template <typename T> void deallocate(T *Object) {
+ Allocator.Deallocate(Object);
+ }
+
+ // Deallocate data that was allocated via allocateInst<T>().
+ template <typename T> void deallocateInst(T *Instr) {
+ Allocator.Deallocate(Instr);
+ }
+
+ // Deallocate data that was allocated via allocateArrayOf<T>().
+ template <typename T> void deallocateArrayOf(T *Array) {
+ Allocator.Deallocate(Array);
+ }
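+
+ // Typical usage (illustrative; see CfgNode::create() and the Inst
+ // subclasses): construct objects with placement new into storage obtained
+ // from these allocators, e.g.
+ //   CfgNode *Node = new (Func->allocate<CfgNode>()) CfgNode(...);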
+
+private:
+ // TODO: for now, everything is allocated from the same allocator. In the
+ // future we may want to split this to several allocators, for example in
+ // order to use a "Recycler" to preserve memory. If we keep all allocation
+ // requests from the Cfg exposed via methods, we can always switch the
+ // implementation over at a later point.
+ llvm::BumpPtrAllocator Allocator;
+
+ GlobalContext *Ctx;
+ IceString FunctionName;
+ Type ReturnType;
+ bool IsInternalLinkage;
+ bool HasError;
+ IceString ErrorMessage;
+ CfgNode *Entry; // entry basic block
+ NodeList Nodes; // linearized node list; Entry should be first
+ int NextInstNumber;
+ VarList Variables;
+ VarList Args; // subset of Variables, in argument order
+
+ // CurrentNode is maintained during dumping/emitting just for
+ // validating Variable::DefNode. Normally, a traversal over
+ // CfgNodes maintains this, but before global operations like
+ // register allocation, setCurrentNode(NULL) should be called to
+ // avoid spurious validation failures.
+ const CfgNode *CurrentNode;
+
+ Cfg(const Cfg &) LLVM_DELETED_FUNCTION;
+ Cfg &operator=(const Cfg &) LLVM_DELETED_FUNCTION;
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICECFG_H
diff --git a/src/IceCfgNode.cpp b/src/IceCfgNode.cpp
new file mode 100644
index 0000000..fe8b70e0
--- /dev/null
+++ b/src/IceCfgNode.cpp
@@ -0,0 +1,109 @@
+//===- subzero/src/IceCfgNode.cpp - Basic block (node) implementation -----===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CfgNode class, including the
+// complexities of instruction insertion and in-edge calculation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceCfg.h"
+#include "IceCfgNode.h"
+#include "IceInst.h"
+#include "IceOperand.h"
+
+namespace Ice {
+
+CfgNode::CfgNode(Cfg *Func, SizeT LabelNumber, IceString Name)
+ : Func(Func), Number(LabelNumber), Name(Name) {}
+
+// Returns the name the node was created with. If no name was given,
+// it synthesizes a (hopefully) unique name.
+IceString CfgNode::getName() const {
+ if (!Name.empty())
+ return Name;
+ char buf[30];
+ snprintf(buf, llvm::array_lengthof(buf), "__%u", getIndex());
+ return buf;
+}
+
+// Adds an instruction to either the Phi list or the regular
+// instruction list. Validates that all Phis are added before all
+// regular instructions.
+void CfgNode::appendInst(Inst *Inst) {
+ if (InstPhi *Phi = llvm::dyn_cast<InstPhi>(Inst)) {
+ if (!Insts.empty()) {
+ Func->setError("Phi instruction added to the middle of a block");
+ return;
+ }
+ Phis.push_back(Phi);
+ } else {
+ Insts.push_back(Inst);
+ }
+ Inst->updateVars(this);
+}
+
+// When a node is created, the OutEdges are immediately known, but the
+// InEdges have to be built up incrementally. After the CFG has been
+// constructed, the computePredecessors() pass finalizes it by
+// creating the InEdges list.
+void CfgNode::computePredecessors() {
+ OutEdges = (*Insts.rbegin())->getTerminatorEdges();
+ for (NodeList::const_iterator I = OutEdges.begin(), E = OutEdges.end();
+ I != E; ++I) {
+ CfgNode *Node = *I;
+ Node->InEdges.push_back(this);
+ }
+}
+
+// ======================== Dump routines ======================== //
+
+void CfgNode::dump(Cfg *Func) const {
+ Func->setCurrentNode(this);
+ Ostream &Str = Func->getContext()->getStrDump();
+ if (Func->getContext()->isVerbose(IceV_Instructions)) {
+ Str << getName() << ":\n";
+ }
+ // Dump list of predecessor nodes.
+ if (Func->getContext()->isVerbose(IceV_Preds) && !InEdges.empty()) {
+ Str << " // preds = ";
+ for (NodeList::const_iterator I = InEdges.begin(), E = InEdges.end();
+ I != E; ++I) {
+ if (I != InEdges.begin())
+ Str << ", ";
+ Str << "%" << (*I)->getName();
+ }
+ Str << "\n";
+ }
+ // Dump each instruction.
+ if (Func->getContext()->isVerbose(IceV_Instructions)) {
+ for (PhiList::const_iterator I = Phis.begin(), E = Phis.end(); I != E;
+ ++I) {
+ const Inst *Inst = *I;
+ Inst->dumpDecorated(Func);
+ }
+ InstList::const_iterator I = Insts.begin(), E = Insts.end();
+ while (I != E) {
+ Inst *Inst = *I++;
+ Inst->dumpDecorated(Func);
+ }
+ }
+ // Dump list of successor nodes.
+ if (Func->getContext()->isVerbose(IceV_Succs)) {
+ Str << " // succs = ";
+ for (NodeList::const_iterator I = OutEdges.begin(), E = OutEdges.end();
+ I != E; ++I) {
+ if (I != OutEdges.begin())
+ Str << ", ";
+ Str << "%" << (*I)->getName();
+ }
+ Str << "\n";
+ }
+}
+
+} // end of namespace Ice
diff --git a/src/IceCfgNode.h b/src/IceCfgNode.h
new file mode 100644
index 0000000..bf96aef
--- /dev/null
+++ b/src/IceCfgNode.h
@@ -0,0 +1,62 @@
+//===- subzero/src/IceCfgNode.h - Control flow graph node -------*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the CfgNode class, which represents a single
+// basic block as its instruction list, in-edge list, and out-edge
+// list.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICECFGNODE_H
+#define SUBZERO_SRC_ICECFGNODE_H
+
+#include "IceDefs.h"
+
+namespace Ice {
+
+class CfgNode {
+public:
+ static CfgNode *create(Cfg *Func, SizeT LabelIndex, IceString Name = "") {
+ return new (Func->allocate<CfgNode>()) CfgNode(Func, LabelIndex, Name);
+ }
+
+ // Access the label number and name for this node.
+ SizeT getIndex() const { return Number; }
+ IceString getName() const;
+
+ // Access predecessor and successor edge lists.
+ const NodeList &getInEdges() const { return InEdges; }
+ const NodeList &getOutEdges() const { return OutEdges; }
+
+ // Manage the instruction list.
+ InstList &getInsts() { return Insts; }
+ void appendInst(Inst *Inst);
+
+ // For each of this node's successors, add this node to that
+ // successor's InEdges list.
+ void computePredecessors();
+
+ void dump(Cfg *Func) const;
+
+private:
+ CfgNode(Cfg *Func, SizeT LabelIndex, IceString Name);
+ CfgNode(const CfgNode &) LLVM_DELETED_FUNCTION;
+ CfgNode &operator=(const CfgNode &) LLVM_DELETED_FUNCTION;
+ Cfg *const Func;
+ const SizeT Number; // label index
+ IceString Name; // for dumping only
+ NodeList InEdges; // in no particular order
+ NodeList OutEdges; // in no particular order
+ PhiList Phis; // unordered set of phi instructions
+ InstList Insts; // ordered list of non-phi instructions
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICECFGNODE_H
diff --git a/src/IceDefs.h b/src/IceDefs.h
new file mode 100644
index 0000000..25c384a
--- /dev/null
+++ b/src/IceDefs.h
@@ -0,0 +1,122 @@
+//===- subzero/src/IceDefs.h - Common Subzero declarations ------*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares various useful types and classes that have
+// widespread use across Subzero. Every Subzero source file is
+// expected to include IceDefs.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEDEFS_H
+#define SUBZERO_SRC_ICEDEFS_H
+
+#include <stdint.h> // TODO: <cstdint> with C++11
+
+#include <cassert>
+#include <cstdio> // snprintf
+#include <functional> // std::less
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h" // LLVM_STATIC_ASSERT
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Timer.h"
+
+namespace Ice {
+
+class CfgNode;
+class Constant;
+class GlobalContext;
+class Cfg;
+class Inst;
+class InstPhi;
+class InstTarget;
+class Operand;
+class Variable;
+
+// TODO: Switch over to LLVM's ADT container classes.
+// http://llvm.org/docs/ProgrammersManual.html#picking-the-right-data-structure-for-a-task
+typedef std::string IceString;
+typedef std::list<Inst *> InstList;
+typedef std::list<InstPhi *> PhiList;
+typedef std::vector<Variable *> VarList;
+typedef std::vector<CfgNode *> NodeList;
+
+// SizeT is for holding small-ish limits like number of source
+// operands in an instruction. It is used instead of size_t (which
+// may be 64 bits wide) when we want to save space.
+typedef uint32_t SizeT;
+
+enum VerboseItem {
+ IceV_None = 0,
+ IceV_Instructions = 1 << 0,
+ IceV_Deleted = 1 << 1,
+ IceV_InstNumbers = 1 << 2,
+ IceV_Preds = 1 << 3,
+ IceV_Succs = 1 << 4,
+ IceV_Liveness = 1 << 5,
+ IceV_RegManager = 1 << 6,
+ IceV_RegOrigins = 1 << 7,
+ IceV_LinearScan = 1 << 8,
+ IceV_Frame = 1 << 9,
+ IceV_Timing = 1 << 10,
+ IceV_All = ~IceV_None
+};
+typedef uint32_t VerboseMask;
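+
+// Verbose flags are OR'ed together into a VerboseMask, e.g. (illustrative):
+//   VerboseMask Mask = IceV_Instructions | IceV_Preds;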
+
+// The Ostream class wraps an output stream so that dump routines can
+// write through a common interface; a NULL stream pointer silently
+// discards the output.
+
+class Ostream {
+public:
+ Ostream(llvm::raw_ostream *Stream) : Stream(Stream) {}
+
+ llvm::raw_ostream *Stream;
+
+private:
+ Ostream(const Ostream &) LLVM_DELETED_FUNCTION;
+ Ostream &operator=(const Ostream &) LLVM_DELETED_FUNCTION;
+};
+
+template <typename T> inline Ostream &operator<<(Ostream &Str, const T &Val) {
+ if (Str.Stream)
+ (*Str.Stream) << Val;
+ return Str;
+}
+
+// TODO: Implement in terms of std::chrono after switching to C++11.
+class Timer {
+public:
+ Timer() : Start(llvm::TimeRecord::getCurrentTime(false)) {}
+ uint64_t getElapsedNs() const { return getElapsedSec() * 1000 * 1000 * 1000; }
+ uint64_t getElapsedUs() const { return getElapsedSec() * 1000 * 1000; }
+ uint64_t getElapsedMs() const { return getElapsedSec() * 1000; }
+ double getElapsedSec() const {
+ llvm::TimeRecord End = llvm::TimeRecord::getCurrentTime(false);
+ return End.getWallTime() - Start.getWallTime();
+ }
+ void printElapsedUs(GlobalContext *Ctx, const IceString &Tag) const;
+
+private:
+ const llvm::TimeRecord Start;
+ Timer(const Timer &) LLVM_DELETED_FUNCTION;
+ Timer &operator=(const Timer &) LLVM_DELETED_FUNCTION;
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEDEFS_H
diff --git a/src/IceGlobalContext.cpp b/src/IceGlobalContext.cpp
new file mode 100644
index 0000000..11de013
--- /dev/null
+++ b/src/IceGlobalContext.cpp
@@ -0,0 +1,169 @@
+//===- subzero/src/IceGlobalContext.cpp - Global context defs ---*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines aspects of the compilation that persist across
+// multiple functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceDefs.h"
+#include "IceTypes.h"
+#include "IceCfg.h"
+#include "IceGlobalContext.h"
+#include "IceOperand.h"
+
+namespace Ice {
+
+// TypePool maps constants of type KeyType (e.g. float) to pointers to
+// type ValueType (e.g. ConstantFloat). KeyTypeHasFP indicates whether
+// KeyType is a floating-point type whose values need to be compared
+// using memcmp() rather than operator< for NaN correctness. TODO: use
+// std::is_floating_point<KeyType> instead of KeyTypeHasFP with C++11.
+template <typename KeyType, typename ValueType, bool KeyTypeHasFP = false>
+class TypePool {
+ TypePool(const TypePool &) LLVM_DELETED_FUNCTION;
+ TypePool &operator=(const TypePool &) LLVM_DELETED_FUNCTION;
+
+public:
+ TypePool() {}
+ ValueType *getOrAdd(GlobalContext *Ctx, Type Ty, KeyType Key) {
+ TupleType TupleKey = std::make_pair(Ty, Key);
+ typename ContainerType::const_iterator Iter = Pool.find(TupleKey);
+ if (Iter != Pool.end())
+ return Iter->second;
+ ValueType *Result = ValueType::create(Ctx, Ty, Key);
+ Pool[TupleKey] = Result;
+ return Result;
+ }
+
+private:
+ typedef std::pair<Type, KeyType> TupleType;
+ struct TupleCompare {
+ bool operator()(const TupleType &A, const TupleType &B) {
+ if (A.first != B.first)
+ return A.first < B.first;
+ if (KeyTypeHasFP)
+ return memcmp(&A.second, &B.second, sizeof(KeyType)) < 0;
+ return A.second < B.second;
+ }
+ };
+ typedef std::map<const TupleType, ValueType *, TupleCompare> ContainerType;
+ ContainerType Pool;
+};
+
+// The global constant pool bundles individual pools of each type of
+// interest.
+class ConstantPool {
+ ConstantPool(const ConstantPool &) LLVM_DELETED_FUNCTION;
+ ConstantPool &operator=(const ConstantPool &) LLVM_DELETED_FUNCTION;
+
+public:
+ ConstantPool() {}
+ TypePool<float, ConstantFloat, true> Floats;
+ TypePool<double, ConstantDouble, true> Doubles;
+ TypePool<uint64_t, ConstantInteger> Integers;
+ TypePool<RelocatableTuple, ConstantRelocatable> Relocatables;
+};
+
+GlobalContext::GlobalContext(llvm::raw_ostream *OsDump,
+ llvm::raw_ostream *OsEmit, VerboseMask Mask,
+ IceString TestPrefix)
+ : StrDump(OsDump), StrEmit(OsEmit), VMask(Mask),
+ ConstPool(new ConstantPool()), TestPrefix(TestPrefix) {}
+
+// In this context, name mangling means to rewrite a symbol using a
+// given prefix. For a C++ symbol, nest the original symbol inside
+// the "prefix" namespace. For other symbols, just prepend the
+// prefix.
+IceString GlobalContext::mangleName(const IceString &Name) const {
+ // TODO: Add explicit tests (beyond the implicit tests in the linker
+ // that come from the cross tests).
+ //
+ // An already-nested name like foo::bar() gets pushed down one
+ // level, making it equivalent to Prefix::foo::bar().
+ //   _ZN3foo3barExyz ==> _ZN6Prefix3foo3barExyz
+ // A non-nested but mangled name like bar() gets nested, making it
+ // equivalent to Prefix::bar().
+ //   _Z3barxyz ==> _ZN6Prefix3barExyz
+ // An unmangled, extern "C" style name gets a simple prefix:
+ //   bar ==> Prefixbar
+ if (getTestPrefix().empty())
+ return Name;
+
+ unsigned PrefixLength = getTestPrefix().length();
+ char NameBase[1 + Name.length()];
+ const size_t BufLen = 30 + Name.length() + getTestPrefix().length();
+ char NewName[BufLen];
+ uint32_t BaseLength = 0;
+
+ int ItemsParsed = sscanf(Name.c_str(), "_ZN%s", NameBase);
+ if (ItemsParsed == 1) {
+ // Transform _ZN3foo3barExyz ==> _ZN6Prefix3foo3barExyz
+ // (splice in "6Prefix") ^^^^^^^
+ snprintf(NewName, BufLen, "_ZN%u%s%s", PrefixLength,
+ getTestPrefix().c_str(), NameBase);
+ // We ignore the snprintf return value (here and below). If we
+ // somehow miscalculated the output buffer length, the output will
+ // be truncated, but it will be truncated consistently for all
+ // mangleName() calls on the same input string.
+ return NewName;
+ }
+
+ ItemsParsed = sscanf(Name.c_str(), "_Z%u%s", &BaseLength, NameBase);
+ if (ItemsParsed == 2) {
+ // Transform _Z3barxyz ==> _ZN6Prefix3barExyz
+ // ^^^^^^^^ ^
+ // (splice in "N6Prefix", and insert "E" after "3bar")
+ char OrigName[Name.length()];
+ char OrigSuffix[Name.length()];
+ strncpy(OrigName, NameBase, BaseLength);
+ OrigName[BaseLength] = '\0';
+ strcpy(OrigSuffix, NameBase + BaseLength);
+ snprintf(NewName, BufLen, "_ZN%u%s%u%sE%s", PrefixLength,
+ getTestPrefix().c_str(), BaseLength, OrigName, OrigSuffix);
+ return NewName;
+ }
+
+ // Transform bar ==> Prefixbar
+ // ^^^^^^
+ return getTestPrefix() + Name;
+}
+
+GlobalContext::~GlobalContext() {}
+
+Constant *GlobalContext::getConstantInt(Type Ty, uint64_t ConstantInt64) {
+ return ConstPool->Integers.getOrAdd(this, Ty, ConstantInt64);
+}
+
+Constant *GlobalContext::getConstantFloat(float ConstantFloat) {
+ return ConstPool->Floats.getOrAdd(this, IceType_f32, ConstantFloat);
+}
+
+Constant *GlobalContext::getConstantDouble(double ConstantDouble) {
+ return ConstPool->Doubles.getOrAdd(this, IceType_f64, ConstantDouble);
+}
+
+Constant *GlobalContext::getConstantSym(Type Ty, int64_t Offset,
+ const IceString &Name,
+ bool SuppressMangling) {
+ return ConstPool->Relocatables.getOrAdd(
+ this, Ty, RelocatableTuple(Offset, Name, SuppressMangling));
+}
+
+void Timer::printElapsedUs(GlobalContext *Ctx, const IceString &Tag) const {
+ if (Ctx->isVerbose(IceV_Timing)) {
+ // Prefixing with '#' allows timing strings to be included
+ // without error in textual assembly output.
+ Ctx->getStrDump() << "# " << getElapsedUs() << " usec " << Tag << "\n";
+ }
+}
+
+} // end of namespace Ice
diff --git a/src/IceGlobalContext.h b/src/IceGlobalContext.h
new file mode 100644
index 0000000..9224d89
--- /dev/null
+++ b/src/IceGlobalContext.h
@@ -0,0 +1,85 @@
+//===- subzero/src/IceGlobalContext.h - Global context defs -----*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares aspects of the compilation that persist across
+// multiple functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEGLOBALCONTEXT_H
+#define SUBZERO_SRC_ICEGLOBALCONTEXT_H
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "IceDefs.h"
+#include "IceTypes.h"
+
+namespace Ice {
+
+// TODO: Accesses to all non-const fields of GlobalContext need to
+// be synchronized, especially the constant pool, the allocator, and
+// the output streams.
+class GlobalContext {
+public:
+ GlobalContext(llvm::raw_ostream *OsDump, llvm::raw_ostream *OsEmit,
+ VerboseMask Mask, IceString TestPrefix);
+ ~GlobalContext();
+
+ // Returns true if any of the specified options in the verbose mask
+ // are set. If the argument is omitted, it checks if any verbose
+ // options at all are set. IceV_Timing is treated specially, so
+ // that running with just IceV_Timing verbosity doesn't trigger an
+ // avalanche of extra output.
+ bool isVerbose(VerboseMask Mask = (IceV_All & ~IceV_Timing)) const {
+ return VMask & Mask;
+ }
+ void setVerbose(VerboseMask Mask) { VMask = Mask; }
+ void addVerbose(VerboseMask Mask) { VMask |= Mask; }
+ void subVerbose(VerboseMask Mask) { VMask &= ~Mask; }
+
+ Ostream &getStrDump() { return StrDump; }
+ Ostream &getStrEmit() { return StrEmit; }
+
+ // When emitting assembly, we allow a string to be prepended to
+ // names of translated functions. This makes it easier to create an
+ // execution test against a reference translator like llc, with both
+ // translators using the same bitcode as input.
+ IceString getTestPrefix() const { return TestPrefix; }
+ IceString mangleName(const IceString &Name) const;
+
+ // Manage Constants.
+ // getConstant*() functions are not const because they might add
+ // something to the constant pool.
+ Constant *getConstantInt(Type Ty, uint64_t ConstantInt64);
+ Constant *getConstantFloat(float Value);
+ Constant *getConstantDouble(double Value);
+ // Returns a symbolic constant.
+ Constant *getConstantSym(Type Ty, int64_t Offset, const IceString &Name = "",
+ bool SuppressMangling = false);
+
+ // Allocate data of type T using the global allocator.
+ template <typename T> T *allocate() { return Allocator.Allocate<T>(); }
+
+private:
+ Ostream StrDump; // Stream for dumping / diagnostics
+ Ostream StrEmit; // Stream for code emission
+
+ llvm::BumpPtrAllocator Allocator;
+ VerboseMask VMask;
+ llvm::OwningPtr<class ConstantPool> ConstPool;
+ const IceString TestPrefix;
+ GlobalContext(const GlobalContext &) LLVM_DELETED_FUNCTION;
+ GlobalContext &operator=(const GlobalContext &) LLVM_DELETED_FUNCTION;
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEGLOBALCONTEXT_H
diff --git a/src/IceInst.cpp b/src/IceInst.cpp
new file mode 100644
index 0000000..391f197
--- /dev/null
+++ b/src/IceInst.cpp
@@ -0,0 +1,436 @@
+//===- subzero/src/IceInst.cpp - High-level instruction implementation ----===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Inst class, primarily the various
+// subclass constructors and dump routines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceCfg.h"
+#include "IceCfgNode.h"
+#include "IceInst.h"
+#include "IceOperand.h"
+
+namespace Ice {
+
+namespace {
+
+// Using non-anonymous struct so that array_lengthof works.
+const struct _InstArithmeticAttributes {
+ const char *DisplayString;
+ bool IsCommutative;
+} InstArithmeticAttributes[] = {
+#define X(tag, str, commutative) \
+ { str, commutative } \
+ ,
+ ICEINSTARITHMETIC_TABLE
+#undef X
+ };
+const size_t InstArithmeticAttributesSize =
+ llvm::array_lengthof(InstArithmeticAttributes);
+
+// Using non-anonymous struct so that array_lengthof works.
+const struct _InstCastAttributes {
+ const char *DisplayString;
+} InstCastAttributes[] = {
+#define X(tag, str) \
+ { str } \
+ ,
+ ICEINSTCAST_TABLE
+#undef X
+ };
+const size_t InstCastAttributesSize = llvm::array_lengthof(InstCastAttributes);
+
+// Using non-anonymous struct so that array_lengthof works.
+const struct _InstFcmpAttributes {
+ const char *DisplayString;
+} InstFcmpAttributes[] = {
+#define X(tag, str) \
+ { str } \
+ ,
+ ICEINSTFCMP_TABLE
+#undef X
+ };
+const size_t InstFcmpAttributesSize = llvm::array_lengthof(InstFcmpAttributes);
+
+// Using non-anonymous struct so that array_lengthof works.
+const struct _InstIcmpAttributes {
+ const char *DisplayString;
+} InstIcmpAttributes[] = {
+#define X(tag, str) \
+ { str } \
+ ,
+ ICEINSTICMP_TABLE
+#undef X
+ };
+const size_t InstIcmpAttributesSize = llvm::array_lengthof(InstIcmpAttributes);
+
+} // end of anonymous namespace
+
+Inst::Inst(Cfg *Func, InstKind Kind, SizeT MaxSrcs, Variable *Dest)
+ : Kind(Kind), Number(Func->newInstNumber()), Deleted(false),
+ HasSideEffects(false), Dest(Dest), MaxSrcs(MaxSrcs), NumSrcs(0),
+ Srcs(Func->allocateArrayOf<Operand *>(MaxSrcs)) {}
+
+void Inst::updateVars(CfgNode *Node) {
+ if (Dest)
+ Dest->setDefinition(this, Node);
+
+ SizeT VarIndex = 0;
+ for (SizeT I = 0; I < getSrcSize(); ++I) {
+ Operand *Src = getSrc(I);
+ SizeT NumVars = Src->getNumVars();
+ for (SizeT J = 0; J < NumVars; ++J, ++VarIndex) {
+ Variable *Var = Src->getVar(J);
+ Var->setUse(this, Node);
+ }
+ }
+}
+
+InstAlloca::InstAlloca(Cfg *Func, Operand *ByteCount, uint32_t AlignInBytes,
+ Variable *Dest)
+ : Inst(Func, Inst::Alloca, 1, Dest), AlignInBytes(AlignInBytes) {
+ // Verify AlignInBytes is 0 or a power of 2.
+ assert(AlignInBytes == 0 || llvm::isPowerOf2_32(AlignInBytes));
+ addSource(ByteCount);
+}
+
+InstArithmetic::InstArithmetic(Cfg *Func, OpKind Op, Variable *Dest,
+ Operand *Source1, Operand *Source2)
+ : Inst(Func, Inst::Arithmetic, 2, Dest), Op(Op) {
+ addSource(Source1);
+ addSource(Source2);
+}
+
+bool InstArithmetic::isCommutative() const {
+ return InstArithmeticAttributes[getOp()].IsCommutative;
+}
+
+InstAssign::InstAssign(Cfg *Func, Variable *Dest, Operand *Source)
+ : Inst(Func, Inst::Assign, 1, Dest) {
+ addSource(Source);
+}
+
+// If TargetTrue==TargetFalse, we turn it into an unconditional
+// branch. This ensures that, along with the 'switch' instruction
+// semantics, there is at most one edge from one node to another.
+InstBr::InstBr(Cfg *Func, Operand *Source, CfgNode *TargetTrue,
+ CfgNode *TargetFalse)
+ : Inst(Func, Inst::Br, 1, NULL), TargetFalse(TargetFalse),
+ TargetTrue(TargetTrue == TargetFalse ? NULL : TargetTrue) {
+ // TargetTrue is a const member, so the unconditional case is handled in
+ // the initializer list; only a conditional branch carries the condition.
+ if (!isUnconditional())
+ addSource(Source);
+}
+
+InstBr::InstBr(Cfg *Func, CfgNode *Target)
+ : Inst(Func, Inst::Br, 0, NULL), TargetFalse(Target), TargetTrue(NULL) {}
+
+NodeList InstBr::getTerminatorEdges() const {
+ NodeList OutEdges;
+ OutEdges.push_back(TargetFalse);
+ if (TargetTrue)
+ OutEdges.push_back(TargetTrue);
+ return OutEdges;
+}
+
+InstCast::InstCast(Cfg *Func, OpKind CastKind, Variable *Dest, Operand *Source)
+ : Inst(Func, Inst::Cast, 1, Dest), CastKind(CastKind) {
+ addSource(Source);
+}
+
+InstFcmp::InstFcmp(Cfg *Func, FCond Condition, Variable *Dest, Operand *Source1,
+ Operand *Source2)
+ : Inst(Func, Inst::Fcmp, 2, Dest), Condition(Condition) {
+ addSource(Source1);
+ addSource(Source2);
+}
+
+InstIcmp::InstIcmp(Cfg *Func, ICond Condition, Variable *Dest, Operand *Source1,
+ Operand *Source2)
+ : Inst(Func, Inst::Icmp, 2, Dest), Condition(Condition) {
+ addSource(Source1);
+ addSource(Source2);
+}
+
+InstLoad::InstLoad(Cfg *Func, Variable *Dest, Operand *SourceAddr)
+ : Inst(Func, Inst::Load, 1, Dest) {
+ addSource(SourceAddr);
+}
+
+InstPhi::InstPhi(Cfg *Func, SizeT MaxSrcs, Variable *Dest)
+ : Inst(Func, Phi, MaxSrcs, Dest) {
+ Labels = Func->allocateArrayOf<CfgNode *>(MaxSrcs);
+}
+
+// TODO: A Switch instruction (and maybe others) can add duplicate
+// edges. We may want to de-dup Phis and validate consistency (i.e.,
+// the source operands are the same for duplicate edges), though it
+// seems the current lowering code is OK with this situation.
+void InstPhi::addArgument(Operand *Source, CfgNode *Label) {
+ Labels[getSrcSize()] = Label;
+ addSource(Source);
+}
+
+InstRet::InstRet(Cfg *Func, Operand *RetValue)
+ : Inst(Func, Ret, RetValue ? 1 : 0, NULL) {
+ if (RetValue)
+ addSource(RetValue);
+}
+
+InstSelect::InstSelect(Cfg *Func, Variable *Dest, Operand *Condition,
+ Operand *SourceTrue, Operand *SourceFalse)
+ : Inst(Func, Inst::Select, 3, Dest) {
+ assert(Condition->getType() == IceType_i1);
+ addSource(Condition);
+ addSource(SourceTrue);
+ addSource(SourceFalse);
+}
+
+InstStore::InstStore(Cfg *Func, Operand *Data, Operand *Addr)
+ : Inst(Func, Inst::Store, 2, NULL) {
+ addSource(Data);
+ addSource(Addr);
+}
+
+InstSwitch::InstSwitch(Cfg *Func, SizeT NumCases, Operand *Source,
+ CfgNode *LabelDefault)
+ : Inst(Func, Inst::Switch, 1, NULL), LabelDefault(LabelDefault),
+ NumCases(NumCases) {
+ addSource(Source);
+ Values = Func->allocateArrayOf<uint64_t>(NumCases);
+ Labels = Func->allocateArrayOf<CfgNode *>(NumCases);
+ // Initialize in case buggy code doesn't set all entries
+ for (SizeT I = 0; I < NumCases; ++I) {
+ Values[I] = 0;
+ Labels[I] = NULL;
+ }
+}
+
+void InstSwitch::addBranch(SizeT CaseIndex, uint64_t Value, CfgNode *Label) {
+ assert(CaseIndex < NumCases);
+ Values[CaseIndex] = Value;
+ Labels[CaseIndex] = Label;
+}
+
+NodeList InstSwitch::getTerminatorEdges() const {
+ NodeList OutEdges;
+ OutEdges.push_back(LabelDefault);
+ for (SizeT I = 0; I < NumCases; ++I) {
+ OutEdges.push_back(Labels[I]);
+ }
+ return OutEdges;
+}
+
+InstUnreachable::InstUnreachable(Cfg *Func)
+ : Inst(Func, Inst::Unreachable, 0, NULL) {}
+
+// ======================== Dump routines ======================== //
+
+void Inst::dumpDecorated(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ if (!Func->getContext()->isVerbose(IceV_Deleted) && isDeleted())
+ return;
+ if (Func->getContext()->isVerbose(IceV_InstNumbers)) {
+ char buf[30];
+ int32_t Number = getNumber();
+ if (Number < 0)
+ snprintf(buf, llvm::array_lengthof(buf), "[XXX]");
+ else
+ snprintf(buf, llvm::array_lengthof(buf), "[%3d]", Number);
+ Str << buf;
+ }
+ Str << " ";
+ if (isDeleted())
+ Str << " //";
+ dump(Func);
+ Str << "\n";
+}
+
+void Inst::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " =~ ";
+ dumpSources(Func);
+}
+
+void Inst::dumpSources(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ for (SizeT I = 0; I < getSrcSize(); ++I) {
+ if (I > 0)
+ Str << ", ";
+ getSrc(I)->dump(Func);
+ }
+}
+
+void Inst::dumpDest(const Cfg *Func) const {
+ if (getDest())
+ getDest()->dump(Func);
+}
+
+void InstAlloca::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = alloca i8, i32 ";
+ getSizeInBytes()->dump(Func);
+ Str << ", align " << getAlignInBytes();
+}
+
+void InstArithmetic::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = " << InstArithmeticAttributes[getOp()].DisplayString << " "
+ << getDest()->getType() << " ";
+ dumpSources(Func);
+}
+
+void InstAssign::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = " << getDest()->getType() << " ";
+ dumpSources(Func);
+}
+
+void InstBr::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << "br ";
+ if (!isUnconditional()) {
+ Str << "i1 ";
+ getCondition()->dump(Func);
+ Str << ", label %" << getTargetTrue()->getName() << ", ";
+ }
+ Str << "label %" << getTargetFalse()->getName();
+}
+
+void InstCall::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ if (getDest()) {
+ dumpDest(Func);
+ Str << " = ";
+ }
+ Str << "call ";
+ if (getDest())
+ Str << getDest()->getType();
+ else
+ Str << "void";
+ Str << " ";
+ getCallTarget()->dump(Func);
+ Str << "(";
+ for (SizeT I = 0; I < getNumArgs(); ++I) {
+ if (I > 0)
+ Str << ", ";
+ Str << getArg(I)->getType() << " ";
+ getArg(I)->dump(Func);
+ }
+ Str << ")";
+}
+
+void InstCast::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = " << InstCastAttributes[getCastKind()].DisplayString << " "
+ << getSrc(0)->getType() << " ";
+ dumpSources(Func);
+ Str << " to " << getDest()->getType();
+}
+
+void InstIcmp::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = icmp " << InstIcmpAttributes[getCondition()].DisplayString << " "
+ << getSrc(0)->getType() << " ";
+ dumpSources(Func);
+}
+
+void InstFcmp::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = fcmp " << InstFcmpAttributes[getCondition()].DisplayString << " "
+ << getSrc(0)->getType() << " ";
+ dumpSources(Func);
+}
+
+void InstLoad::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Type Ty = getDest()->getType();
+ Str << " = load " << Ty << "* ";
+ dumpSources(Func);
+ Str << ", align " << typeAlignInBytes(Ty);
+}
+
+void InstStore::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ Type Ty = getData()->getType();
+ Str << "store " << Ty << " ";
+ getData()->dump(Func);
+ Str << ", " << Ty << "* ";
+ getAddr()->dump(Func);
+ Str << ", align " << typeAlignInBytes(Ty);
+}
+
+void InstSwitch::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ Type Ty = getComparison()->getType();
+ Str << "switch " << Ty << " ";
+ getSrc(0)->dump(Func);
+ Str << ", label %" << getLabelDefault()->getName() << " [\n";
+ for (SizeT I = 0; I < getNumCases(); ++I) {
+ Str << " " << Ty << " " << getValue(I) << ", label %"
+ << getLabel(I)->getName() << "\n";
+ }
+ Str << " ]";
+}
+
+void InstPhi::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Str << " = phi " << getDest()->getType() << " ";
+ for (SizeT I = 0; I < getSrcSize(); ++I) {
+ if (I > 0)
+ Str << ", ";
+ Str << "[ ";
+ getSrc(I)->dump(Func);
+ Str << ", %" << Labels[I]->getName() << " ]";
+ }
+}
+
+void InstRet::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ Type Ty = hasRetValue() ? getSrc(0)->getType() : IceType_void;
+ Str << "ret " << Ty;
+ if (hasRetValue()) {
+ Str << " ";
+ dumpSources(Func);
+ }
+}
+
+void InstSelect::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ dumpDest(Func);
+ Operand *Condition = getCondition();
+ Operand *TrueOp = getTrueOperand();
+ Operand *FalseOp = getFalseOperand();
+ Str << " = select " << Condition->getType() << " ";
+ Condition->dump(Func);
+ Str << ", " << TrueOp->getType() << " ";
+ TrueOp->dump(Func);
+ Str << ", " << FalseOp->getType() << " ";
+ FalseOp->dump(Func);
+}
+
+void InstUnreachable::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ Str << "unreachable";
+}
+
+} // end of namespace Ice
diff --git a/src/IceInst.def b/src/IceInst.def
new file mode 100644
index 0000000..60c613d
--- /dev/null
+++ b/src/IceInst.def
@@ -0,0 +1,89 @@
+//===- subzero/src/IceInst.def - X-macros for ICE instructions -*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines properties of ICE instructions in the form of
+// x-macros.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEINST_DEF
+#define SUBZERO_SRC_ICEINST_DEF
+
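+// A consumer defines X to select the columns it needs, expands a table, and
+// then undefines X. For example (as IceInst.h does for InstArithmetic):
+//   #define X(tag, str, commutative) tag,
+//   ICEINSTARITHMETIC_TABLE
+//   #undef X
+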
+#define ICEINSTARITHMETIC_TABLE \
+ /* enum value, printable string, commutative */ \
+ X(Add, "add", 1) \
+ X(Fadd, "fadd", 0) \
+ X(Sub, "sub", 0) \
+ X(Fsub, "fsub", 0) \
+ X(Mul, "mul", 1) \
+ X(Fmul, "fmul", 0) \
+ X(Udiv, "udiv", 0) \
+ X(Sdiv, "sdiv", 0) \
+ X(Fdiv, "fdiv", 0) \
+ X(Urem, "urem", 0) \
+ X(Srem, "srem", 0) \
+ X(Frem, "frem", 0) \
+ X(Shl, "shl", 0) \
+ X(Lshr, "lshr", 0) \
+ X(Ashr, "ashr", 0) \
+ X(And, "and", 1) \
+ X(Or, "or", 1) \
+ X(Xor, "xor", 1)
+//#define X(tag, str, commutative)
+
+#define ICEINSTCAST_TABLE \
+ /* enum value, printable string */ \
+ X(Trunc, "trunc") \
+ X(Zext, "zext") \
+ X(Sext, "sext") \
+ X(Fptrunc, "fptrunc") \
+ X(Fpext, "fpext") \
+ X(Fptoui, "fptoui") \
+ X(Fptosi, "fptosi") \
+ X(Uitofp, "uitofp") \
+ X(Sitofp, "sitofp") \
+ X(Bitcast, "bitcast")
+//#define X(tag, str)
+
+#define ICEINSTFCMP_TABLE \
+ /* enum value, printable string */ \
+ X(False, "false") \
+ X(Oeq, "oeq") \
+ X(Ogt, "ogt") \
+ X(Oge, "oge") \
+ X(Olt, "olt") \
+ X(Ole, "ole") \
+ X(One, "one") \
+ X(Ord, "ord") \
+ X(Ueq, "ueq") \
+ X(Ugt, "ugt") \
+ X(Uge, "uge") \
+ X(Ult, "ult") \
+ X(Ule, "ule") \
+ X(Une, "une") \
+ X(Uno, "uno") \
+ X(True, "true")
+//#define X(tag, str)
+
+#define ICEINSTICMP_TABLE \
+ /* enum value, printable string */ \
+ X(Eq, "eq") \
+ X(Ne, "ne") \
+ X(Ugt, "ugt") \
+ X(Uge, "uge") \
+ X(Ult, "ult") \
+ X(Ule, "ule") \
+ X(Sgt, "sgt") \
+ X(Sge, "sge") \
+ X(Slt, "slt") \
+ X(Sle, "sle")
+//#define X(tag, str)
+
+
+#endif // SUBZERO_SRC_ICEINST_DEF
diff --git a/src/IceInst.h b/src/IceInst.h
new file mode 100644
index 0000000..57f8b9e
--- /dev/null
+++ b/src/IceInst.h
@@ -0,0 +1,527 @@
+//===- subzero/src/IceInst.h - High-level instructions ----------*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Inst class and its target-independent
+// subclasses, which represent the high-level Vanilla ICE instructions
+// and map roughly 1:1 to LLVM instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEINST_H
+#define SUBZERO_SRC_ICEINST_H
+
+#include "IceDefs.h"
+#include "IceInst.def"
+#include "IceTypes.h"
+
+// TODO: The Cfg structure, and instructions in particular, need to be
+// validated for things like valid operand types, valid branch
+// targets, proper ordering of Phi and non-Phi instructions, etc.
+// Most of the validity checking will be done in the bitcode reader.
+// We need a list of everything that should be validated, and tests
+// for each.
+
+namespace Ice {
+
+class Inst {
+public:
+ enum InstKind {
+ // Arbitrary (alphabetical) order, except put Unreachable first.
+ Unreachable,
+ Alloca,
+ Arithmetic,
+ Assign, // not part of LLVM/PNaCl bitcode
+ Br,
+ Call,
+ Cast,
+ Fcmp,
+ Icmp,
+ Load,
+ Phi,
+ Ret,
+ Select,
+ Store,
+ Switch
+ };
+ InstKind getKind() const { return Kind; }
+
+ int32_t getNumber() const { return Number; }
+
+ bool isDeleted() const { return Deleted; }
+ void setDeleted() { Deleted = true; }
+
+ bool hasSideEffects() const { return HasSideEffects; }
+
+ Variable *getDest() const { return Dest; }
+
+ SizeT getSrcSize() const { return NumSrcs; }
+ Operand *getSrc(SizeT I) const {
+ assert(I < getSrcSize());
+ return Srcs[I];
+ }
+
+ // Returns a list of out-edges corresponding to a terminator
+ // instruction, which is the last instruction of the block.
+ virtual NodeList getTerminatorEdges() const {
+ // All valid terminator instructions override this method. For
+ // the default implementation, we assert in case some CfgNode
+ // is constructed without a terminator instruction at the end.
+ llvm_unreachable(
+ "getTerminatorEdges() called on a non-terminator instruction");
+ return NodeList();
+ }
+
+ // Updates the status of the Variables contained within the
+ // instruction. In particular, it marks where the Dest variable is
+ // first assigned, and it tracks whether variables are live across
+ // basic blocks, i.e. used in a different block from their definition.
+ void updateVars(CfgNode *Node);
+
+ virtual void dump(const Cfg *Func) const;
+ void dumpDecorated(const Cfg *Func) const;
+ void dumpSources(const Cfg *Func) const;
+ void dumpDest(const Cfg *Func) const;
+
+ virtual ~Inst() {}
+
+protected:
+ Inst(Cfg *Func, InstKind Kind, SizeT MaxSrcs, Variable *Dest);
+ void addSource(Operand *Src) {
+ assert(Src);
+ assert(NumSrcs < MaxSrcs);
+ Srcs[NumSrcs++] = Src;
+ }
+ // The destroy() method lets the instruction cleanly release any
+ // memory that was allocated via the Cfg's allocator.
+ virtual void destroy(Cfg *Func) { Func->deallocateArrayOf<Operand *>(Srcs); }
+
+ const InstKind Kind;
+ // Number is the instruction number for describing live ranges.
+ int32_t Number;
+ // Deleted means irrevocably deleted.
+ bool Deleted;
+ // HasSideEffects means the instruction is something like a function
+ // call or a volatile load that can't be removed even if its Dest
+ // variable is not live.
+ bool HasSideEffects;
+
+ Variable *Dest;
+ const SizeT MaxSrcs; // only used for assert
+ SizeT NumSrcs;
+ Operand **Srcs;
+
+private:
+ Inst(const Inst &) LLVM_DELETED_FUNCTION;
+ Inst &operator=(const Inst &) LLVM_DELETED_FUNCTION;
+};
+
+// Alloca instruction. This captures the size in bytes as getSrc(0),
+// and the required alignment in bytes. The alignment must be either
+// 0 (no alignment required) or a power of 2.
+class InstAlloca : public Inst {
+public:
+ static InstAlloca *create(Cfg *Func, Operand *ByteCount,
+ uint32_t AlignInBytes, Variable *Dest) {
+ return new (Func->allocateInst<InstAlloca>())
+ InstAlloca(Func, ByteCount, AlignInBytes, Dest);
+ }
+ uint32_t getAlignInBytes() const { return AlignInBytes; }
+ Operand *getSizeInBytes() const { return getSrc(0); }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Alloca; }
+
+private:
+ InstAlloca(Cfg *Func, Operand *ByteCount, uint32_t AlignInBytes,
+ Variable *Dest);
+ InstAlloca(const InstAlloca &) LLVM_DELETED_FUNCTION;
+ InstAlloca &operator=(const InstAlloca &) LLVM_DELETED_FUNCTION;
+ virtual ~InstAlloca() {}
+ const uint32_t AlignInBytes;
+};
+
+// Binary arithmetic instruction. The source operands are captured in
+// getSrc(0) and getSrc(1).
+class InstArithmetic : public Inst {
+public:
+ enum OpKind {
+#define X(tag, str, commutative) tag,
+ ICEINSTARITHMETIC_TABLE
+#undef X
+ };
+ static InstArithmetic *create(Cfg *Func, OpKind Op, Variable *Dest,
+ Operand *Source1, Operand *Source2) {
+ return new (Func->allocateInst<InstArithmetic>())
+ InstArithmetic(Func, Op, Dest, Source1, Source2);
+ }
+ OpKind getOp() const { return Op; }
+ bool isCommutative() const;
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) {
+ return Inst->getKind() == Arithmetic;
+ }
+
+private:
+ InstArithmetic(Cfg *Func, OpKind Op, Variable *Dest, Operand *Source1,
+ Operand *Source2);
+ InstArithmetic(const InstArithmetic &) LLVM_DELETED_FUNCTION;
+ InstArithmetic &operator=(const InstArithmetic &) LLVM_DELETED_FUNCTION;
+ virtual ~InstArithmetic() {}
+
+ const OpKind Op;
+};
+
+// Assignment instruction. The source operand is captured in
+// getSrc(0). This is not part of the LLVM bitcode, but is a useful
+// abstraction for some of the lowering. E.g., if Phi instruction
+// lowering happens before target lowering, or for representing an
+// Inttoptr instruction, or as an intermediate step for lowering a
+// Load instruction.
+class InstAssign : public Inst {
+public:
+ static InstAssign *create(Cfg *Func, Variable *Dest, Operand *Source) {
+ return new (Func->allocateInst<InstAssign>())
+ InstAssign(Func, Dest, Source);
+ }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Assign; }
+
+private:
+ InstAssign(Cfg *Func, Variable *Dest, Operand *Source);
+ InstAssign(const InstAssign &) LLVM_DELETED_FUNCTION;
+ InstAssign &operator=(const InstAssign &) LLVM_DELETED_FUNCTION;
+ virtual ~InstAssign() {}
+};
+
+// Branch instruction. This represents both conditional and
+// unconditional branches.
+class InstBr : public Inst {
+public:
+ // Create a conditional branch. If TargetTrue==TargetFalse, it is
+ // optimized to an unconditional branch.
+ static InstBr *create(Cfg *Func, Operand *Source, CfgNode *TargetTrue,
+ CfgNode *TargetFalse) {
+ return new (Func->allocateInst<InstBr>())
+ InstBr(Func, Source, TargetTrue, TargetFalse);
+ }
+ // Create an unconditional branch.
+ static InstBr *create(Cfg *Func, CfgNode *Target) {
+ return new (Func->allocateInst<InstBr>()) InstBr(Func, Target);
+ }
+ bool isUnconditional() const { return getTargetTrue() == NULL; }
+ Operand *getCondition() const {
+ assert(!isUnconditional());
+ return getSrc(0);
+ }
+ CfgNode *getTargetTrue() const { return TargetTrue; }
+ CfgNode *getTargetFalse() const { return TargetFalse; }
+ CfgNode *getTargetUnconditional() const {
+ assert(isUnconditional());
+ return getTargetFalse();
+ }
+ virtual NodeList getTerminatorEdges() const;
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Br; }
+
+private:
+ // Conditional branch
+ InstBr(Cfg *Func, Operand *Source, CfgNode *TargetTrue, CfgNode *TargetFalse);
+ // Unconditional branch
+ InstBr(Cfg *Func, CfgNode *Target);
+ InstBr(const InstBr &) LLVM_DELETED_FUNCTION;
+ InstBr &operator=(const InstBr &) LLVM_DELETED_FUNCTION;
+ virtual ~InstBr() {}
+
+ CfgNode *const TargetFalse; // Doubles as unconditional branch target
+ CfgNode *const TargetTrue; // NULL if unconditional branch
+};
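+
+// Usage sketch, assuming the condition operand and the target nodes
+// have been built elsewhere:
+//   InstBr *CondBr = InstBr::create(Func, Cond, ThenNode, ElseNode);
+//   InstBr *Jump = InstBr::create(Func, ExitNode);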
+
+// Call instruction. The call target is captured as getSrc(0), and
+// arg I is captured as getSrc(I+1).
+class InstCall : public Inst {
+public:
+ static InstCall *create(Cfg *Func, SizeT NumArgs, Variable *Dest,
+ Operand *CallTarget) {
+ return new (Func->allocateInst<InstCall>())
+ InstCall(Func, NumArgs, Dest, CallTarget);
+ }
+ void addArg(Operand *Arg) { addSource(Arg); }
+ Operand *getCallTarget() const { return getSrc(0); }
+ Operand *getArg(SizeT I) const { return getSrc(I + 1); }
+ SizeT getNumArgs() const { return getSrcSize() - 1; }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Call; }
+
+private:
+ InstCall(Cfg *Func, SizeT NumArgs, Variable *Dest, Operand *CallTarget)
+ : Inst(Func, Inst::Call, NumArgs + 1, Dest) {
+ // Set HasSideEffects so that the call instruction can't be
+ // dead-code eliminated. Don't set this for a deletable intrinsic
+ // call.
+ HasSideEffects = true;
+ addSource(CallTarget);
+ }
+ InstCall(const InstCall &) LLVM_DELETED_FUNCTION;
+ InstCall &operator=(const InstCall &) LLVM_DELETED_FUNCTION;
+ virtual ~InstCall() {}
+};
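+
+// Usage sketch mirroring how the converter builds a call; Args[] is a
+// placeholder for the already-converted argument operands:
+//   InstCall *Call = InstCall::create(Func, NumArgs, Dest, CallTarget);
+//   for (SizeT I = 0; I < NumArgs; ++I)
+//     Call->addArg(Args[I]);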
+
+// Cast instruction (a.k.a. conversion operation).
+class InstCast : public Inst {
+public:
+ enum OpKind {
+#define X(tag, str) tag,
+ ICEINSTCAST_TABLE
+#undef X
+ };
+ static InstCast *create(Cfg *Func, OpKind CastKind, Variable *Dest,
+ Operand *Source) {
+ return new (Func->allocateInst<InstCast>())
+ InstCast(Func, CastKind, Dest, Source);
+ }
+ OpKind getCastKind() const { return CastKind; }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Cast; }
+
+private:
+ InstCast(Cfg *Func, OpKind CastKind, Variable *Dest, Operand *Source);
+ InstCast(const InstCast &) LLVM_DELETED_FUNCTION;
+ InstCast &operator=(const InstCast &) LLVM_DELETED_FUNCTION;
+ virtual ~InstCast() {}
+ const OpKind CastKind;
+};
+
+// Floating-point comparison instruction. The source operands are
+// captured in getSrc(0) and getSrc(1).
+class InstFcmp : public Inst {
+public:
+ enum FCond {
+#define X(tag, str) tag,
+ ICEINSTFCMP_TABLE
+#undef X
+ };
+ static InstFcmp *create(Cfg *Func, FCond Condition, Variable *Dest,
+ Operand *Source1, Operand *Source2) {
+ return new (Func->allocateInst<InstFcmp>())
+ InstFcmp(Func, Condition, Dest, Source1, Source2);
+ }
+ FCond getCondition() const { return Condition; }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Fcmp; }
+
+private:
+ InstFcmp(Cfg *Func, FCond Condition, Variable *Dest, Operand *Source1,
+ Operand *Source2);
+ InstFcmp(const InstFcmp &) LLVM_DELETED_FUNCTION;
+ InstFcmp &operator=(const InstFcmp &) LLVM_DELETED_FUNCTION;
+ virtual ~InstFcmp() {}
+ const FCond Condition;
+};
+
+// Integer comparison instruction. The source operands are captured
+// in getSrc(0) and getSrc(1).
+class InstIcmp : public Inst {
+public:
+ enum ICond {
+#define X(tag, str) tag,
+ ICEINSTICMP_TABLE
+#undef X
+ };
+ static InstIcmp *create(Cfg *Func, ICond Condition, Variable *Dest,
+ Operand *Source1, Operand *Source2) {
+ return new (Func->allocateInst<InstIcmp>())
+ InstIcmp(Func, Condition, Dest, Source1, Source2);
+ }
+ ICond getCondition() const { return Condition; }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Icmp; }
+
+private:
+ InstIcmp(Cfg *Func, ICond Condition, Variable *Dest, Operand *Source1,
+ Operand *Source2);
+ InstIcmp(const InstIcmp &) LLVM_DELETED_FUNCTION;
+ InstIcmp &operator=(const InstIcmp &) LLVM_DELETED_FUNCTION;
+ virtual ~InstIcmp() {}
+ const ICond Condition;
+};
+
+// Load instruction. The source address is captured in getSrc(0).
+class InstLoad : public Inst {
+public:
+ static InstLoad *create(Cfg *Func, Variable *Dest, Operand *SourceAddr) {
+ return new (Func->allocateInst<InstLoad>())
+ InstLoad(Func, Dest, SourceAddr);
+ }
+ Operand *getSourceAddress() const { return getSrc(0); }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Load; }
+
+private:
+ InstLoad(Cfg *Func, Variable *Dest, Operand *SourceAddr);
+ InstLoad(const InstLoad &) LLVM_DELETED_FUNCTION;
+ InstLoad &operator=(const InstLoad &) LLVM_DELETED_FUNCTION;
+ virtual ~InstLoad() {}
+};
+
+// Phi instruction. For incoming edge I, the node is Labels[I] and
+// the Phi source operand is getSrc(I).
+class InstPhi : public Inst {
+public:
+ static InstPhi *create(Cfg *Func, SizeT MaxSrcs, Variable *Dest) {
+ return new (Func->allocateInst<InstPhi>()) InstPhi(Func, MaxSrcs, Dest);
+ }
+ void addArgument(Operand *Source, CfgNode *Label);
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Phi; }
+
+private:
+ InstPhi(Cfg *Func, SizeT MaxSrcs, Variable *Dest);
+ InstPhi(const InstPhi &) LLVM_DELETED_FUNCTION;
+ InstPhi &operator=(const InstPhi &) LLVM_DELETED_FUNCTION;
+ virtual void destroy(Cfg *Func) {
+ Func->deallocateArrayOf<CfgNode *>(Labels);
+ Inst::destroy(Func);
+ }
+ virtual ~InstPhi() {}
+
+ // Labels[] duplicates the InEdges[] information in the enclosing
+ // CfgNode, but the Phi instruction is created before InEdges[]
+ // is available, so it's more complicated to share the list.
+ CfgNode **Labels;
+};
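+
+// Usage sketch: one (source operand, predecessor node) pair is added
+// per incoming edge; the operands and nodes are assumed to exist:
+//   InstPhi *Phi = InstPhi::create(Func, NumPreds, Dest);
+//   Phi->addArgument(ValueFromPred0, Pred0);
+//   Phi->addArgument(ValueFromPred1, Pred1);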
+
+// Ret instruction. The return value is captured in getSrc(0), but if
+// there is no return value (void-type function), then
+// getSrcSize()==0 and hasRetValue()==false.
+class InstRet : public Inst {
+public:
+ static InstRet *create(Cfg *Func, Operand *RetValue = NULL) {
+ return new (Func->allocateInst<InstRet>()) InstRet(Func, RetValue);
+ }
+  bool hasRetValue() const { return getSrcSize() > 0; }
+ Operand *getRetValue() const {
+ assert(hasRetValue());
+ return getSrc(0);
+ }
+ virtual NodeList getTerminatorEdges() const { return NodeList(); }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Ret; }
+
+private:
+ InstRet(Cfg *Func, Operand *RetValue);
+ InstRet(const InstRet &) LLVM_DELETED_FUNCTION;
+ InstRet &operator=(const InstRet &) LLVM_DELETED_FUNCTION;
+ virtual ~InstRet() {}
+};
+
+// Select instruction. The condition, true, and false operands are
+// captured in getSrc(0), getSrc(1), and getSrc(2), respectively.
+class InstSelect : public Inst {
+public:
+ static InstSelect *create(Cfg *Func, Variable *Dest, Operand *Condition,
+ Operand *SourceTrue, Operand *SourceFalse) {
+ return new (Func->allocateInst<InstSelect>())
+ InstSelect(Func, Dest, Condition, SourceTrue, SourceFalse);
+ }
+ Operand *getCondition() const { return getSrc(0); }
+ Operand *getTrueOperand() const { return getSrc(1); }
+ Operand *getFalseOperand() const { return getSrc(2); }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Select; }
+
+private:
+ InstSelect(Cfg *Func, Variable *Dest, Operand *Condition, Operand *Source1,
+ Operand *Source2);
+ InstSelect(const InstSelect &) LLVM_DELETED_FUNCTION;
+ InstSelect &operator=(const InstSelect &) LLVM_DELETED_FUNCTION;
+ virtual ~InstSelect() {}
+};
+
+// Store instruction. The data operand to be stored is captured in
+// getSrc(0), and the address it is stored to is captured in getSrc(1).
+class InstStore : public Inst {
+public:
+ static InstStore *create(Cfg *Func, Operand *Data, Operand *Addr) {
+ return new (Func->allocateInst<InstStore>()) InstStore(Func, Data, Addr);
+ }
+ Operand *getAddr() const { return getSrc(1); }
+ Operand *getData() const { return getSrc(0); }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Store; }
+
+private:
+ InstStore(Cfg *Func, Operand *Data, Operand *Addr);
+ InstStore(const InstStore &) LLVM_DELETED_FUNCTION;
+ InstStore &operator=(const InstStore &) LLVM_DELETED_FUNCTION;
+ virtual ~InstStore() {}
+};
+
+// Switch instruction. The single source operand is captured as
+// getSrc(0).
+class InstSwitch : public Inst {
+public:
+ static InstSwitch *create(Cfg *Func, SizeT NumCases, Operand *Source,
+ CfgNode *LabelDefault) {
+ return new (Func->allocateInst<InstSwitch>())
+ InstSwitch(Func, NumCases, Source, LabelDefault);
+ }
+ Operand *getComparison() const { return getSrc(0); }
+ CfgNode *getLabelDefault() const { return LabelDefault; }
+ SizeT getNumCases() const { return NumCases; }
+ uint64_t getValue(SizeT I) const {
+ assert(I < NumCases);
+ return Values[I];
+ }
+ CfgNode *getLabel(SizeT I) const {
+ assert(I < NumCases);
+ return Labels[I];
+ }
+ void addBranch(SizeT CaseIndex, uint64_t Value, CfgNode *Label);
+ virtual NodeList getTerminatorEdges() const;
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) { return Inst->getKind() == Switch; }
+
+private:
+ InstSwitch(Cfg *Func, SizeT NumCases, Operand *Source, CfgNode *LabelDefault);
+ InstSwitch(const InstSwitch &) LLVM_DELETED_FUNCTION;
+ InstSwitch &operator=(const InstSwitch &) LLVM_DELETED_FUNCTION;
+ virtual void destroy(Cfg *Func) {
+ Func->deallocateArrayOf<uint64_t>(Values);
+ Func->deallocateArrayOf<CfgNode *>(Labels);
+ Inst::destroy(Func);
+ }
+ virtual ~InstSwitch() {}
+
+ CfgNode *LabelDefault;
+ SizeT NumCases; // not including the default case
+ uint64_t *Values; // size is NumCases
+ CfgNode **Labels; // size is NumCases
+};
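+
+// Usage sketch; the case values and target nodes are illustrative:
+//   InstSwitch *Sw = InstSwitch::create(Func, 2, Source, DefaultNode);
+//   Sw->addBranch(0, 10, NodeForTen);
+//   Sw->addBranch(1, 20, NodeForTwenty);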
+
+// Unreachable instruction. This is a terminator instruction with no
+// operands.
+class InstUnreachable : public Inst {
+public:
+ static InstUnreachable *create(Cfg *Func) {
+ return new (Func->allocateInst<InstUnreachable>()) InstUnreachable(Func);
+ }
+ virtual NodeList getTerminatorEdges() const { return NodeList(); }
+ virtual void dump(const Cfg *Func) const;
+ static bool classof(const Inst *Inst) {
+ return Inst->getKind() == Unreachable;
+ }
+
+private:
+ InstUnreachable(Cfg *Func);
+ InstUnreachable(const InstUnreachable &) LLVM_DELETED_FUNCTION;
+ InstUnreachable &operator=(const InstUnreachable &) LLVM_DELETED_FUNCTION;
+ virtual ~InstUnreachable() {}
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEINST_H
diff --git a/src/IceOperand.cpp b/src/IceOperand.cpp
new file mode 100644
index 0000000..1009a33
--- /dev/null
+++ b/src/IceOperand.cpp
@@ -0,0 +1,91 @@
+//===- subzero/src/IceOperand.cpp - High-level operand implementation -----===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Operand class and its
+// target-independent subclasses, primarily for the methods of the
+// Variable class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceCfg.h"
+#include "IceInst.h"
+#include "IceOperand.h"
+
+namespace Ice {
+
+bool operator<(const RelocatableTuple &A, const RelocatableTuple &B) {
+ if (A.Offset != B.Offset)
+ return A.Offset < B.Offset;
+ if (A.SuppressMangling != B.SuppressMangling)
+ return A.SuppressMangling < B.SuppressMangling;
+ return A.Name < B.Name;
+}
+
+void Variable::setUse(const Inst *Inst, const CfgNode *Node) {
+ if (DefNode == NULL)
+ return;
+ if (llvm::isa<InstPhi>(Inst) || Node != DefNode)
+ DefNode = NULL;
+}
+
+void Variable::setDefinition(Inst *Inst, const CfgNode *Node) {
+ if (DefNode == NULL)
+ return;
+  // If we ever care about multi-def variables, we could first check
+  // the preexisting DefInst here.
+ DefInst = Inst;
+ if (Node != DefNode)
+ DefNode = NULL;
+}
+
+void Variable::replaceDefinition(Inst *Inst, const CfgNode *Node) {
+ DefInst = NULL;
+ setDefinition(Inst, Node);
+}
+
+void Variable::setIsArg(Cfg *Func) {
+ IsArgument = true;
+ if (DefNode == NULL)
+ return;
+ CfgNode *Entry = Func->getEntryNode();
+ if (DefNode == Entry)
+ return;
+ DefNode = NULL;
+}
+
+IceString Variable::getName() const {
+ if (!Name.empty())
+ return Name;
+ char buf[30];
+  snprintf(buf, llvm::array_lengthof(buf), "__%u",
+           static_cast<unsigned>(getIndex()));
+ return buf;
+}
+
+// ======================== dump routines ======================== //
+
+void Variable::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ const CfgNode *CurrentNode = Func->getCurrentNode();
+ (void)CurrentNode; // used only in assert()
+ assert(CurrentNode == NULL || DefNode == NULL || DefNode == CurrentNode);
+ Str << "%" << getName();
+}
+
+void Operand::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ Str << "Operand<?>";
+}
+
+void ConstantRelocatable::dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ Str << "@" << Name;
+ if (Offset)
+ Str << "+" << Offset;
+}
+
+} // end of namespace Ice
diff --git a/src/IceOperand.h b/src/IceOperand.h
new file mode 100644
index 0000000..fcad7b9
--- /dev/null
+++ b/src/IceOperand.h
@@ -0,0 +1,251 @@
+//===- subzero/src/IceOperand.h - High-level operands -----------*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Operand class and its target-independent
+// subclasses. The main classes are Variable, which represents an
+// LLVM variable that is either register- or stack-allocated, and the
+// Constant hierarchy, which represents integer, floating-point,
+// and/or symbolic constants.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICEOPERAND_H
+#define SUBZERO_SRC_ICEOPERAND_H
+
+#include "IceDefs.h"
+#include "IceTypes.h"
+
+namespace Ice {
+
+class Operand {
+public:
+ enum OperandKind {
+ kConst_Base,
+ kConstInteger,
+ kConstFloat,
+ kConstDouble,
+ kConstRelocatable,
+ kConst_Num,
+ kVariable,
+ // Target-specific operand classes use kTarget as the starting
+ // point for their Kind enum space.
+ kTarget
+ };
+ OperandKind getKind() const { return Kind; }
+ Type getType() const { return Ty; }
+
+  // Every Operand keeps an array of the Variables referenced in the
+  // operand. This gives the liveness operations quick access to the
+  // variables of interest, without having to dig deeper into the
+  // operand.
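+  // For example, a liveness pass can visit every variable of any kind
+  // of operand uniformly (visit() is a placeholder for the pass's own
+  // logic):
+  //   for (SizeT I = 0; I < Op->getNumVars(); ++I)
+  //     visit(Op->getVar(I));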
+ SizeT getNumVars() const { return NumVars; }
+ Variable *getVar(SizeT I) const {
+ assert(I < getNumVars());
+ return Vars[I];
+ }
+ virtual void dump(const Cfg *Func) const = 0;
+
+ // Query whether this object was allocated in isolation, or added to
+ // some higher-level pool. This determines whether a containing
+ // object's destructor should delete this object. Generally,
+ // constants are pooled globally, variables are pooled per-CFG, and
+ // target-specific operands are not pooled.
+ virtual bool isPooled() const { return false; }
+
+ virtual ~Operand() {}
+
+protected:
+ Operand(OperandKind Kind, Type Ty)
+ : Ty(Ty), Kind(Kind), NumVars(0), Vars(NULL) {}
+
+ const Type Ty;
+ const OperandKind Kind;
+ // Vars and NumVars are initialized by the derived class.
+ SizeT NumVars;
+ Variable **Vars;
+
+private:
+ Operand(const Operand &) LLVM_DELETED_FUNCTION;
+ Operand &operator=(const Operand &) LLVM_DELETED_FUNCTION;
+};
+
+// Constant is the abstract base class for constants. All
+// constants are allocated from a global arena and are pooled.
+class Constant : public Operand {
+public:
+ virtual void dump(const Cfg *Func) const = 0;
+
+ static bool classof(const Operand *Operand) {
+ OperandKind Kind = Operand->getKind();
+ return Kind >= kConst_Base && Kind <= kConst_Num;
+ }
+
+protected:
+ Constant(OperandKind Kind, Type Ty) : Operand(Kind, Ty) {
+ Vars = NULL;
+ NumVars = 0;
+ }
+ virtual ~Constant() {}
+
+private:
+ Constant(const Constant &) LLVM_DELETED_FUNCTION;
+ Constant &operator=(const Constant &) LLVM_DELETED_FUNCTION;
+};
+
+// ConstantPrimitive<> wraps a primitive type.
+template <typename T, Operand::OperandKind K>
+class ConstantPrimitive : public Constant {
+public:
+ static ConstantPrimitive *create(GlobalContext *Ctx, Type Ty, T Value) {
+ return new (Ctx->allocate<ConstantPrimitive>())
+ ConstantPrimitive(Ty, Value);
+ }
+ T getValue() const { return Value; }
+ virtual void dump(const Cfg *Func) const {
+ Ostream &Str = Func->getContext()->getStrDump();
+ Str << getValue();
+ }
+
+ static bool classof(const Operand *Operand) {
+ return Operand->getKind() == K;
+ }
+
+private:
+ ConstantPrimitive(Type Ty, T Value) : Constant(K, Ty), Value(Value) {}
+ ConstantPrimitive(const ConstantPrimitive &) LLVM_DELETED_FUNCTION;
+ ConstantPrimitive &operator=(const ConstantPrimitive &) LLVM_DELETED_FUNCTION;
+ virtual ~ConstantPrimitive() {}
+ const T Value;
+};
+
+typedef ConstantPrimitive<uint64_t, Operand::kConstInteger> ConstantInteger;
+typedef ConstantPrimitive<float, Operand::kConstFloat> ConstantFloat;
+typedef ConstantPrimitive<double, Operand::kConstDouble> ConstantDouble;
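+
+// Clients normally obtain pooled constants through the GlobalContext
+// rather than calling create() directly, e.g. (illustrative; Ctx is a
+// GlobalContext pointer):
+//   Operand *FortyTwo = Ctx->getConstantInt(IceType_i32, 42);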
+
+// RelocatableTuple bundles the parameters that are used to
+// construct a ConstantRelocatable. It is done this way so that
+// ConstantRelocatable can fit into the global constant pool
+// template mechanism.
+class RelocatableTuple {
+ RelocatableTuple &operator=(const RelocatableTuple &) LLVM_DELETED_FUNCTION;
+
+public:
+ RelocatableTuple(const int64_t Offset, const IceString &Name,
+ bool SuppressMangling)
+ : Offset(Offset), Name(Name), SuppressMangling(SuppressMangling) {}
+ RelocatableTuple(const RelocatableTuple &Other)
+ : Offset(Other.Offset), Name(Other.Name),
+ SuppressMangling(Other.SuppressMangling) {}
+
+ const int64_t Offset;
+ const IceString Name;
+ bool SuppressMangling;
+};
+
+bool operator<(const RelocatableTuple &A, const RelocatableTuple &B);
+
+// ConstantRelocatable represents a symbolic constant combined with
+// a fixed offset.
+class ConstantRelocatable : public Constant {
+public:
+ static ConstantRelocatable *create(GlobalContext *Ctx, Type Ty,
+ const RelocatableTuple &Tuple) {
+ return new (Ctx->allocate<ConstantRelocatable>()) ConstantRelocatable(
+ Ty, Tuple.Offset, Tuple.Name, Tuple.SuppressMangling);
+ }
+ int64_t getOffset() const { return Offset; }
+ IceString getName() const { return Name; }
+ void setSuppressMangling(bool Value) { SuppressMangling = Value; }
+ bool getSuppressMangling() const { return SuppressMangling; }
+ virtual void dump(const Cfg *Func) const;
+
+ static bool classof(const Operand *Operand) {
+ OperandKind Kind = Operand->getKind();
+ return Kind == kConstRelocatable;
+ }
+
+private:
+ ConstantRelocatable(Type Ty, int64_t Offset, const IceString &Name,
+ bool SuppressMangling)
+ : Constant(kConstRelocatable, Ty), Offset(Offset), Name(Name),
+ SuppressMangling(SuppressMangling) {}
+ ConstantRelocatable(const ConstantRelocatable &) LLVM_DELETED_FUNCTION;
+ ConstantRelocatable &
+ operator=(const ConstantRelocatable &) LLVM_DELETED_FUNCTION;
+ virtual ~ConstantRelocatable() {}
+ const int64_t Offset; // fixed offset to add
+ const IceString Name; // optional for debug/dump
+ bool SuppressMangling;
+};
+
+// Variable represents an operand that is register-allocated or
+// stack-allocated. If it is register-allocated, it will ultimately
+// have a non-negative RegNum field.
+class Variable : public Operand {
+public:
+ static Variable *create(Cfg *Func, Type Ty, const CfgNode *Node, SizeT Index,
+ const IceString &Name) {
+ return new (Func->allocate<Variable>()) Variable(Ty, Node, Index, Name);
+ }
+
+ SizeT getIndex() const { return Number; }
+ IceString getName() const;
+
+ Inst *getDefinition() const { return DefInst; }
+ void setDefinition(Inst *Inst, const CfgNode *Node);
+ void replaceDefinition(Inst *Inst, const CfgNode *Node);
+
+ const CfgNode *getLocalUseNode() const { return DefNode; }
+ bool isMultiblockLife() const { return (DefNode == NULL); }
+ void setUse(const Inst *Inst, const CfgNode *Node);
+
+ bool getIsArg() const { return IsArgument; }
+ void setIsArg(Cfg *Func);
+
+ virtual void dump(const Cfg *Func) const;
+
+ static bool classof(const Operand *Operand) {
+ return Operand->getKind() == kVariable;
+ }
+
+private:
+ Variable(Type Ty, const CfgNode *Node, SizeT Index, const IceString &Name)
+ : Operand(kVariable, Ty), Number(Index), Name(Name), DefInst(NULL),
+ DefNode(Node), IsArgument(false) {
+ Vars = VarsReal;
+ Vars[0] = this;
+ NumVars = 1;
+ }
+ Variable(const Variable &) LLVM_DELETED_FUNCTION;
+ Variable &operator=(const Variable &) LLVM_DELETED_FUNCTION;
+ virtual ~Variable() {}
+ // Number is unique across all variables, and is used as a
+ // (bit)vector index for liveness analysis.
+ const SizeT Number;
+ // Name is optional.
+ const IceString Name;
+ // DefInst is the instruction that produces this variable as its
+ // dest.
+ Inst *DefInst;
+ // DefNode is the node where this variable was produced, and is
+ // reset to NULL if it is used outside that node. This is used for
+ // detecting isMultiblockLife(). TODO: Collapse this to a single
+ // bit and use a separate pass to calculate the values across the
+ // Cfg. This saves space in the Variable, and removes the fragility
+ // of incrementally computing and maintaining the information.
+ const CfgNode *DefNode;
+ bool IsArgument;
+ // VarsReal (and Operand::Vars) are set up such that Vars[0] ==
+ // this.
+ Variable *VarsReal[1];
+};
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICEOPERAND_H
diff --git a/src/IceTypes.cpp b/src/IceTypes.cpp
new file mode 100644
index 0000000..b54c0d7
--- /dev/null
+++ b/src/IceTypes.cpp
@@ -0,0 +1,74 @@
+//===- subzero/src/IceTypes.cpp - Primitive type properties ---------------===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a few attributes of Subzero primitive types.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceDefs.h"
+#include "IceTypes.h"
+
+namespace Ice {
+
+namespace {
+
+const struct {
+ size_t TypeWidthInBytes;
+ size_t TypeAlignInBytes;
+ const char *DisplayString;
+} TypeAttributes[] = {
+#define X(tag, size, align, str) \
+ { size, align, str } \
+ ,
+ ICETYPE_TABLE
+#undef X
+ };
+
+const size_t TypeAttributesSize =
+ sizeof(TypeAttributes) / sizeof(*TypeAttributes);
+
+} // end anonymous namespace
+
+size_t typeWidthInBytes(Type Ty) {
+ size_t Width = 0;
+ size_t Index = static_cast<size_t>(Ty);
+ if (Index < TypeAttributesSize) {
+ Width = TypeAttributes[Index].TypeWidthInBytes;
+ } else {
+ assert(0 && "Invalid type for typeWidthInBytes()");
+ }
+ return Width;
+}
+
+size_t typeAlignInBytes(Type Ty) {
+ size_t Align = 0;
+ size_t Index = static_cast<size_t>(Ty);
+ if (Index < TypeAttributesSize) {
+ Align = TypeAttributes[Index].TypeAlignInBytes;
+ } else {
+ assert(0 && "Invalid type for typeAlignInBytes()");
+ }
+ return Align;
+}
+
+// ======================== Dump routines ======================== //
+
+template <> Ostream &operator<<(Ostream &Str, const Type &Ty) {
+ size_t Index = static_cast<size_t>(Ty);
+ if (Index < TypeAttributesSize) {
+ Str << TypeAttributes[Index].DisplayString;
+ } else {
+ Str << "???";
+ assert(0 && "Invalid type for printing");
+ }
+
+ return Str;
+}
+
+} // end of namespace Ice
diff --git a/src/IceTypes.def b/src/IceTypes.def
new file mode 100644
index 0000000..a54ab65
--- /dev/null
+++ b/src/IceTypes.def
@@ -0,0 +1,31 @@
+//===- subzero/src/IceTypes.def - X-macros for ICE types --------*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines properties of ICE primitive types in the form of
+// x-macros.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICETYPES_DEF
+#define SUBZERO_SRC_ICETYPES_DEF
+
+#define ICETYPE_TABLE \
+ /* enum value, size, align, printable string */ \
+ /* (size and alignment in bytes) */ \
+ X(IceType_void, 0, 0, "void") \
+ X(IceType_i1, 1, 1, "i1") \
+ X(IceType_i8, 1, 1, "i8") \
+ X(IceType_i16, 2, 1, "i16") \
+ X(IceType_i32, 4, 1, "i32") \
+ X(IceType_i64, 8, 1, "i64") \
+ X(IceType_f32, 4, 4, "float") \
+ X(IceType_f64, 8, 8, "double")
+//#define X(tag, size, align, str)
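+//
+// A client defines X and then expands ICETYPE_TABLE; e.g. IceTypes.h
+// builds the Type enum with:
+//   #define X(tag, size, align, str) tag,
+//     ICETYPE_TABLE
+//   #undef X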
+
+#endif // SUBZERO_SRC_ICETYPES_DEF
diff --git a/src/IceTypes.h b/src/IceTypes.h
new file mode 100644
index 0000000..b3d28c3
--- /dev/null
+++ b/src/IceTypes.h
@@ -0,0 +1,36 @@
+//===- subzero/src/IceTypes.h - Primitive ICE types -------------*- C++ -*-===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a few properties of the primitive types allowed
+// in Subzero. Every Subzero source file is expected to include
+// IceTypes.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SUBZERO_SRC_ICETYPES_H
+#define SUBZERO_SRC_ICETYPES_H
+
+#include "IceTypes.def"
+
+namespace Ice {
+
+enum Type {
+#define X(tag, size, align, str) tag,
+ ICETYPE_TABLE
+#undef X
+};
+
+size_t typeWidthInBytes(Type Ty);
+size_t typeAlignInBytes(Type Ty);
+
+template <> Ostream &operator<<(class Ostream &Str, const Type &Ty);
+
+} // end of namespace Ice
+
+#endif // SUBZERO_SRC_ICETYPES_H
diff --git a/src/llvm2ice.cpp b/src/llvm2ice.cpp
new file mode 100644
index 0000000..df9061b
--- /dev/null
+++ b/src/llvm2ice.cpp
@@ -0,0 +1,655 @@
+//===- subzero/src/llvm2ice.cpp - Driver for testing ----------------------===//
+//
+// The Subzero Code Generator
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a driver that uses LLVM capabilities to parse a
+// bitcode file and build the LLVM IR, and then convert the LLVM basic
+// blocks, instructions, and operands into their Subzero equivalents.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IceCfg.h"
+#include "IceCfgNode.h"
+#include "IceDefs.h"
+#include "IceGlobalContext.h"
+#include "IceInst.h"
+#include "IceOperand.h"
+#include "IceTypes.h"
+
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IRReader/IRReader.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_os_ostream.h"
+#include "llvm/Support/SourceMgr.h"
+
+#include <fstream>
+#include <iostream>
+
+using namespace llvm;
+
+// Debugging helper
+template <typename T> static std::string LLVMObjectAsString(const T *O) {
+ std::string Dump;
+ raw_string_ostream Stream(Dump);
+ O->print(Stream);
+ return Stream.str();
+}
+
+// Converter from LLVM to ICE. The entry point is the convertFunction method.
+//
+// Note: this currently assumes that the given IR was verified to be valid PNaCl
+// bitcode:
+// https://developers.google.com/native-client/dev/reference/pnacl-bitcode-abi
+// If not, all kinds of assertions may fire.
+//
+class LLVM2ICEConverter {
+public:
+ LLVM2ICEConverter(Ice::GlobalContext *Ctx)
+ : Ctx(Ctx), Func(NULL), CurrentNode(NULL) {
+ // All PNaCl pointer widths are 32 bits because of the sandbox
+ // model.
+ SubzeroPointerType = Ice::IceType_i32;
+ }
+
+ Ice::Cfg *convertFunction(const Function *F) {
+ VarMap.clear();
+ NodeMap.clear();
+ Func = new Ice::Cfg(Ctx);
+ Func->setFunctionName(F->getName());
+ Func->setReturnType(convertType(F->getReturnType()));
+ Func->setInternal(F->hasInternalLinkage());
+
+    // The initial definition/use of each arg is in the entry node.
+ CurrentNode = mapBasicBlockToNode(&F->getEntryBlock());
+ for (Function::const_arg_iterator ArgI = F->arg_begin(),
+ ArgE = F->arg_end();
+ ArgI != ArgE; ++ArgI) {
+ Func->addArg(mapValueToIceVar(ArgI));
+ }
+
+ // Make an initial pass through the block list just to resolve the
+ // blocks in the original linearized order. Otherwise the ICE
+ // linearized order will be affected by branch targets in
+ // terminator instructions.
+ for (Function::const_iterator BBI = F->begin(), BBE = F->end(); BBI != BBE;
+ ++BBI) {
+ mapBasicBlockToNode(BBI);
+ }
+ for (Function::const_iterator BBI = F->begin(), BBE = F->end(); BBI != BBE;
+ ++BBI) {
+ CurrentNode = mapBasicBlockToNode(BBI);
+ convertBasicBlock(BBI);
+ }
+ Func->setEntryNode(mapBasicBlockToNode(&F->getEntryBlock()));
+ Func->computePredecessors();
+
+ return Func;
+ }
+
+private:
+ // LLVM values (instructions, etc.) are mapped directly to ICE variables.
+ // mapValueToIceVar has a version that forces an ICE type on the variable,
+ // and a version that just uses convertType on V.
+ Ice::Variable *mapValueToIceVar(const Value *V, Ice::Type IceTy) {
+ if (IceTy == Ice::IceType_void)
+ return NULL;
+ if (VarMap.find(V) == VarMap.end()) {
+ assert(CurrentNode);
+ VarMap[V] = Func->makeVariable(IceTy, CurrentNode, V->getName());
+ }
+ return VarMap[V];
+ }
+
+ Ice::Variable *mapValueToIceVar(const Value *V) {
+ return mapValueToIceVar(V, convertType(V->getType()));
+ }
+
+ Ice::CfgNode *mapBasicBlockToNode(const BasicBlock *BB) {
+ if (NodeMap.find(BB) == NodeMap.end()) {
+ NodeMap[BB] = Func->makeNode(BB->getName());
+ }
+ return NodeMap[BB];
+ }
+
+ Ice::Type convertIntegerType(const IntegerType *IntTy) const {
+ switch (IntTy->getBitWidth()) {
+ case 1:
+ return Ice::IceType_i1;
+ case 8:
+ return Ice::IceType_i8;
+ case 16:
+ return Ice::IceType_i16;
+ case 32:
+ return Ice::IceType_i32;
+ case 64:
+ return Ice::IceType_i64;
+ default:
+ report_fatal_error(std::string("Invalid PNaCl int type: ") +
+ LLVMObjectAsString(IntTy));
+ return Ice::IceType_void;
+ }
+ }
+
+ Ice::Type convertType(const Type *Ty) const {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID:
+ return Ice::IceType_void;
+ case Type::IntegerTyID:
+ return convertIntegerType(cast<IntegerType>(Ty));
+ case Type::FloatTyID:
+ return Ice::IceType_f32;
+ case Type::DoubleTyID:
+ return Ice::IceType_f64;
+ case Type::PointerTyID:
+ return SubzeroPointerType;
+ case Type::FunctionTyID:
+ return SubzeroPointerType;
+ default:
+ report_fatal_error(std::string("Invalid PNaCl type: ") +
+ LLVMObjectAsString(Ty));
+ }
+
+ llvm_unreachable("convertType");
+ return Ice::IceType_void;
+ }
+
+  // Given an LLVM instruction and an operand number, produce the
+  // Operand it refers to. If there is no such operand, return NULL.
+ Ice::Operand *convertOperand(const Instruction *Inst, unsigned OpNum) {
+ if (OpNum >= Inst->getNumOperands()) {
+ return NULL;
+ }
+ const Value *Op = Inst->getOperand(OpNum);
+ return convertValue(Op);
+ }
+
+ Ice::Operand *convertValue(const Value *Op) {
+ if (const Constant *Const = dyn_cast<Constant>(Op)) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(Const)) {
+ return Ctx->getConstantSym(convertType(GV->getType()), 0,
+ GV->getName());
+ } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(Const)) {
+ return Ctx->getConstantInt(convertIntegerType(CI->getType()),
+ CI->getZExtValue());
+ } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Const)) {
+ Ice::Type Type = convertType(CFP->getType());
+ if (Type == Ice::IceType_f32)
+ return Ctx->getConstantFloat(CFP->getValueAPF().convertToFloat());
+ else if (Type == Ice::IceType_f64)
+ return Ctx->getConstantDouble(CFP->getValueAPF().convertToDouble());
+ assert(0 && "Unexpected floating point type");
+ return NULL;
+ } else {
+ assert(0 && "Unhandled constant type");
+ return NULL;
+ }
+ } else {
+ return mapValueToIceVar(Op);
+ }
+ }
+
+  // Note: this currently assumes a one-to-one mapping between LLVM IR
+  // instructions and ICE instructions.
+ Ice::Inst *convertInstruction(const Instruction *Inst) {
+ switch (Inst->getOpcode()) {
+ case Instruction::PHI:
+ return convertPHINodeInstruction(cast<PHINode>(Inst));
+ case Instruction::Br:
+ return convertBrInstruction(cast<BranchInst>(Inst));
+ case Instruction::Ret:
+ return convertRetInstruction(cast<ReturnInst>(Inst));
+ case Instruction::IntToPtr:
+ return convertIntToPtrInstruction(cast<IntToPtrInst>(Inst));
+ case Instruction::PtrToInt:
+ return convertPtrToIntInstruction(cast<PtrToIntInst>(Inst));
+ case Instruction::ICmp:
+ return convertICmpInstruction(cast<ICmpInst>(Inst));
+ case Instruction::FCmp:
+ return convertFCmpInstruction(cast<FCmpInst>(Inst));
+ case Instruction::Select:
+ return convertSelectInstruction(cast<SelectInst>(Inst));
+ case Instruction::Switch:
+ return convertSwitchInstruction(cast<SwitchInst>(Inst));
+ case Instruction::Load:
+ return convertLoadInstruction(cast<LoadInst>(Inst));
+ case Instruction::Store:
+ return convertStoreInstruction(cast<StoreInst>(Inst));
+ case Instruction::ZExt:
+ return convertCastInstruction(cast<ZExtInst>(Inst), Ice::InstCast::Zext);
+ case Instruction::SExt:
+ return convertCastInstruction(cast<SExtInst>(Inst), Ice::InstCast::Sext);
+ case Instruction::Trunc:
+ return convertCastInstruction(cast<TruncInst>(Inst),
+ Ice::InstCast::Trunc);
+ case Instruction::FPTrunc:
+ return convertCastInstruction(cast<FPTruncInst>(Inst),
+ Ice::InstCast::Fptrunc);
+ case Instruction::FPExt:
+ return convertCastInstruction(cast<FPExtInst>(Inst),
+ Ice::InstCast::Fpext);
+ case Instruction::FPToSI:
+ return convertCastInstruction(cast<FPToSIInst>(Inst),
+ Ice::InstCast::Fptosi);
+ case Instruction::FPToUI:
+ return convertCastInstruction(cast<FPToUIInst>(Inst),
+ Ice::InstCast::Fptoui);
+ case Instruction::SIToFP:
+ return convertCastInstruction(cast<SIToFPInst>(Inst),
+ Ice::InstCast::Sitofp);
+ case Instruction::UIToFP:
+ return convertCastInstruction(cast<UIToFPInst>(Inst),
+ Ice::InstCast::Uitofp);
+ case Instruction::BitCast:
+ return convertCastInstruction(cast<BitCastInst>(Inst),
+ Ice::InstCast::Bitcast);
+ case Instruction::Add:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Add);
+ case Instruction::Sub:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Sub);
+ case Instruction::Mul:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Mul);
+ case Instruction::UDiv:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Udiv);
+ case Instruction::SDiv:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Sdiv);
+ case Instruction::URem:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Urem);
+ case Instruction::SRem:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Srem);
+ case Instruction::Shl:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Shl);
+ case Instruction::LShr:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Lshr);
+ case Instruction::AShr:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Ashr);
+ case Instruction::FAdd:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Fadd);
+ case Instruction::FSub:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Fsub);
+ case Instruction::FMul:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Fmul);
+ case Instruction::FDiv:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Fdiv);
+ case Instruction::FRem:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Frem);
+ case Instruction::And:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::And);
+ case Instruction::Or:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Or);
+ case Instruction::Xor:
+ return convertArithInstruction(Inst, Ice::InstArithmetic::Xor);
+ case Instruction::Call:
+ return convertCallInstruction(cast<CallInst>(Inst));
+ case Instruction::Alloca:
+ return convertAllocaInstruction(cast<AllocaInst>(Inst));
+ case Instruction::Unreachable:
+ return convertUnreachableInstruction(cast<UnreachableInst>(Inst));
+ default:
+ report_fatal_error(std::string("Invalid PNaCl instruction: ") +
+ LLVMObjectAsString(Inst));
+ }
+
+ llvm_unreachable("convertInstruction");
+ return NULL;
+ }
+
+ Ice::Inst *convertLoadInstruction(const LoadInst *Inst) {
+ Ice::Operand *Src = convertOperand(Inst, 0);
+ Ice::Variable *Dest = mapValueToIceVar(Inst);
+ return Ice::InstLoad::create(Func, Dest, Src);
+ }
+
+ Ice::Inst *convertStoreInstruction(const StoreInst *Inst) {
+ Ice::Operand *Addr = convertOperand(Inst, 1);
+ Ice::Operand *Val = convertOperand(Inst, 0);
+ return Ice::InstStore::create(Func, Val, Addr);
+ }
+
+ Ice::Inst *convertArithInstruction(const Instruction *Inst,
+ Ice::InstArithmetic::OpKind Opcode) {
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Inst);
+ Ice::Operand *Src0 = convertOperand(Inst, 0);
+ Ice::Operand *Src1 = convertOperand(Inst, 1);
+ Ice::Variable *Dest = mapValueToIceVar(BinOp);
+ return Ice::InstArithmetic::create(Func, Opcode, Dest, Src0, Src1);
+ }
+
+ Ice::Inst *convertPHINodeInstruction(const PHINode *Inst) {
+ unsigned NumValues = Inst->getNumIncomingValues();
+ Ice::InstPhi *IcePhi =
+ Ice::InstPhi::create(Func, NumValues, mapValueToIceVar(Inst));
+ for (unsigned N = 0, E = NumValues; N != E; ++N) {
+ IcePhi->addArgument(convertOperand(Inst, N),
+ mapBasicBlockToNode(Inst->getIncomingBlock(N)));
+ }
+ return IcePhi;
+ }
+
+ Ice::Inst *convertBrInstruction(const BranchInst *Inst) {
+ if (Inst->isConditional()) {
+ Ice::Operand *Src = convertOperand(Inst, 0);
+ BasicBlock *BBThen = Inst->getSuccessor(0);
+ BasicBlock *BBElse = Inst->getSuccessor(1);
+ Ice::CfgNode *NodeThen = mapBasicBlockToNode(BBThen);
+ Ice::CfgNode *NodeElse = mapBasicBlockToNode(BBElse);
+ return Ice::InstBr::create(Func, Src, NodeThen, NodeElse);
+ } else {
+ BasicBlock *BBSucc = Inst->getSuccessor(0);
+ return Ice::InstBr::create(Func, mapBasicBlockToNode(BBSucc));
+ }
+ }
+
+ Ice::Inst *convertIntToPtrInstruction(const IntToPtrInst *Inst) {
+ Ice::Operand *Src = convertOperand(Inst, 0);
+ Ice::Variable *Dest = mapValueToIceVar(Inst, SubzeroPointerType);
+ return Ice::InstAssign::create(Func, Dest, Src);
+ }
+
+ Ice::Inst *convertPtrToIntInstruction(const PtrToIntInst *Inst) {
+ Ice::Operand *Src = convertOperand(Inst, 0);
+ Ice::Variable *Dest = mapValueToIceVar(Inst);
+ return Ice::InstAssign::create(Func, Dest, Src);
+ }
+
+ Ice::Inst *convertRetInstruction(const ReturnInst *Inst) {
+ Ice::Operand *RetOperand = convertOperand(Inst, 0);
+ if (RetOperand) {
+ return Ice::InstRet::create(Func, RetOperand);
+ } else {
+ return Ice::InstRet::create(Func);
+ }
+ }
+
+ Ice::Inst *convertCastInstruction(const Instruction *Inst,
+ Ice::InstCast::OpKind CastKind) {
+ Ice::Operand *Src = convertOperand(Inst, 0);
+ Ice::Variable *Dest = mapValueToIceVar(Inst);
+ return Ice::InstCast::create(Func, CastKind, Dest, Src);
+ }
+
+ Ice::Inst *convertICmpInstruction(const ICmpInst *Inst) {
+ Ice::Operand *Src0 = convertOperand(Inst, 0);
+ Ice::Operand *Src1 = convertOperand(Inst, 1);
+ Ice::Variable *Dest = mapValueToIceVar(Inst);
+
+ Ice::InstIcmp::ICond Cond;
+ switch (Inst->getPredicate()) {
+ default:
+ llvm_unreachable("ICmpInst predicate");
+ case CmpInst::ICMP_EQ:
+ Cond = Ice::InstIcmp::Eq;
+ break;
+ case CmpInst::ICMP_NE:
+ Cond = Ice::InstIcmp::Ne;
+ break;
+ case CmpInst::ICMP_UGT:
+ Cond = Ice::InstIcmp::Ugt;
+ break;
+ case CmpInst::ICMP_UGE:
+ Cond = Ice::InstIcmp::Uge;
+ break;
+ case CmpInst::ICMP_ULT:
+ Cond = Ice::InstIcmp::Ult;
+ break;
+ case CmpInst::ICMP_ULE:
+ Cond = Ice::InstIcmp::Ule;
+ break;
+ case CmpInst::ICMP_SGT:
+ Cond = Ice::InstIcmp::Sgt;
+ break;
+ case CmpInst::ICMP_SGE:
+ Cond = Ice::InstIcmp::Sge;
+ break;
+ case CmpInst::ICMP_SLT:
+ Cond = Ice::InstIcmp::Slt;
+ break;
+ case CmpInst::ICMP_SLE:
+ Cond = Ice::InstIcmp::Sle;
+ break;
+ }
+
+ return Ice::InstIcmp::create(Func, Cond, Dest, Src0, Src1);
+ }
+
+ Ice::Inst *convertFCmpInstruction(const FCmpInst *Inst) {
+ Ice::Operand *Src0 = convertOperand(Inst, 0);
+ Ice::Operand *Src1 = convertOperand(Inst, 1);
+ Ice::Variable *Dest = mapValueToIceVar(Inst);
+
+ Ice::InstFcmp::FCond Cond;
+ switch (Inst->getPredicate()) {
+    default:
+      llvm_unreachable("FCmpInst predicate");
+ case CmpInst::FCMP_FALSE:
+ Cond = Ice::InstFcmp::False;
+ break;
+ case CmpInst::FCMP_OEQ:
+ Cond = Ice::InstFcmp::Oeq;
+ break;
+ case CmpInst::FCMP_OGT:
+ Cond = Ice::InstFcmp::Ogt;
+ break;
+ case CmpInst::FCMP_OGE:
+ Cond = Ice::InstFcmp::Oge;
+ break;
+ case CmpInst::FCMP_OLT:
+ Cond = Ice::InstFcmp::Olt;
+ break;
+ case CmpInst::FCMP_OLE:
+ Cond = Ice::InstFcmp::Ole;
+ break;
+ case CmpInst::FCMP_ONE:
+ Cond = Ice::InstFcmp::One;
+ break;
+ case CmpInst::FCMP_ORD:
+ Cond = Ice::InstFcmp::Ord;
+ break;
+ case CmpInst::FCMP_UEQ:
+ Cond = Ice::InstFcmp::Ueq;
+ break;
+ case CmpInst::FCMP_UGT:
+ Cond = Ice::InstFcmp::Ugt;
+ break;
+ case CmpInst::FCMP_UGE:
+ Cond = Ice::InstFcmp::Uge;
+ break;
+ case CmpInst::FCMP_ULT:
+ Cond = Ice::InstFcmp::Ult;
+ break;
+ case CmpInst::FCMP_ULE:
+ Cond = Ice::InstFcmp::Ule;
+ break;
+ case CmpInst::FCMP_UNE:
+ Cond = Ice::InstFcmp::Une;
+ break;
+ case CmpInst::FCMP_UNO:
+ Cond = Ice::InstFcmp::Uno;
+ break;
+ case CmpInst::FCMP_TRUE:
+ Cond = Ice::InstFcmp::True;
+ break;
+ }
+
+ return Ice::InstFcmp::create(Func, Cond, Dest, Src0, Src1);
+ }
+
+ Ice::Inst *convertSelectInstruction(const SelectInst *Inst) {
+ Ice::Variable *Dest = mapValueToIceVar(Inst);
+ Ice::Operand *Cond = convertValue(Inst->getCondition());
+ Ice::Operand *Source1 = convertValue(Inst->getTrueValue());
+ Ice::Operand *Source2 = convertValue(Inst->getFalseValue());
+ return Ice::InstSelect::create(Func, Dest, Cond, Source1, Source2);
+ }
+
+ Ice::Inst *convertSwitchInstruction(const SwitchInst *Inst) {
+ Ice::Operand *Source = convertValue(Inst->getCondition());
+ Ice::CfgNode *LabelDefault = mapBasicBlockToNode(Inst->getDefaultDest());
+ unsigned NumCases = Inst->getNumCases();
+ Ice::InstSwitch *Switch =
+ Ice::InstSwitch::create(Func, NumCases, Source, LabelDefault);
+ unsigned CurrentCase = 0;
+ for (SwitchInst::ConstCaseIt I = Inst->case_begin(), E = Inst->case_end();
+ I != E; ++I, ++CurrentCase) {
+ uint64_t CaseValue = I.getCaseValue()->getZExtValue();
+ Ice::CfgNode *CaseSuccessor = mapBasicBlockToNode(I.getCaseSuccessor());
+ Switch->addBranch(CurrentCase, CaseValue, CaseSuccessor);
+ }
+ return Switch;
+ }
+
+ Ice::Inst *convertCallInstruction(const CallInst *Inst) {
+ Ice::Variable *Dest = mapValueToIceVar(Inst);
+ Ice::Operand *CallTarget = convertValue(Inst->getCalledValue());
+ unsigned NumArgs = Inst->getNumArgOperands();
+ // Note: Subzero doesn't (yet) do anything special with the Tail
+ // flag in the bitcode, i.e. CallInst::isTailCall().
+ Ice::InstCall *NewInst =
+ Ice::InstCall::create(Func, NumArgs, Dest, CallTarget);
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ NewInst->addArg(convertOperand(Inst, i));
+ }
+ return NewInst;
+ }
+
+ Ice::Inst *convertAllocaInstruction(const AllocaInst *Inst) {
+ // PNaCl bitcode only contains allocas of byte-granular objects.
+ Ice::Operand *ByteCount = convertValue(Inst->getArraySize());
+ uint32_t Align = Inst->getAlignment();
+ Ice::Variable *Dest = mapValueToIceVar(Inst, SubzeroPointerType);
+
+ return Ice::InstAlloca::create(Func, ByteCount, Align, Dest);
+ }
+
+ Ice::Inst *convertUnreachableInstruction(const UnreachableInst *Inst) {
+ return Ice::InstUnreachable::create(Func);
+ }
+
+ Ice::CfgNode *convertBasicBlock(const BasicBlock *BB) {
+ Ice::CfgNode *Node = mapBasicBlockToNode(BB);
+ for (BasicBlock::const_iterator II = BB->begin(), II_e = BB->end();
+ II != II_e; ++II) {
+ Ice::Inst *Inst = convertInstruction(II);
+ Node->appendInst(Inst);
+ }
+ return Node;
+ }
+
+private:
+ // Data
+ Ice::GlobalContext *Ctx;
+ Ice::Cfg *Func;
+ Ice::CfgNode *CurrentNode;
+ Ice::Type SubzeroPointerType;
+ std::map<const Value *, Ice::Variable *> VarMap;
+ std::map<const BasicBlock *, Ice::CfgNode *> NodeMap;
+};
+
+static cl::list<Ice::VerboseItem> VerboseList(
+ "verbose", cl::CommaSeparated,
+ cl::desc("Verbose options (can be comma-separated):"),
+ cl::values(
+ clEnumValN(Ice::IceV_Instructions, "inst", "Print basic instructions"),
+ clEnumValN(Ice::IceV_Deleted, "del", "Include deleted instructions"),
+ clEnumValN(Ice::IceV_InstNumbers, "instnum",
+ "Print instruction numbers"),
+ clEnumValN(Ice::IceV_Preds, "pred", "Show predecessors"),
+ clEnumValN(Ice::IceV_Succs, "succ", "Show successors"),
+ clEnumValN(Ice::IceV_Liveness, "live", "Liveness information"),
+ clEnumValN(Ice::IceV_RegManager, "rmgr", "Register manager status"),
+ clEnumValN(Ice::IceV_RegOrigins, "orig", "Physical register origins"),
+ clEnumValN(Ice::IceV_LinearScan, "regalloc", "Linear scan details"),
+ clEnumValN(Ice::IceV_Frame, "frame", "Stack frame layout details"),
+ clEnumValN(Ice::IceV_Timing, "time", "Pass timing details"),
+ clEnumValN(Ice::IceV_All, "all", "Use all verbose options"),
+ clEnumValN(Ice::IceV_None, "none", "No verbosity"), clEnumValEnd));
+static cl::opt<std::string> IRFilename(cl::Positional, cl::desc("<IR file>"),
+ cl::Required);
+static cl::opt<std::string> OutputFilename("o",
+ cl::desc("Override output filename"),
+ cl::init("-"),
+ cl::value_desc("filename"));
+static cl::opt<std::string>
+TestPrefix("prefix", cl::desc("Prepend a prefix to symbol names for testing"),
+ cl::init(""), cl::value_desc("prefix"));
+static cl::opt<bool>
+DisableInternal("external",
+ cl::desc("Disable 'internal' linkage type for testing"));
+static cl::opt<bool>
+DisableTranslation("notranslate", cl::desc("Disable Subzero translation"));
+
+static cl::opt<bool> SubzeroTimingEnabled(
+ "timing", cl::desc("Enable breakdown timing of Subzero translation"));
+
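+// Example invocation (the file and output names are illustrative):
+//   ./llvm2ice --verbose inst,pred foo.ll -o foo.dump
+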
+int main(int argc, char **argv) {
+ cl::ParseCommandLineOptions(argc, argv);
+
+ // Parse the input LLVM IR file into a module.
+ SMDiagnostic Err;
+ Module *Mod;
+
+ {
+ Ice::Timer T;
+ Mod = ParseIRFile(IRFilename, Err, getGlobalContext());
+
+ if (SubzeroTimingEnabled) {
+ std::cerr << "[Subzero timing] IR Parsing: " << T.getElapsedSec()
+ << " sec\n";
+ }
+ }
+
+ if (!Mod) {
+ Err.print(argv[0], errs());
+ return 1;
+ }
+
+ Ice::VerboseMask VMask = Ice::IceV_None;
+ for (unsigned i = 0; i != VerboseList.size(); ++i)
+ VMask |= VerboseList[i];
+
+ std::ofstream Ofs;
+ if (OutputFilename != "-") {
+ Ofs.open(OutputFilename.c_str(), std::ofstream::out);
+ }
+ raw_os_ostream *Os =
+ new raw_os_ostream(OutputFilename == "-" ? std::cout : Ofs);
+ Os->SetUnbuffered();
+
+ Ice::GlobalContext Ctx(Os, Os, VMask, TestPrefix);
+
+ for (Module::const_iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) {
+ if (I->empty())
+ continue;
+ LLVM2ICEConverter FunctionConverter(&Ctx);
+
+ Ice::Timer TConvert;
+ Ice::Cfg *Func = FunctionConverter.convertFunction(I);
+ if (DisableInternal)
+ Func->setInternal(false);
+
+ if (SubzeroTimingEnabled) {
+ std::cerr << "[Subzero timing] Convert function "
+ << Func->getFunctionName() << ": " << TConvert.getElapsedSec()
+ << " sec\n";
+ }
+
+ if (DisableTranslation) {
+ Func->dump();
+ }
+ }
+
+ return 0;
+}
diff --git a/szdiff.py b/szdiff.py
new file mode 100755
index 0000000..f2696e8
--- /dev/null
+++ b/szdiff.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python2
+
+import argparse
+import itertools
+import subprocess
+import re
+
+if __name__ == '__main__':
+ """Runs llvm2ice on an input .ll file, and compares the output
+ against the input.
+
+ Before comparing, the input file is massaged to remove comments,
+ blank lines, global variable definitions, external function
+ declarations, and possibly other patterns that llvm2ice does not
+ handle.
+
+ The output file and the massaged input file are compared line by
+ line for differences. However, there is a regex defined such that
+ if the regex matches a line in the input file, that line and the
+ corresponding line in the output file are ignored. This lets us
+ ignore minor differences such as inttoptr and ptrtoint, and
+ printing of floating-point constants.
+
+ On success, no output is produced. On failure, each mismatch is
+ printed as two lines, one starting with 'SZ' and one starting with
+ 'LL'.
+ """
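+    # Example invocation (assuming llvm2ice has been built in the
+    # current directory):
+    #   ./szdiff.py --llvm2ice=./llvm2ice \
+    #       tests_lit/llvm2ice_tests/64bit.pnacl.ll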
+ desc = 'Compare llvm2ice output against bitcode input.'
+ argparser = argparse.ArgumentParser(description=desc)
+ argparser.add_argument(
+ 'llfile', nargs='?', default='-',
+ type=argparse.FileType('r'), metavar='FILE',
+ help='Textual bitcode file [default stdin]')
+ argparser.add_argument(
+ '--llvm2ice', required=False, default='./llvm2ice', metavar='LLVM2ICE',
+ help='Path to llvm2ice driver program [default ./llvm2ice]')
+ args = argparser.parse_args()
+ bitcode = args.llfile.readlines()
+
+ # Run llvm2ice and collect its output lines into sz_out.
+ command = [args.llvm2ice, '-verbose', 'inst', '-notranslate', '-']
+ p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ sz_out = p.communicate(input=''.join(bitcode))[0].splitlines()
+
+ # Filter certain lines and patterns from the input, and collect
+ # the remainder into llc_out.
+ llc_out = []
+    tail_call = re.compile(' tail call ')
+ trailing_comment = re.compile(';.*')
+ ignore_pattern = re.compile('^ *$|^declare|^@')
+ for line in bitcode:
+ # Convert tail call into regular (non-tail) call.
+ line = tail_call.sub(' call ', line)
+ # Remove trailing comments and spaces.
+ line = trailing_comment.sub('', line).rstrip()
+        # Ignore blank lines, forward declarations, and variable definitions.
+ if not ignore_pattern.search(line):
+ llc_out.append(line)
+
+ # Compare sz_out and llc_out line by line, but ignore pairs of
+ # lines where the llc line matches a certain pattern.
+ return_code = 0
+ lines_total = 0
+ lines_diff = 0
+ ignore_pattern = re.compile(
+ '|'.join([' -[0-9]', # negative constants
+ ' (float|double) [-0-9]', # FP constants
+ ' (float|double) %\w+, [-0-9]',
+ ' inttoptr ', # inttoptr pointer types
+ ' ptrtoint ' # ptrtoint pointer types
+ ]))
+ for (sz_line, llc_line) in itertools.izip_longest(sz_out, llc_out):
+ lines_total += 1
+ if sz_line == llc_line:
+ continue
+ if llc_line and ignore_pattern.search(llc_line):
+ lines_diff += 1
+ continue
+ if sz_line: print 'SZ>' + sz_line
+ if llc_line: print 'LL>' + llc_line
+ return_code = 1
+
+ if return_code == 0:
+ message = 'Success (ignored %d diffs out of %d lines)'
+ print message % (lines_diff, lines_total)
+ exit(return_code)
diff --git a/tests_lit/.gitignore b/tests_lit/.gitignore
new file mode 100644
index 0000000..83260f8
--- /dev/null
+++ b/tests_lit/.gitignore
@@ -0,0 +1 @@
+Output
diff --git a/tests_lit/lit.cfg b/tests_lit/lit.cfg
new file mode 100644
index 0000000..171517d
--- /dev/null
+++ b/tests_lit/lit.cfg
@@ -0,0 +1,54 @@
+# Taken from utils/lit/tests in the LLVM tree and hacked together to support
+# our tests.
+
+# -*- Python -*-
+
+import os
+import sys
+
+import lit.formats
+
+# name: The name of this test suite.
+config.name = 'subzero'
+
+# testFormat: The test format to use to interpret tests.
+config.test_format = lit.formats.ShTest()
+
+# suffixes: A list of file extensions to treat as test files.
+config.suffixes = ['.ll']
+
+# test_source_root: The root path where tests are located.
+config.test_source_root = os.path.dirname(__file__)
+config.test_exec_root = config.test_source_root
+config.target_triple = '(unused)'
+
+src_root = os.path.abspath(os.path.join(config.test_source_root, '..'))
+bin_root = src_root
+config.substitutions.append(('%{src_root}', src_root))
+config.substitutions.append(('%{python}', sys.executable))
+
+# Finding LLVM binary tools. All tools used in the tests must be listed in
+# the llvmbintools list.
+llvmbinpath = os.path.abspath(os.environ.get('LLVM_BIN_PATH'))
+
+# Finding Subzero tools
+config.substitutions.append(('%llvm2ice', os.path.join(bin_root, 'llvm2ice')))
+config.substitutions.append(('%szdiff', os.path.join(bin_root, 'szdiff.py')))
+
+llvmbintools = ['FileCheck']
+
+for tool in llvmbintools:
+ config.substitutions.append((tool, os.path.join(llvmbinpath, tool)))
+
+# Add a feature to detect the Python version.
+config.available_features.add("python%d.%d" % (sys.version_info[0],
+ sys.version_info[1]))
+
+# Debugging output
+def dbg(s):
+ print '[DBG] %s' % s
+
+dbg('bin_root = %s' % bin_root)
+dbg('llvmbinpath = %s' % llvmbinpath)
+
+
diff --git a/tests_lit/llvm2ice_tests/64bit.pnacl.ll b/tests_lit/llvm2ice_tests/64bit.pnacl.ll
new file mode 100644
index 0000000..4b79fd1
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/64bit.pnacl.ll
@@ -0,0 +1,799 @@
+; RUIN: %llvm2ice --verbose none %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+@__init_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__fini_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__tls_template_start = internal constant [0 x i8] zeroinitializer, align 8
+@__tls_template_alignment = internal constant [4 x i8] c"\01\00\00\00", align 4
+
+define internal i32 @ignore64BitArg(i64 %a, i32 %b, i64 %c) {
+entry:
+ ret i32 %b
+}
+
+define internal i32 @pass64BitArg(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f) {
+entry:
+ %call = call i32 @ignore64BitArgNoInline(i64 %a, i32 123, i64 %b)
+ %call1 = call i32 @ignore64BitArgNoInline(i64 %c, i32 123, i64 %d)
+ %call2 = call i32 @ignore64BitArgNoInline(i64 %e, i32 123, i64 %f)
+ %add = add i32 %call1, %call
+ %add3 = add i32 %add, %call2
+ ret i32 %add3
+}
+; CHECK: pass64BitArg:
+; CHECK: push 123
+; CHECK-NEXT: push
+; CHECK-NEXT: push
+; CHECK-NEXT: call ignore64BitArgNoInline
+; CHECK: push
+; CHECK-NEXT: push
+; CHECK-NEXT: push 123
+; CHECK-NEXT: push
+; CHECK-NEXT: push
+; CHECK-NEXT: call ignore64BitArgNoInline
+; CHECK: push
+; CHECK-NEXT: push
+; CHECK-NEXT: push 123
+; CHECK-NEXT: push
+; CHECK-NEXT: push
+; CHECK-NEXT: call ignore64BitArgNoInline
+
+declare i32 @ignore64BitArgNoInline(i64, i32, i64)
+
+define internal i32 @pass64BitConstArg(i64 %a, i64 %b) {
+entry:
+ %call = call i32 @ignore64BitArgNoInline(i64 %a, i32 123, i64 -2401053092306725256)
+ ret i32 %call
+}
+; CHECK: pass64BitConstArg:
+; CHECK: push 3735928559
+; CHECK-NEXT: push 305419896
+; CHECK-NEXT: push 123
+; CHECK-NEXT: push ecx
+; CHECK-NEXT: push eax
+; CHECK-NEXT: call ignore64BitArgNoInline
+
+define internal i64 @return64BitArg(i64 %a) {
+entry:
+ ret i64 %a
+}
+; CHECK: return64BitArg:
+; CHECK: mov {{.*}}, dword ptr [esp+4]
+; CHECK: mov {{.*}}, dword ptr [esp+8]
+; CHECK: ret
+
+define internal i64 @return64BitConst() {
+entry:
+ ret i64 -2401053092306725256
+}
+; CHECK: return64BitConst:
+; CHECK: mov eax, 305419896
+; CHECK: mov edx, 3735928559
+; CHECK: ret
+
+define internal i64 @add64BitSigned(i64 %a, i64 %b) {
+entry:
+ %add = add i64 %b, %a
+ ret i64 %add
+}
+; CHECK: add64BitSigned:
+; CHECK: add
+; CHECK: adc
+; CHECK: ret
+
+define internal i64 @add64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %add = add i64 %b, %a
+ ret i64 %add
+}
+; CHECK: add64BitUnsigned:
+; CHECK: add
+; CHECK: adc
+; CHECK: ret
+
+define internal i64 @sub64BitSigned(i64 %a, i64 %b) {
+entry:
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+; CHECK: sub64BitSigned:
+; CHECK: sub
+; CHECK: sbb
+; CHECK: ret
+
+define internal i64 @sub64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+; CHECK: sub64BitUnsigned:
+; CHECK: sub
+; CHECK: sbb
+; CHECK: ret
+
+define internal i64 @mul64BitSigned(i64 %a, i64 %b) {
+entry:
+ %mul = mul i64 %b, %a
+ ret i64 %mul
+}
+; CHECK: mul64BitSigned:
+; CHECK: imul
+; CHECK: imul
+; CHECK: mul
+; CHECK: add
+; CHECK: add
+; CHECK: ret
+
+define internal i64 @mul64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %mul = mul i64 %b, %a
+ ret i64 %mul
+}
+; CHECK: mul64BitUnsigned:
+; CHECK: imul
+; CHECK: imul
+; CHECK: mul
+; CHECK: add
+; CHECK: add
+; CHECK: ret
+
+define internal i64 @div64BitSigned(i64 %a, i64 %b) {
+entry:
+ %div = sdiv i64 %a, %b
+ ret i64 %div
+}
+; CHECK: div64BitSigned:
+; CHECK: call __divdi3
+; CHECK: ret
+
+define internal i64 @div64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %div = udiv i64 %a, %b
+ ret i64 %div
+}
+; CHECK: div64BitUnsigned:
+; CHECK: call __udivdi3
+; CHECK: ret
+
+define internal i64 @rem64BitSigned(i64 %a, i64 %b) {
+entry:
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
+; CHECK: rem64BitSigned:
+; CHECK: call __moddi3
+; CHECK: ret
+
+define internal i64 @rem64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
+; CHECK: rem64BitUnsigned:
+; CHECK: call __umoddi3
+; CHECK: ret
+
+define internal i64 @shl64BitSigned(i64 %a, i64 %b) {
+entry:
+ %shl = shl i64 %a, %b
+ ret i64 %shl
+}
+; CHECK: shl64BitSigned:
+; CHECK: shld
+; CHECK: shl e
+; CHECK: test {{.*}}, 32
+; CHECK: je
+
+define internal i64 @shl64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %shl = shl i64 %a, %b
+ ret i64 %shl
+}
+; CHECK: shl64BitUnsigned:
+; CHECK: shld
+; CHECK: shl e
+; CHECK: test {{.*}}, 32
+; CHECK: je
+
+define internal i64 @shr64BitSigned(i64 %a, i64 %b) {
+entry:
+ %shr = ashr i64 %a, %b
+ ret i64 %shr
+}
+; CHECK: shr64BitSigned:
+; CHECK: shrd
+; CHECK: sar
+; CHECK: test {{.*}}, 32
+; CHECK: je
+; CHECK: sar {{.*}}, 31
+
+define internal i64 @shr64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %shr = lshr i64 %a, %b
+ ret i64 %shr
+}
+; CHECK: shr64BitUnsigned:
+; CHECK: shrd
+; CHECK: shr
+; CHECK: test {{.*}}, 32
+; CHECK: je
+
+define internal i64 @and64BitSigned(i64 %a, i64 %b) {
+entry:
+ %and = and i64 %b, %a
+ ret i64 %and
+}
+; CHECK: and64BitSigned:
+; CHECK: and
+; CHECK: and
+
+define internal i64 @and64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %and = and i64 %b, %a
+ ret i64 %and
+}
+; CHECK: and64BitUnsigned:
+; CHECK: and
+; CHECK: and
+
+define internal i64 @or64BitSigned(i64 %a, i64 %b) {
+entry:
+ %or = or i64 %b, %a
+ ret i64 %or
+}
+; CHECK: or64BitSigned:
+; CHECK: or
+; CHECK: or
+
+define internal i64 @or64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %or = or i64 %b, %a
+ ret i64 %or
+}
+; CHECK: or64BitUnsigned:
+; CHECK: or
+; CHECK: or
+
+define internal i64 @xor64BitSigned(i64 %a, i64 %b) {
+entry:
+ %xor = xor i64 %b, %a
+ ret i64 %xor
+}
+; CHECK: xor64BitSigned:
+; CHECK: xor
+; CHECK: xor
+
+define internal i64 @xor64BitUnsigned(i64 %a, i64 %b) {
+entry:
+ %xor = xor i64 %b, %a
+ ret i64 %xor
+}
+; CHECK: xor64BitUnsigned:
+; CHECK: xor
+; CHECK: xor
+
+define internal i32 @trunc64To32Signed(i64 %a) {
+entry:
+ %conv = trunc i64 %a to i32
+ ret i32 %conv
+}
+; CHECK: trunc64To32Signed:
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK-NEXT: ret
+
+define internal i32 @trunc64To16Signed(i64 %a) {
+entry:
+ %conv = trunc i64 %a to i16
+ %conv.ret_ext = sext i16 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: trunc64To16Signed:
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK-NEXT: movsx eax, ax
+; CHECK-NEXT: ret
+
+define internal i32 @trunc64To8Signed(i64 %a) {
+entry:
+ %conv = trunc i64 %a to i8
+ %conv.ret_ext = sext i8 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: trunc64To8Signed:
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK-NEXT: movsx eax, al
+; CHECK-NEXT: ret
+
+define internal i32 @trunc64To32Unsigned(i64 %a) {
+entry:
+ %conv = trunc i64 %a to i32
+ ret i32 %conv
+}
+; CHECK: trunc64To32Unsigned:
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK-NEXT: ret
+
+define internal i32 @trunc64To16Unsigned(i64 %a) {
+entry:
+ %conv = trunc i64 %a to i16
+ %conv.ret_ext = zext i16 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: trunc64To16Unsigned:
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK-NEXT: movzx eax, ax
+; CHECK-NEXT: ret
+
+define internal i32 @trunc64To8Unsigned(i64 %a) {
+entry:
+ %conv = trunc i64 %a to i8
+ %conv.ret_ext = zext i8 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: trunc64To8Unsigned:
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK-NEXT: movzx eax, al
+; CHECK-NEXT: ret
+
+define internal i32 @trunc64To1(i64 %a) {
+entry:
+; %tobool = icmp ne i64 %a, 0
+ %tobool = trunc i64 %a to i1
+ %tobool.ret_ext = zext i1 %tobool to i32
+ ret i32 %tobool.ret_ext
+}
+; CHECK: trunc64To1:
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK: and eax, 1
+; CHECK-NEXT: ret
+
+define internal i64 @sext32To64(i32 %a) {
+entry:
+ %conv = sext i32 %a to i64
+ ret i64 %conv
+}
+; CHECK: sext32To64:
+; CHECK: mov
+; CHECK: sar {{.*}}, 31
+
+define internal i64 @sext16To64(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i16
+ %conv = sext i16 %a.arg_trunc to i64
+ ret i64 %conv
+}
+; CHECK: sext16To64:
+; CHECK: movsx
+; CHECK: sar {{.*}}, 31
+
+define internal i64 @sext8To64(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i8
+ %conv = sext i8 %a.arg_trunc to i64
+ ret i64 %conv
+}
+; CHECK: sext8To64:
+; CHECK: movsx
+; CHECK: sar {{.*}}, 31
+
+define internal i64 @zext32To64(i32 %a) {
+entry:
+ %conv = zext i32 %a to i64
+ ret i64 %conv
+}
+; CHECK: zext32To64:
+; CHECK: mov
+; CHECK: mov {{.*}}, 0
+
+define internal i64 @zext16To64(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i16
+ %conv = zext i16 %a.arg_trunc to i64
+ ret i64 %conv
+}
+; CHECK: zext16To64:
+; CHECK: movzx
+; CHECK: mov {{.*}}, 0
+
+define internal i64 @zext8To64(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i8
+ %conv = zext i8 %a.arg_trunc to i64
+ ret i64 %conv
+}
+; CHECK: zext8To64:
+; CHECK: movzx
+; CHECK: mov {{.*}}, 0
+
+define internal i64 @zext1To64(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i1
+ %conv = zext i1 %a.arg_trunc to i64
+ ret i64 %conv
+}
+; CHECK: zext1To64:
+; CHECK: movzx
+; CHECK: mov {{.*}}, 0
+
+define internal void @icmpEq64(i64 %a, i64 %b, i64 %c, i64 %d) {
+entry:
+ %cmp = icmp eq i64 %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = icmp eq i64 %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.then2, %if.end
+ ret void
+}
+; CHECK: icmpEq64:
+; CHECK: jne
+; CHECK: jne
+; CHECK: call
+; CHECK: jne
+; CHECK: jne
+; CHECK: call
+
+declare void @func()
+
+define internal void @icmpNe64(i64 %a, i64 %b, i64 %c, i64 %d) {
+entry:
+ %cmp = icmp ne i64 %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = icmp ne i64 %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.end, %if.then2
+ ret void
+}
+; CHECK: icmpNe64:
+; CHECK: jne
+; CHECK: jne
+; CHECK: call
+; CHECK: jne
+; CHECK: jne
+; CHECK: call
+
+define internal void @icmpGt64(i64 %a, i64 %b, i64 %c, i64 %d) {
+entry:
+ %cmp = icmp ugt i64 %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = icmp sgt i64 %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.then2, %if.end
+ ret void
+}
+; CHECK: icmpGt64:
+; CHECK: ja
+; CHECK: jb
+; CHECK: ja
+; CHECK: call
+; CHECK: jg
+; CHECK: jl
+; CHECK: ja
+; CHECK: call
+
+define internal void @icmpGe64(i64 %a, i64 %b, i64 %c, i64 %d) {
+entry:
+ %cmp = icmp uge i64 %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = icmp sge i64 %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.end, %if.then2
+ ret void
+}
+; CHECK: icmpGe64:
+; CHECK: ja
+; CHECK: jb
+; CHECK: jae
+; CHECK: call
+; CHECK: jg
+; CHECK: jl
+; CHECK: jae
+; CHECK: call
+
+define internal void @icmpLt64(i64 %a, i64 %b, i64 %c, i64 %d) {
+entry:
+ %cmp = icmp ult i64 %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = icmp slt i64 %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.then2, %if.end
+ ret void
+}
+; CHECK: icmpLt64:
+; CHECK: jb
+; CHECK: ja
+; CHECK: jb
+; CHECK: call
+; CHECK: jl
+; CHECK: jg
+; CHECK: jb
+; CHECK: call
+
+define internal void @icmpLe64(i64 %a, i64 %b, i64 %c, i64 %d) {
+entry:
+ %cmp = icmp ule i64 %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = icmp sle i64 %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.end, %if.then2
+ ret void
+}
+; CHECK: icmpLe64:
+; CHECK: jb
+; CHECK: ja
+; CHECK: jbe
+; CHECK: call
+; CHECK: jl
+; CHECK: jg
+; CHECK: jbe
+; CHECK: call
+
+define internal i32 @icmpEq64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp eq i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpEq64Bool:
+; CHECK: jne
+; CHECK: jne
+
+define internal i32 @icmpNe64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp ne i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpNe64Bool:
+; CHECK: jne
+; CHECK: jne
+
+define internal i32 @icmpSgt64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp sgt i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpSgt64Bool:
+; CHECK: cmp
+; CHECK: jg
+; CHECK: jl
+; CHECK: cmp
+; CHECK: ja
+
+define internal i32 @icmpUgt64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp ugt i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpUgt64Bool:
+; CHECK: cmp
+; CHECK: ja
+; CHECK: jb
+; CHECK: cmp
+; CHECK: ja
+
+define internal i32 @icmpSge64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp sge i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpSge64Bool:
+; CHECK: cmp
+; CHECK: jg
+; CHECK: jl
+; CHECK: cmp
+; CHECK: jae
+
+define internal i32 @icmpUge64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp uge i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpUge64Bool:
+; CHECK: cmp
+; CHECK: ja
+; CHECK: jb
+; CHECK: cmp
+; CHECK: jae
+
+define internal i32 @icmpSlt64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp slt i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpSlt64Bool:
+; CHECK: cmp
+; CHECK: jl
+; CHECK: jg
+; CHECK: cmp
+; CHECK: jb
+
+define internal i32 @icmpUlt64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp ult i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpUlt64Bool:
+; CHECK: cmp
+; CHECK: jb
+; CHECK: ja
+; CHECK: cmp
+; CHECK: jb
+
+define internal i32 @icmpSle64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp sle i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpSle64Bool:
+; CHECK: cmp
+; CHECK: jl
+; CHECK: jg
+; CHECK: cmp
+; CHECK: jbe
+
+define internal i32 @icmpUle64Bool(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp ule i64 %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: icmpUle64Bool:
+; CHECK: cmp
+; CHECK: jb
+; CHECK: ja
+; CHECK: cmp
+; CHECK: jbe
+
+define internal i64 @load64(i32 %a) {
+entry:
+ %a.asptr = inttoptr i32 %a to i64*
+ %v0 = load i64* %a.asptr, align 1
+ ret i64 %v0
+}
+; CHECK: load64:
+; CHECK: mov e[[REGISTER:[a-z]+]], dword ptr [esp+4]
+; CHECK-NEXT: mov {{.*}}, dword ptr [e[[REGISTER]]]
+; CHECK-NEXT: mov {{.*}}, dword ptr [e[[REGISTER]]+4]
+
+define internal void @store64(i32 %a, i64 %value) {
+entry:
+ %a.asptr = inttoptr i32 %a to i64*
+ store i64 %value, i64* %a.asptr, align 1
+ ret void
+}
+; CHECK: store64:
+; CHECK: mov e[[REGISTER:[a-z]+]], dword ptr [esp+4]
+; CHECK: mov dword ptr [e[[REGISTER]]+4],
+; CHECK: mov dword ptr [e[[REGISTER]]],
+
+define internal void @store64Const(i32 %a) {
+entry:
+ %a.asptr = inttoptr i32 %a to i64*
+ store i64 -2401053092306725256, i64* %a.asptr, align 1
+ ret void
+}
+; CHECK: store64Const:
+; CHECK: mov e[[REGISTER:[a-z]+]], dword ptr [esp+4]
+; CHECK: mov dword ptr [e[[REGISTER]]+4], 3735928559
+; CHECK: mov dword ptr [e[[REGISTER]]], 305419896
+
+define internal i64 @select64VarVar(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp ult i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+}
+; CHECK: select64VarVar:
+; CHECK: cmp
+; CHECK: jb
+; CHECK: ja
+; CHECK: cmp
+; CHECK: jb
+; CHECK: cmp
+; CHECK: jne
+
+define internal i64 @select64VarConst(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp ult i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 -2401053092306725256
+ ret i64 %cond
+}
+; CHECK: select64VarConst:
+; CHECK: cmp
+; CHECK: jb
+; CHECK: ja
+; CHECK: cmp
+; CHECK: jb
+; CHECK: cmp
+; CHECK: jne
+
+define internal i64 @select64ConstVar(i64 %a, i64 %b) {
+entry:
+ %cmp = icmp ult i64 %a, %b
+ %cond = select i1 %cmp, i64 -2401053092306725256, i64 %b
+ ret i64 %cond
+}
+; CHECK: select64ConstVar:
+; CHECK: cmp
+; CHECK: jb
+; CHECK: ja
+; CHECK: cmp
+; CHECK: jb
+; CHECK: cmp
+; CHECK: jne
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/alloc.ll b/tests_lit/llvm2ice_tests/alloc.ll
new file mode 100644
index 0000000..f7a2040
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/alloc.ll
@@ -0,0 +1,35 @@
+; RUIN: %llvm2ice --verbose none %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @fixed_400(i32 %n) {
+entry:
+ %array = alloca i8, i32 400, align 16
+ %array.asint = ptrtoint i8* %array to i32
+ call void @f1(i32 %array.asint)
+ ret void
+ ; CHECK: sub esp, 400
+ ; CHECK-NEXT: mov eax, esp
+ ; CHECK-NEXT: push eax
+ ; CHECK-NEXT: call f1
+}
+
+declare void @f1(i32)
+
+define void @variable_n(i32 %n) {
+entry:
+ %array = alloca i8, i32 %n, align 16
+ %array.asint = ptrtoint i8* %array to i32
+ call void @f2(i32 %array.asint)
+ ret void
+ ; CHECK: mov eax, dword ptr [ebp+8]
+ ; CHECK-NEXT: sub esp, eax
+ ; CHECK-NEXT: mov eax, esp
+ ; CHECK-NEXT: push eax
+ ; CHECK-NEXT: call f2
+}
+
+declare void @f2(i32)
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/arith-opt.ll b/tests_lit/llvm2ice_tests/arith-opt.ll
new file mode 100644
index 0000000..becbc9f
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/arith-opt.ll
@@ -0,0 +1,110 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i32 @Add(i32 %a, i32 %b) {
+; CHECK: define i32 @Add
+entry:
+ %add = add i32 %b, %a
+; CHECK: add
+ tail call void @Use(i32 %add)
+; CHECK: call Use
+ ret i32 %add
+}
+
+declare void @Use(i32)
+
+define i32 @And(i32 %a, i32 %b) {
+; CHECK: define i32 @And
+entry:
+ %and = and i32 %b, %a
+; CHECK: and
+ tail call void @Use(i32 %and)
+; CHECK: call Use
+ ret i32 %and
+}
+
+define i32 @Or(i32 %a, i32 %b) {
+; CHECK: define i32 @Or
+entry:
+ %or = or i32 %b, %a
+; CHECK: or
+ tail call void @Use(i32 %or)
+; CHECK: call Use
+ ret i32 %or
+}
+
+define i32 @Xor(i32 %a, i32 %b) {
+; CHECK: define i32 @Xor
+entry:
+ %xor = xor i32 %b, %a
+; CHECK: xor
+ tail call void @Use(i32 %xor)
+; CHECK: call Use
+ ret i32 %xor
+}
+
+define i32 @Sub(i32 %a, i32 %b) {
+; CHECK: define i32 @Sub
+entry:
+ %sub = sub i32 %a, %b
+; CHECK: sub
+ tail call void @Use(i32 %sub)
+; CHECK: call Use
+ ret i32 %sub
+}
+
+define i32 @Mul(i32 %a, i32 %b) {
+; CHECK: define i32 @Mul
+entry:
+ %mul = mul i32 %b, %a
+; CHECK: imul
+ tail call void @Use(i32 %mul)
+; CHECK: call Use
+ ret i32 %mul
+}
+
+define i32 @Sdiv(i32 %a, i32 %b) {
+; CHECK: define i32 @Sdiv
+entry:
+ %div = sdiv i32 %a, %b
+; CHECK: cdq
+; CHECK: idiv
+ tail call void @Use(i32 %div)
+; CHECK: call Use
+ ret i32 %div
+}
+
+define i32 @Srem(i32 %a, i32 %b) {
+; CHECK: define i32 @Srem
+entry:
+ %rem = srem i32 %a, %b
+; CHECK: cdq
+; CHECK: idiv
+ tail call void @Use(i32 %rem)
+; CHECK: call Use
+ ret i32 %rem
+}
+
+define i32 @Udiv(i32 %a, i32 %b) {
+; CHECK: define i32 @Udiv
+entry:
+ %div = udiv i32 %a, %b
+; CHECK: div
+ tail call void @Use(i32 %div)
+; CHECK: call Use
+ ret i32 %div
+}
+
+define i32 @Urem(i32 %a, i32 %b) {
+; CHECK: define i32 @Urem
+entry:
+ %rem = urem i32 %a, %b
+; CHECK: div
+ tail call void @Use(i32 %rem)
+; CHECK: call Use
+ ret i32 %rem
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/arithmetic-chain.ll b/tests_lit/llvm2ice_tests/arithmetic-chain.ll
new file mode 100644
index 0000000..3ca1ad0
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/arithmetic-chain.ll
@@ -0,0 +1,24 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i64 @arithmetic_chain(i64 %foo, i64 %bar) {
+entry:
+ %r1 = add i64 %foo, %bar
+ %r2 = add i64 %foo, %r1
+ %r3 = mul i64 %bar, %r1
+ %r4 = shl i64 %r3, %r2
+ %r5 = add i64 %r4, 8
+ ret i64 %r5
+
+; CHECK: entry:
+; CHECK-NEXT: %r1 = add i64 %foo, %bar
+; CHECK-NEXT: %r2 = add i64 %foo, %r1
+; CHECK-NEXT: %r3 = mul i64 %bar, %r1
+; CHECK-NEXT: %r4 = shl i64 %r3, %r2
+; CHECK-NEXT: %r5 = add i64 %r4, 8
+; CHECK-NEXT: ret i64 %r5
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/bitcast.ll b/tests_lit/llvm2ice_tests/bitcast.ll
new file mode 100644
index 0000000..1a6623f
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/bitcast.ll
@@ -0,0 +1,30 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define internal i32 @cast_f2i(float %f) {
+entry:
+ %v0 = bitcast float %f to i32
+ ret i32 %v0
+}
+
+define internal float @cast_i2f(i32 %i) {
+entry:
+ %v0 = bitcast i32 %i to float
+ ret float %v0
+}
+
+define internal i64 @cast_d2ll(double %d) {
+entry:
+ %v0 = bitcast double %d to i64
+ ret i64 %v0
+}
+
+define internal double @cast_ll2d(i64 %ll) {
+entry:
+ %v0 = bitcast i64 %ll to double
+ ret double %v0
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/bool-opt.ll b/tests_lit/llvm2ice_tests/bool-opt.ll
new file mode 100644
index 0000000..787228c
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/bool-opt.ll
@@ -0,0 +1,16 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @testBool(i32 %a, i32 %b) {
+entry:
+ %cmp = icmp eq i32 %a, %b
+ tail call void @use(i1 %cmp)
+ ret void
+}
+
+declare void @use(i1 zeroext) #1
+
+; CHECK-NOT: ICE translation error
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/branch-simple.ll b/tests_lit/llvm2ice_tests/branch-simple.ll
new file mode 100644
index 0000000..502287a
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/branch-simple.ll
@@ -0,0 +1,21 @@
+; RUIN: %llvm2ice %s -verbose inst | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i32 @simple_cond_branch(i32 %foo, i32 %bar) {
+entry:
+ %r1 = icmp eq i32 %foo, %bar
+ br i1 %r1, label %Equal, label %Unequal
+Equal:
+ ret i32 %foo
+Unequal:
+ ret i32 %bar
+; CHECK: br i1 %r1, label %Equal, label %Unequal
+; CHECK: Equal:
+; CHECK: ret i32 %foo
+; CHECK: Unequal:
+; CHECK: ret i32 %bar
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/call.ll b/tests_lit/llvm2ice_tests/call.ll
new file mode 100644
index 0000000..6503dec
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/call.ll
@@ -0,0 +1,66 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i32 @fib(i32 %n) {
+; CHECK: define i32 @fib
+entry:
+ %cmp = icmp slt i32 %n, 2
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %sub = add i32 %n, -1
+ %call = tail call i32 @fib(i32 %sub)
+ %sub1 = add i32 %n, -2
+ %call2 = tail call i32 @fib(i32 %sub1)
+ %add = add i32 %call2, %call
+ ret i32 %add
+
+return: ; preds = %entry
+ ret i32 %n
+}
+
+define i32 @fact(i32 %n) {
+; CHECK: define i32 @fact
+entry:
+ %cmp = icmp slt i32 %n, 2
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %sub = add i32 %n, -1
+ %call = tail call i32 @fact(i32 %sub)
+ %mul = mul i32 %call, %n
+ ret i32 %mul
+
+return: ; preds = %entry
+ ret i32 %n
+}
+
+define i32 @redirect(i32 %n) {
+; CHECK: define i32 @redirect
+entry:
+ %call = tail call i32 @redirect_target(i32 %n)
+ ret i32 %call
+}
+
+declare i32 @redirect_target(i32)
+
+define void @call_void(i32 %n) {
+; CHECK: define void @call_void
+entry:
+ %cmp2 = icmp sgt i32 %n, 0
+ br i1 %cmp2, label %if.then, label %if.end
+
+if.then: ; preds = %entry, %if.then
+ %n.tr3 = phi i32 [ %call.i, %if.then ], [ %n, %entry ]
+ %sub = add i32 %n.tr3, -1
+ %call.i = tail call i32 @redirect_target(i32 %sub)
+ %cmp = icmp sgt i32 %call.i, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/callindirect.pnacl.ll b/tests_lit/llvm2ice_tests/callindirect.pnacl.ll
new file mode 100644
index 0000000..d07ad77
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/callindirect.pnacl.ll
@@ -0,0 +1,27 @@
+; RUIN: %llvm2ice --verbose none %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+@__init_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__fini_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__tls_template_start = internal constant [0 x i8] zeroinitializer, align 8
+@__tls_template_alignment = internal constant [4 x i8] c"\01\00\00\00", align 4
+
+define internal void @CallIndirect(i32 %f) {
+entry:
+ %f.asptr = inttoptr i32 %f to void ()*
+ call void %f.asptr()
+ call void %f.asptr()
+ call void %f.asptr()
+ call void %f.asptr()
+ call void %f.asptr()
+ ret void
+}
+; CHECK: call [[REGISTER:[a-z]+]]
+; CHECK: call [[REGISTER]]
+; CHECK: call [[REGISTER]]
+; CHECK: call [[REGISTER]]
+; CHECK: call [[REGISTER]]
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/casts.ll b/tests_lit/llvm2ice_tests/casts.ll
new file mode 100644
index 0000000..a9617ae
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/casts.ll
@@ -0,0 +1,16 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i64 @simple_zext(i32 %arg) {
+entry:
+ %c = zext i32 %arg to i64
+ ret i64 %c
+
+; CHECK: entry:
+; CHECK-NEXT: %c = zext i32 %arg to i64
+; CHECK-NEXT: ret i64 %c
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/cmp-opt.ll b/tests_lit/llvm2ice_tests/cmp-opt.ll
new file mode 100644
index 0000000..15aee2a
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/cmp-opt.ll
@@ -0,0 +1,41 @@
+; RUIN: %llvm2ice %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @testBool(i32 %a, i32 %b) {
+entry:
+ %cmp = icmp slt i32 %a, %b
+ %cmp1 = icmp sgt i32 %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void @use(i1 %cmp)
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ br i1 %cmp1, label %if.then5, label %if.end7
+
+if.then5: ; preds = %if.end
+ tail call void @use(i1 %cmp1)
+ br label %if.end7
+
+if.end7: ; preds = %if.then5, %if.end
+ ret void
+}
+
+declare void @use(i1 zeroext)
+
+; ERRORS-NOT: ICE translation error
+
+; CHECK: .globl testBool
+; Two bool computations
+; CHECK: cmp
+; CHECK: cmp
+; Test first bool
+; CHECK: cmp
+; CHECK: call
+; Test second bool
+; CHECK: cmp
+; CHECK: call
+; CHECK: ret
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/convert.ll b/tests_lit/llvm2ice_tests/convert.ll
new file mode 100644
index 0000000..53a1de4
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/convert.ll
@@ -0,0 +1,180 @@
+; RUIN: %llvm2ice %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+@i8v = common global i8 0, align 1
+@i16v = common global i16 0, align 2
+@i32v = common global i32 0, align 4
+@i64v = common global i64 0, align 8
+@u8v = common global i8 0, align 1
+@u16v = common global i16 0, align 2
+@u32v = common global i32 0, align 4
+@u64v = common global i64 0, align 8
+@i1 = common global i32 0, align 4
+@i2 = common global i32 0, align 4
+@u1 = common global i32 0, align 4
+@u2 = common global i32 0, align 4
+
+define void @from_int8() {
+entry:
+ %v0 = load i8* @i8v, align 1
+ %v1 = sext i8 %v0 to i16
+ store i16 %v1, i16* @i16v, align 1
+ %v2 = sext i8 %v0 to i32
+ store i32 %v2, i32* @i32v, align 1
+ %v3 = sext i8 %v0 to i64
+ store i64 %v3, i64* @i64v, align 1
+ ret void
+ ; CHECK: mov al, byte ptr [
+ ; CHECK-NEXT: movsx cx, al
+ ; CHECK-NEXT: mov word ptr [
+ ; CHECK-NEXT: movsx ecx, al
+ ; CHECK-NEXT: mov dword ptr [
+ ; CHECK-NEXT: movsx ecx, al
+ ; CHECK-NEXT: sar eax, 31
+ ; CHECK-NEXT: mov dword ptr [i64v+4],
+ ; CHECK-NEXT: mov dword ptr [i64v],
+}
+
+define void @from_int16() {
+entry:
+ %v0 = load i16* @i16v, align 1
+ %v1 = trunc i16 %v0 to i8
+ store i8 %v1, i8* @i8v, align 1
+ %v2 = sext i16 %v0 to i32
+ store i32 %v2, i32* @i32v, align 1
+ %v3 = sext i16 %v0 to i64
+ store i64 %v3, i64* @i64v, align 1
+ ret void
+ ; CHECK: mov ax, word ptr [
+ ; CHECK-NEXT: mov cx, ax
+ ; CHECK-NEXT: mov byte ptr [
+ ; CHECK-NEXT: movsx ecx, ax
+ ; CHECK-NEXT: mov dword ptr [
+ ; CHECK-NEXT: movsx ecx, ax
+ ; CHECK-NEXT: sar eax, 31
+ ; CHECK-NEXT: mov dword ptr [i64v+4],
+ ; CHECK-NEXT: mov dword ptr [i64v],
+}
+
+define void @from_int32() {
+entry:
+ %v0 = load i32* @i32v, align 1
+ %v1 = trunc i32 %v0 to i8
+ store i8 %v1, i8* @i8v, align 1
+ %v2 = trunc i32 %v0 to i16
+ store i16 %v2, i16* @i16v, align 1
+ %v3 = sext i32 %v0 to i64
+ store i64 %v3, i64* @i64v, align 1
+ ret void
+ ; CHECK: mov eax, dword ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov byte ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov word ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: sar eax, 31
+ ; CHECK-NEXT: mov dword ptr [i64v+4],
+ ; CHECK-NEXT: mov dword ptr [i64v],
+}
+
+define void @from_int64() {
+entry:
+ %v0 = load i64* @i64v, align 1
+ %v1 = trunc i64 %v0 to i8
+ store i8 %v1, i8* @i8v, align 1
+ %v2 = trunc i64 %v0 to i16
+ store i16 %v2, i16* @i16v, align 1
+ %v3 = trunc i64 %v0 to i32
+ store i32 %v3, i32* @i32v, align 1
+ ret void
+ ; CHECK: mov eax, dword ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov byte ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov word ptr [
+ ; CHECK-NEXT: mov dword ptr [
+}
+
+define void @from_uint8() {
+entry:
+ %v0 = load i8* @u8v, align 1
+ %v1 = zext i8 %v0 to i16
+ store i16 %v1, i16* @i16v, align 1
+ %v2 = zext i8 %v0 to i32
+ store i32 %v2, i32* @i32v, align 1
+ %v3 = zext i8 %v0 to i64
+ store i64 %v3, i64* @i64v, align 1
+ ret void
+ ; CHECK: mov al, byte ptr [
+ ; CHECK-NEXT: movzx cx, al
+ ; CHECK-NEXT: mov word ptr [
+ ; CHECK-NEXT: movzx ecx, al
+ ; CHECK-NEXT: mov dword ptr [
+ ; CHECK-NEXT: movzx eax, al
+ ; CHECK-NEXT: mov ecx, 0
+ ; CHECK-NEXT: mov dword ptr [i64v+4],
+ ; CHECK-NEXT: mov dword ptr [i64v],
+}
+
+define void @from_uint16() {
+entry:
+ %v0 = load i16* @u16v, align 1
+ %v1 = trunc i16 %v0 to i8
+ store i8 %v1, i8* @i8v, align 1
+ %v2 = zext i16 %v0 to i32
+ store i32 %v2, i32* @i32v, align 1
+ %v3 = zext i16 %v0 to i64
+ store i64 %v3, i64* @i64v, align 1
+ ret void
+ ; CHECK: mov ax, word ptr [
+ ; CHECK-NEXT: mov cx, ax
+ ; CHECK-NEXT: mov byte ptr [
+ ; CHECK-NEXT: movzx ecx, ax
+ ; CHECK-NEXT: mov dword ptr [
+ ; CHECK-NEXT: movzx eax, ax
+ ; CHECK-NEXT: mov ecx, 0
+ ; CHECK-NEXT: mov dword ptr [i64v+4],
+ ; CHECK-NEXT: mov dword ptr [i64v],
+}
+
+define void @from_uint32() {
+entry:
+ %v0 = load i32* @u32v, align 1
+ %v1 = trunc i32 %v0 to i8
+ store i8 %v1, i8* @i8v, align 1
+ %v2 = trunc i32 %v0 to i16
+ store i16 %v2, i16* @i16v, align 1
+ %v3 = zext i32 %v0 to i64
+ store i64 %v3, i64* @i64v, align 1
+ ret void
+ ; CHECK: mov eax, dword ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov byte ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov word ptr [
+ ; CHECK-NEXT: mov ecx, 0
+ ; CHECK-NEXT: mov dword ptr [i64v+4],
+ ; CHECK-NEXT: mov dword ptr [i64v],
+}
+
+define void @from_uint64() {
+entry:
+ %v0 = load i64* @u64v, align 1
+ %v1 = trunc i64 %v0 to i8
+ store i8 %v1, i8* @i8v, align 1
+ %v2 = trunc i64 %v0 to i16
+ store i16 %v2, i16* @i16v, align 1
+ %v3 = trunc i64 %v0 to i32
+ store i32 %v3, i32* @i32v, align 1
+ ret void
+ ; CHECK: mov eax, dword ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov byte ptr [
+ ; CHECK-NEXT: mov ecx, eax
+ ; CHECK-NEXT: mov word ptr [
+ ; CHECK-NEXT: mov dword ptr [
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/empty-func.ll b/tests_lit/llvm2ice_tests/empty-func.ll
new file mode 100644
index 0000000..a5edc02
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/empty-func.ll
@@ -0,0 +1,14 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @foo() {
+; CHECK: define void @foo()
+entry:
+ ret void
+; CHECK: entry
+; CHECK-NEXT: ret void
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/fp.pnacl.ll b/tests_lit/llvm2ice_tests/fp.pnacl.ll
new file mode 100644
index 0000000..8981c9e
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/fp.pnacl.ll
@@ -0,0 +1,1099 @@
+; RUIN: %llvm2ice --verbose none %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+@__init_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__fini_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__tls_template_start = internal constant [0 x i8] zeroinitializer, align 8
+@__tls_template_alignment = internal constant [4 x i8] c"\01\00\00\00", align 4
+
+define internal i32 @doubleArgs(double %a, i32 %b, double %c) {
+entry:
+ ret i32 %b
+}
+; CHECK: doubleArgs:
+; CHECK: mov eax, dword ptr [esp+12]
+; CHECK-NEXT: ret
+
+define internal i32 @floatArgs(float %a, i32 %b, float %c) {
+entry:
+ ret i32 %b
+}
+; CHECK: floatArgs:
+; CHECK: mov eax, dword ptr [esp+8]
+; CHECK-NEXT: ret
+
+define internal i32 @passFpArgs(float %a, double %b, float %c, double %d, float %e, double %f) {
+entry:
+ %call = call i32 @ignoreFpArgsNoInline(float %a, i32 123, double %b)
+ %call1 = call i32 @ignoreFpArgsNoInline(float %c, i32 123, double %d)
+ %call2 = call i32 @ignoreFpArgsNoInline(float %e, i32 123, double %f)
+ %add = add i32 %call1, %call
+ %add3 = add i32 %add, %call2
+ ret i32 %add3
+}
+; CHECK: passFpArgs:
+; CHECK: push 123
+; CHECK: call ignoreFpArgsNoInline
+; CHECK: push 123
+; CHECK: call ignoreFpArgsNoInline
+; CHECK: push 123
+; CHECK: call ignoreFpArgsNoInline
+
+declare i32 @ignoreFpArgsNoInline(float, i32, double)
+
+define internal i32 @passFpConstArg(float %a, double %b) {
+entry:
+ %call = call i32 @ignoreFpArgsNoInline(float %a, i32 123, double 2.340000e+00)
+ ret i32 %call
+}
+; CHECK: passFpConstArg:
+; CHECK: push 123
+; CHECK: call ignoreFpArgsNoInline
+
+define internal float @returnFloatArg(float %a) {
+entry:
+ ret float %a
+}
+; CHECK: returnFloatArg:
+; CHECK: fld dword ptr [esp
+
+define internal double @returnDoubleArg(double %a) {
+entry:
+ ret double %a
+}
+; CHECK: returnDoubleArg:
+; CHECK: fld qword ptr [esp
+
+define internal float @returnFloatConst() {
+entry:
+ ret float 0x3FF3AE1480000000
+}
+; CHECK: returnFloatConst:
+; CHECK: fld
+
+define internal double @returnDoubleConst() {
+entry:
+ ret double 1.230000e+00
+}
+; CHECK: returnDoubleConst:
+; CHECK: fld
+
+define internal float @addFloat(float %a, float %b) {
+entry:
+ %add = fadd float %a, %b
+ ret float %add
+}
+; CHECK: addFloat:
+; CHECK: addss
+; CHECK: fld
+
+define internal double @addDouble(double %a, double %b) {
+entry:
+ %add = fadd double %a, %b
+ ret double %add
+}
+; CHECK: addDouble:
+; CHECK: addsd
+; CHECK: fld
+
+define internal float @subFloat(float %a, float %b) {
+entry:
+ %sub = fsub float %a, %b
+ ret float %sub
+}
+; CHECK: subFloat:
+; CHECK: subss
+; CHECK: fld
+
+define internal double @subDouble(double %a, double %b) {
+entry:
+ %sub = fsub double %a, %b
+ ret double %sub
+}
+; CHECK: subDouble:
+; CHECK: subsd
+; CHECK: fld
+
+define internal float @mulFloat(float %a, float %b) {
+entry:
+ %mul = fmul float %a, %b
+ ret float %mul
+}
+; CHECK: mulFloat:
+; CHECK: mulss
+; CHECK: fld
+
+define internal double @mulDouble(double %a, double %b) {
+entry:
+ %mul = fmul double %a, %b
+ ret double %mul
+}
+; CHECK: mulDouble:
+; CHECK: mulsd
+; CHECK: fld
+
+define internal float @divFloat(float %a, float %b) {
+entry:
+ %div = fdiv float %a, %b
+ ret float %div
+}
+; CHECK: divFloat:
+; CHECK: divss
+; CHECK: fld
+
+define internal double @divDouble(double %a, double %b) {
+entry:
+ %div = fdiv double %a, %b
+ ret double %div
+}
+; CHECK: divDouble:
+; CHECK: divsd
+; CHECK: fld
+
+define internal float @remFloat(float %a, float %b) {
+entry:
+ %div = frem float %a, %b
+ ret float %div
+}
+; CHECK: remFloat:
+; CHECK: call fmodf
+
+define internal double @remDouble(double %a, double %b) {
+entry:
+ %div = frem double %a, %b
+ ret double %div
+}
+; CHECK: remDouble:
+; CHECK: call fmod
+
+define internal float @fptrunc(double %a) {
+entry:
+ %conv = fptrunc double %a to float
+ ret float %conv
+}
+; CHECK: fptrunc:
+; CHECK: cvtsd2ss
+; CHECK: fld
+
+define internal double @fpext(float %a) {
+entry:
+ %conv = fpext float %a to double
+ ret double %conv
+}
+; CHECK: fpext:
+; CHECK: cvtss2sd
+; CHECK: fld
+
+define internal i64 @doubleToSigned64(double %a) {
+entry:
+ %conv = fptosi double %a to i64
+ ret i64 %conv
+}
+; CHECK: doubleToSigned64:
+; CHECK: call cvtdtosi64
+
+define internal i64 @floatToSigned64(float %a) {
+entry:
+ %conv = fptosi float %a to i64
+ ret i64 %conv
+}
+; CHECK: floatToSigned64:
+; CHECK: call cvtftosi64
+
+define internal i64 @doubleToUnsigned64(double %a) {
+entry:
+ %conv = fptoui double %a to i64
+ ret i64 %conv
+}
+; CHECK: doubleToUnsigned64:
+; CHECK: call cvtdtoui64
+
+define internal i64 @floatToUnsigned64(float %a) {
+entry:
+ %conv = fptoui float %a to i64
+ ret i64 %conv
+}
+; CHECK: floatToUnsigned64:
+; CHECK: call cvtftoui64
+
+define internal i32 @doubleToSigned32(double %a) {
+entry:
+ %conv = fptosi double %a to i32
+ ret i32 %conv
+}
+; CHECK: doubleToSigned32:
+; CHECK: cvtsd2si
+
+define internal i32 @floatToSigned32(float %a) {
+entry:
+ %conv = fptosi float %a to i32
+ ret i32 %conv
+}
+; CHECK: floatToSigned32:
+; CHECK: cvtss2si
+
+define internal i32 @doubleToUnsigned32(double %a) {
+entry:
+ %conv = fptoui double %a to i32
+ ret i32 %conv
+}
+; CHECK: doubleToUnsigned32:
+; CHECK: call cvtdtoui32
+
+define internal i32 @floatToUnsigned32(float %a) {
+entry:
+ %conv = fptoui float %a to i32
+ ret i32 %conv
+}
+; CHECK: floatToUnsigned32:
+; CHECK: call cvtftoui32
+
+define internal i32 @doubleToSigned16(double %a) {
+entry:
+ %conv = fptosi double %a to i16
+ %conv.ret_ext = sext i16 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: doubleToSigned16:
+; CHECK: cvtsd2si
+; CHECK: movsx
+
+define internal i32 @floatToSigned16(float %a) {
+entry:
+ %conv = fptosi float %a to i16
+ %conv.ret_ext = sext i16 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: floatToSigned16:
+; CHECK: cvtss2si
+; CHECK: movsx
+
+define internal i32 @doubleToUnsigned16(double %a) {
+entry:
+ %conv = fptoui double %a to i16
+ %conv.ret_ext = zext i16 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: doubleToUnsigned16:
+; CHECK: cvtsd2si
+; CHECK: movzx
+
+define internal i32 @floatToUnsigned16(float %a) {
+entry:
+ %conv = fptoui float %a to i16
+ %conv.ret_ext = zext i16 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: floatToUnsigned16:
+; CHECK: cvtss2si
+; CHECK: movzx
+
+define internal i32 @doubleToSigned8(double %a) {
+entry:
+ %conv = fptosi double %a to i8
+ %conv.ret_ext = sext i8 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: doubleToSigned8:
+; CHECK: cvtsd2si
+; CHECK: movsx
+
+define internal i32 @floatToSigned8(float %a) {
+entry:
+ %conv = fptosi float %a to i8
+ %conv.ret_ext = sext i8 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: floatToSigned8:
+; CHECK: cvtss2si
+; CHECK: movsx
+
+define internal i32 @doubleToUnsigned8(double %a) {
+entry:
+ %conv = fptoui double %a to i8
+ %conv.ret_ext = zext i8 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: doubleToUnsigned8:
+; CHECK: cvtsd2si
+; CHECK: movzx
+
+define internal i32 @floatToUnsigned8(float %a) {
+entry:
+ %conv = fptoui float %a to i8
+ %conv.ret_ext = zext i8 %conv to i32
+ ret i32 %conv.ret_ext
+}
+; CHECK: floatToUnsigned8:
+; CHECK: cvtss2si
+; CHECK: movzx
+
+define internal i32 @doubleToUnsigned1(double %a) {
+entry:
+ %tobool = fptoui double %a to i1
+ %tobool.ret_ext = zext i1 %tobool to i32
+ ret i32 %tobool.ret_ext
+}
+; CHECK: doubleToUnsigned1:
+; CHECK: cvtsd2si
+; CHECK: and eax, 1
+
+define internal i32 @floatToUnsigned1(float %a) {
+entry:
+ %tobool = fptoui float %a to i1
+ %tobool.ret_ext = zext i1 %tobool to i32
+ ret i32 %tobool.ret_ext
+}
+; CHECK: floatToUnsigned1:
+; CHECK: cvtss2si
+; CHECK: and eax, 1
+
+define internal double @signed64ToDouble(i64 %a) {
+entry:
+ %conv = sitofp i64 %a to double
+ ret double %conv
+}
+; CHECK: signed64ToDouble:
+; CHECK: call cvtsi64tod
+; CHECK: fstp
+
+define internal float @signed64ToFloat(i64 %a) {
+entry:
+ %conv = sitofp i64 %a to float
+ ret float %conv
+}
+; CHECK: signed64ToFloat:
+; CHECK: call cvtsi64tof
+; CHECK: fstp
+
+define internal double @unsigned64ToDouble(i64 %a) {
+entry:
+ %conv = uitofp i64 %a to double
+ ret double %conv
+}
+; CHECK: unsigned64ToDouble:
+; CHECK: call cvtui64tod
+; CHECK: fstp
+
+define internal float @unsigned64ToFloat(i64 %a) {
+entry:
+ %conv = uitofp i64 %a to float
+ ret float %conv
+}
+; CHECK: unsigned64ToFloat:
+; CHECK: call cvtui64tof
+; CHECK: fstp
+
+define internal double @signed32ToDouble(i32 %a) {
+entry:
+ %conv = sitofp i32 %a to double
+ ret double %conv
+}
+; CHECK: signed32ToDouble:
+; CHECK: cvtsi2sd
+; CHECK: fld
+
+define internal float @signed32ToFloat(i32 %a) {
+entry:
+ %conv = sitofp i32 %a to float
+ ret float %conv
+}
+; CHECK: signed32ToFloat:
+; CHECK: cvtsi2ss
+; CHECK: fld
+
+define internal double @unsigned32ToDouble(i32 %a) {
+entry:
+ %conv = uitofp i32 %a to double
+ ret double %conv
+}
+; CHECK: unsigned32ToDouble:
+; CHECK: call cvtui32tod
+; CHECK: fstp
+
+define internal float @unsigned32ToFloat(i32 %a) {
+entry:
+ %conv = uitofp i32 %a to float
+ ret float %conv
+}
+; CHECK: unsigned32ToFloat:
+; CHECK: call cvtui32tof
+; CHECK: fstp
+
+define internal double @signed16ToDouble(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i16
+ %conv = sitofp i16 %a.arg_trunc to double
+ ret double %conv
+}
+; CHECK: signed16ToDouble:
+; CHECK: cvtsi2sd
+; CHECK: fld
+
+define internal float @signed16ToFloat(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i16
+ %conv = sitofp i16 %a.arg_trunc to float
+ ret float %conv
+}
+; CHECK: signed16ToFloat:
+; CHECK: cvtsi2ss
+; CHECK: fld
+
+define internal double @unsigned16ToDouble(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i16
+ %conv = uitofp i16 %a.arg_trunc to double
+ ret double %conv
+}
+; CHECK: unsigned16ToDouble:
+; CHECK: cvtsi2sd
+; CHECK: fld
+
+define internal float @unsigned16ToFloat(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i16
+ %conv = uitofp i16 %a.arg_trunc to float
+ ret float %conv
+}
+; CHECK: unsigned16ToFloat:
+; CHECK: cvtsi2ss
+; CHECK: fld
+
+define internal double @signed8ToDouble(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i8
+ %conv = sitofp i8 %a.arg_trunc to double
+ ret double %conv
+}
+; CHECK: signed8ToDouble:
+; CHECK: cvtsi2sd
+; CHECK: fld
+
+define internal float @signed8ToFloat(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i8
+ %conv = sitofp i8 %a.arg_trunc to float
+ ret float %conv
+}
+; CHECK: signed8ToFloat:
+; CHECK: cvtsi2ss
+; CHECK: fld
+
+define internal double @unsigned8ToDouble(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i8
+ %conv = uitofp i8 %a.arg_trunc to double
+ ret double %conv
+}
+; CHECK: unsigned8ToDouble:
+; CHECK: cvtsi2sd
+; CHECK: fld
+
+define internal float @unsigned8ToFloat(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i8
+ %conv = uitofp i8 %a.arg_trunc to float
+ ret float %conv
+}
+; CHECK: unsigned8ToFloat:
+; CHECK: cvtsi2ss
+; CHECK: fld
+
+define internal double @unsigned1ToDouble(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i1
+ %conv = uitofp i1 %a.arg_trunc to double
+ ret double %conv
+}
+; CHECK: unsigned1ToDouble:
+; CHECK: cvtsi2sd
+; CHECK: fld
+
+define internal float @unsigned1ToFloat(i32 %a) {
+entry:
+ %a.arg_trunc = trunc i32 %a to i1
+ %conv = uitofp i1 %a.arg_trunc to float
+ ret float %conv
+}
+; CHECK: unsigned1ToFloat:
+; CHECK: cvtsi2ss
+; CHECK: fld
+
+define internal void @fcmpEq(float %a, float %b, double %c, double %d) {
+entry:
+ %cmp = fcmp oeq float %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = fcmp oeq double %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.then2, %if.end
+ ret void
+}
+; CHECK: fcmpEq:
+; CHECK: ucomiss
+; CHECK: jne .
+; CHECK-NEXT: jp .
+; CHECK: call func
+; CHECK: ucomisd
+; CHECK: jne .
+; CHECK-NEXT: jp .
+; CHECK: call func
+
+declare void @func()
+
+define internal void @fcmpNe(float %a, float %b, double %c, double %d) {
+entry:
+ %cmp = fcmp une float %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = fcmp une double %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.then2, %if.end
+ ret void
+}
+; CHECK: fcmpNe:
+; CHECK: ucomiss
+; CHECK: jne .
+; CHECK-NEXT: jp .
+; CHECK: call func
+; CHECK: ucomisd
+; CHECK: jne .
+; CHECK-NEXT: jp .
+; CHECK: call func
+
+define internal void @fcmpGt(float %a, float %b, double %c, double %d) {
+entry:
+ %cmp = fcmp ogt float %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = fcmp ogt double %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.then2, %if.end
+ ret void
+}
+; CHECK: fcmpGt:
+; CHECK: ucomiss
+; CHECK: ja .
+; CHECK: call func
+; CHECK: ucomisd
+; CHECK: ja .
+; CHECK: call func
+
+define internal void @fcmpGe(float %a, float %b, double %c, double %d) {
+entry:
+ %cmp = fcmp ult float %a, %b
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ %cmp1 = fcmp ult double %c, %d
+ br i1 %cmp1, label %if.end3, label %if.then2
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.end, %if.then2
+ ret void
+}
+; CHECK: fcmpGe:
+; CHECK: ucomiss
+; CHECK: jb .
+; CHECK: call func
+; CHECK: ucomisd
+; CHECK: jb .
+; CHECK: call func
+
+define internal void @fcmpLt(float %a, float %b, double %c, double %d) {
+entry:
+ %cmp = fcmp olt float %a, %b
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ %cmp1 = fcmp olt double %c, %d
+ br i1 %cmp1, label %if.then2, label %if.end3
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.then2, %if.end
+ ret void
+}
+; CHECK: fcmpLt:
+; CHECK: ucomiss
+; CHECK: ja .
+; CHECK: call func
+; CHECK: ucomisd
+; CHECK: ja .
+; CHECK: call func
+
+define internal void @fcmpLe(float %a, float %b, double %c, double %d) {
+entry:
+ %cmp = fcmp ugt float %a, %b
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ call void @func()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ %cmp1 = fcmp ugt double %c, %d
+ br i1 %cmp1, label %if.end3, label %if.then2
+
+if.then2: ; preds = %if.end
+ call void @func()
+ br label %if.end3
+
+if.end3: ; preds = %if.end, %if.then2
+ ret void
+}
+; CHECK: fcmpLe:
+; CHECK: ucomiss
+; CHECK: jb .
+; CHECK: call func
+; CHECK: ucomisd
+; CHECK: jb .
+; CHECK: call func
+
+define internal i32 @fcmpFalseFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp false float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpFalseFloat:
+; CHECK: mov {{.*}}, 0
+
+define internal i32 @fcmpFalseDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp false double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpFalseDouble:
+; CHECK: mov {{.*}}, 0
+
+define internal i32 @fcmpOeqFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp oeq float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOeqFloat:
+; CHECK: ucomiss
+; CHECK: jne .
+; CHECK: jp .
+
+define internal i32 @fcmpOeqDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp oeq double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOeqDouble:
+; CHECK: ucomisd
+; CHECK: jne .
+; CHECK: jp .
+
+define internal i32 @fcmpOgtFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp ogt float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOgtFloat:
+; CHECK: ucomiss
+; CHECK: ja .
+
+define internal i32 @fcmpOgtDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp ogt double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOgtDouble:
+; CHECK: ucomisd
+; CHECK: ja .
+
+define internal i32 @fcmpOgeFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp oge float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOgeFloat:
+; CHECK: ucomiss
+; CHECK: jae .
+
+define internal i32 @fcmpOgeDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp oge double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOgeDouble:
+; CHECK: ucomisd
+; CHECK: jae .
+
+define internal i32 @fcmpOltFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp olt float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOltFloat:
+; CHECK: ucomiss
+; CHECK: ja .
+
+define internal i32 @fcmpOltDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp olt double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOltDouble:
+; CHECK: ucomisd
+; CHECK: ja .
+
+define internal i32 @fcmpOleFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp ole float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOleFloat:
+; CHECK: ucomiss
+; CHECK: jae .
+
+define internal i32 @fcmpOleDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp ole double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOleDouble:
+; CHECK: ucomisd
+; CHECK: jae .
+
+define internal i32 @fcmpOneFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp one float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOneFloat:
+; CHECK: ucomiss
+; CHECK: jne .
+
+define internal i32 @fcmpOneDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp one double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOneDouble:
+; CHECK: ucomisd
+; CHECK: jne .
+
+define internal i32 @fcmpOrdFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp ord float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOrdFloat:
+; CHECK: ucomiss
+; CHECK: jnp .
+
+define internal i32 @fcmpOrdDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp ord double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpOrdDouble:
+; CHECK: ucomisd
+; CHECK: jnp .
+
+define internal i32 @fcmpUeqFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp ueq float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUeqFloat:
+; CHECK: ucomiss
+; CHECK: je .
+
+define internal i32 @fcmpUeqDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp ueq double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUeqDouble:
+; CHECK: ucomisd
+; CHECK: je .
+
+define internal i32 @fcmpUgtFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp ugt float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUgtFloat:
+; CHECK: ucomiss
+; CHECK: jb .
+
+define internal i32 @fcmpUgtDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp ugt double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUgtDouble:
+; CHECK: ucomisd
+; CHECK: jb .
+
+define internal i32 @fcmpUgeFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp uge float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUgeFloat:
+; CHECK: ucomiss
+; CHECK: jbe .
+
+define internal i32 @fcmpUgeDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp uge double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUgeDouble:
+; CHECK: ucomisd
+; CHECK: jbe .
+
+define internal i32 @fcmpUltFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp ult float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUltFloat:
+; CHECK: ucomiss
+; CHECK: jb .
+
+define internal i32 @fcmpUltDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp ult double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUltDouble:
+; CHECK: ucomisd
+; CHECK: jb .
+
+define internal i32 @fcmpUleFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp ule float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUleFloat:
+; CHECK: ucomiss
+; CHECK: jbe .
+
+define internal i32 @fcmpUleDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp ule double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUleDouble:
+; CHECK: ucomisd
+; CHECK: jbe .
+
+define internal i32 @fcmpUneFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp une float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUneFloat:
+; CHECK: ucomiss
+; CHECK: jne .
+; CHECK: jp .
+
+define internal i32 @fcmpUneDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp une double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUneDouble:
+; CHECK: ucomisd
+; CHECK: jne .
+; CHECK: jp .
+
+define internal i32 @fcmpUnoFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp uno float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUnoFloat:
+; CHECK: ucomiss
+; CHECK: jp .
+
+define internal i32 @fcmpUnoDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp uno double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpUnoDouble:
+; CHECK: ucomisd
+; CHECK: jp .
+
+define internal i32 @fcmpTrueFloat(float %a, float %b) {
+entry:
+ %cmp = fcmp true float %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpTrueFloat:
+; CHECK: mov {{.*}}, 1
+
+define internal i32 @fcmpTrueDouble(double %a, double %b) {
+entry:
+ %cmp = fcmp true double %a, %b
+ %cmp.ret_ext = zext i1 %cmp to i32
+ ret i32 %cmp.ret_ext
+}
+; CHECK: fcmpTrueDouble:
+; CHECK: mov {{.*}}, 1
+
+define internal float @loadFloat(i32 %a) {
+entry:
+ %a.asptr = inttoptr i32 %a to float*
+ %v0 = load float* %a.asptr, align 4
+ ret float %v0
+}
+; CHECK: loadFloat:
+; CHECK: movss
+; CHECK: fld
+
+define internal double @loadDouble(i32 %a) {
+entry:
+ %a.asptr = inttoptr i32 %a to double*
+ %v0 = load double* %a.asptr, align 8
+ ret double %v0
+}
+; CHECK: loadDouble:
+; CHECK: movsd
+; CHECK: fld
+
+define internal void @storeFloat(i32 %a, float %value) {
+entry:
+ %a.asptr = inttoptr i32 %a to float*
+ store float %value, float* %a.asptr, align 4
+ ret void
+}
+; CHECK: storeFloat:
+; CHECK: movss
+
+define internal void @storeDouble(i32 %a, double %value) {
+entry:
+ %a.asptr = inttoptr i32 %a to double*
+ store double %value, double* %a.asptr, align 8
+ ret void
+}
+; CHECK: storeDouble:
+; CHECK: movsd
+
+define internal void @storeFloatConst(i32 %a) {
+entry:
+ %a.asptr = inttoptr i32 %a to float*
+ store float 0x3FF3AE1480000000, float* %a.asptr, align 4
+ ret void
+}
+; CHECK: storeFloatConst:
+; CHECK: mov
+; CHECK: mov
+
+define internal void @storeDoubleConst(i32 %a) {
+entry:
+ %a.asptr = inttoptr i32 %a to double*
+ store double 1.230000e+00, double* %a.asptr, align 8
+ ret void
+}
+; CHECK: storeDoubleConst:
+; CHECK: mov
+; CHECK: mov
+
+define internal float @selectFloatVarVar(float %a, float %b) {
+entry:
+ %cmp = fcmp olt float %a, %b
+ %cond = select i1 %cmp, float %a, float %b
+ ret float %cond
+}
+; CHECK: selectFloatVarVar:
+; CHECK: ucomiss
+; CHECK: ja .
+; CHECK: fld
+
+define internal double @selectDoubleVarVar(double %a, double %b) {
+entry:
+ %cmp = fcmp olt double %a, %b
+ %cond = select i1 %cmp, double %a, double %b
+ ret double %cond
+}
+; CHECK: selectDoubleVarVar:
+; CHECK: ucomisd
+; CHECK: ja .
+; CHECK: fld
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/fpconst.pnacl.ll b/tests_lit/llvm2ice_tests/fpconst.pnacl.ll
new file mode 100644
index 0000000..eb43c8e
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/fpconst.pnacl.ll
@@ -0,0 +1,535 @@
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+; This is a smoke test for floating-point constant pooling. It tests
+; pooling of various float and double constants (including positive
+; and negative NaN) within functions and across functions. Note that
+; in LLVM bitcode, hex constants are used for an FP constant whenever
+; the constant "cannot be represented as a decimal floating point
+; number in a reasonable number of digits". See
+; http://llvm.org/docs/LangRef.html#simple-constants .
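+;
+; As an informative aside (not checked by this test): the constants that are
+; exactly representable keep their decimal spelling below (1.000000e+00,
+; 5.000000e-01, 2.500000e-01, ...), while 0x3FD5555555555555 is the double
+; nearest to 1/3, whose exact decimal expansion runs to dozens of digits, and
+; 0x7FF8000000000000 / 0xFFF8000000000000 are the bit patterns of positive
+; and negative quiet NaN, which have no decimal spelling at all.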
+
+@__init_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__fini_array_start = internal constant [0 x i8] zeroinitializer, align 4
+@__tls_template_start = internal constant [0 x i8] zeroinitializer, align 8
+@__tls_template_alignment = internal constant [4 x i8] c"\01\00\00\00", align 4
+
+define internal float @FpLookup1(i32 %Arg) {
+entry:
+ switch i32 %Arg, label %return [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb4
+ i32 3, label %sw.bb7
+ i32 -1, label %sw.bb10
+ i32 -2, label %sw.bb14
+ i32 -3, label %sw.bb19
+ i32 10, label %sw.bb24
+ i32 -10, label %sw.bb27
+ i32 100, label %sw.bb30
+ i32 101, label %sw.bb33
+ i32 102, label %sw.bb36
+ i32 103, label %sw.bb39
+ i32 -101, label %sw.bb42
+ i32 -102, label %sw.bb47
+ i32 -103, label %sw.bb52
+ i32 110, label %sw.bb57
+ i32 -110, label %sw.bb60
+ ]
+
+sw.bb: ; preds = %entry
+ %call = call float @Dummy(i32 0)
+ %add = fadd float %call, 1.000000e+00
+ br label %return
+
+sw.bb1: ; preds = %entry
+ %call2 = call float @Dummy(i32 1)
+ %add3 = fadd float %call2, 2.000000e+00
+ br label %return
+
+sw.bb4: ; preds = %entry
+ %call5 = call float @Dummy(i32 2)
+ %add6 = fadd float %call5, 4.000000e+00
+ br label %return
+
+sw.bb7: ; preds = %entry
+ %call8 = call float @Dummy(i32 3)
+ %add9 = fadd float %call8, 8.000000e+00
+ br label %return
+
+sw.bb10: ; preds = %entry
+ %call11 = call float @Dummy(i32 -1)
+ %conv13 = fadd float %call11, 5.000000e-01
+ br label %return
+
+sw.bb14: ; preds = %entry
+ %call15 = call float @Dummy(i32 -2)
+ %conv16 = fpext float %call15 to double
+ %add17 = fadd double %conv16, 0x3FD5555555555555
+ %conv18 = fptrunc double %add17 to float
+ br label %return
+
+sw.bb19: ; preds = %entry
+ %call20 = call float @Dummy(i32 -3)
+ %conv23 = fadd float %call20, 2.500000e-01
+ br label %return
+
+sw.bb24: ; preds = %entry
+ %call25 = call float @Dummy(i32 10)
+ %add26 = fadd float %call25, 0x7FF8000000000000
+ br label %return
+
+sw.bb27: ; preds = %entry
+ %call28 = call float @Dummy(i32 -10)
+ %add29 = fadd float %call28, 0xFFF8000000000000
+ br label %return
+
+sw.bb30: ; preds = %entry
+ %call31 = call float @Dummy(i32 100)
+ %add32 = fadd float %call31, 1.000000e+00
+ br label %return
+
+sw.bb33: ; preds = %entry
+ %call34 = call float @Dummy(i32 101)
+ %add35 = fadd float %call34, 2.000000e+00
+ br label %return
+
+sw.bb36: ; preds = %entry
+ %call37 = call float @Dummy(i32 102)
+ %add38 = fadd float %call37, 4.000000e+00
+ br label %return
+
+sw.bb39: ; preds = %entry
+ %call40 = call float @Dummy(i32 103)
+ %add41 = fadd float %call40, 8.000000e+00
+ br label %return
+
+sw.bb42: ; preds = %entry
+ %call43 = call float @Dummy(i32 -101)
+ %conv46 = fadd float %call43, 5.000000e-01
+ br label %return
+
+sw.bb47: ; preds = %entry
+ %call48 = call float @Dummy(i32 -102)
+ %conv49 = fpext float %call48 to double
+ %add50 = fadd double %conv49, 0x3FD5555555555555
+ %conv51 = fptrunc double %add50 to float
+ br label %return
+
+sw.bb52: ; preds = %entry
+ %call53 = call float @Dummy(i32 -103)
+ %conv56 = fadd float %call53, 2.500000e-01
+ br label %return
+
+sw.bb57: ; preds = %entry
+ %call58 = call float @Dummy(i32 110)
+ %add59 = fadd float %call58, 0x7FF8000000000000
+ br label %return
+
+sw.bb60: ; preds = %entry
+ %call61 = call float @Dummy(i32 -110)
+ %add62 = fadd float %call61, 0xFFF8000000000000
+ br label %return
+
+return: ; preds = %entry, %sw.bb60, %sw.bb57, %sw.bb52, %sw.bb47, %sw.bb42, %sw.bb39, %sw.bb36, %sw.bb33, %sw.bb30, %sw.bb27, %sw.bb24, %sw.bb19, %sw.bb14, %sw.bb10, %sw.bb7, %sw.bb4, %sw.bb1, %sw.bb
+ %retval.0 = phi float [ %add62, %sw.bb60 ], [ %add59, %sw.bb57 ], [ %conv56, %sw.bb52 ], [ %conv51, %sw.bb47 ], [ %conv46, %sw.bb42 ], [ %add41, %sw.bb39 ], [ %add38, %sw.bb36 ], [ %add35, %sw.bb33 ], [ %add32, %sw.bb30 ], [ %add29, %sw.bb27 ], [ %add26, %sw.bb24 ], [ %conv23, %sw.bb19 ], [ %conv18, %sw.bb14 ], [ %conv13, %sw.bb10 ], [ %add9, %sw.bb7 ], [ %add6, %sw.bb4 ], [ %add3, %sw.bb1 ], [ %add, %sw.bb ], [ 0.000000e+00, %entry ]
+ ret float %retval.0
+}
+
+declare float @Dummy(i32)
+
+define internal float @FpLookup2(i32 %Arg) {
+entry:
+ switch i32 %Arg, label %return [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb4
+ i32 3, label %sw.bb7
+ i32 -1, label %sw.bb10
+ i32 -2, label %sw.bb14
+ i32 -3, label %sw.bb19
+ i32 10, label %sw.bb24
+ i32 -10, label %sw.bb27
+ i32 100, label %sw.bb30
+ i32 101, label %sw.bb33
+ i32 102, label %sw.bb36
+ i32 103, label %sw.bb39
+ i32 -101, label %sw.bb42
+ i32 -102, label %sw.bb47
+ i32 -103, label %sw.bb52
+ i32 110, label %sw.bb57
+ i32 -110, label %sw.bb60
+ ]
+
+sw.bb: ; preds = %entry
+ %call = call float @Dummy(i32 0)
+ %add = fadd float %call, 1.000000e+00
+ br label %return
+
+sw.bb1: ; preds = %entry
+ %call2 = call float @Dummy(i32 1)
+ %add3 = fadd float %call2, 2.000000e+00
+ br label %return
+
+sw.bb4: ; preds = %entry
+ %call5 = call float @Dummy(i32 2)
+ %add6 = fadd float %call5, 4.000000e+00
+ br label %return
+
+sw.bb7: ; preds = %entry
+ %call8 = call float @Dummy(i32 3)
+ %add9 = fadd float %call8, 8.000000e+00
+ br label %return
+
+sw.bb10: ; preds = %entry
+ %call11 = call float @Dummy(i32 -1)
+ %conv13 = fadd float %call11, 5.000000e-01
+ br label %return
+
+sw.bb14: ; preds = %entry
+ %call15 = call float @Dummy(i32 -2)
+ %conv16 = fpext float %call15 to double
+ %add17 = fadd double %conv16, 0x3FD5555555555555
+ %conv18 = fptrunc double %add17 to float
+ br label %return
+
+sw.bb19: ; preds = %entry
+ %call20 = call float @Dummy(i32 -3)
+ %conv23 = fadd float %call20, 2.500000e-01
+ br label %return
+
+sw.bb24: ; preds = %entry
+ %call25 = call float @Dummy(i32 10)
+ %add26 = fadd float %call25, 0x7FF8000000000000
+ br label %return
+
+sw.bb27: ; preds = %entry
+ %call28 = call float @Dummy(i32 -10)
+ %add29 = fadd float %call28, 0xFFF8000000000000
+ br label %return
+
+sw.bb30: ; preds = %entry
+ %call31 = call float @Dummy(i32 100)
+ %add32 = fadd float %call31, 1.000000e+00
+ br label %return
+
+sw.bb33: ; preds = %entry
+ %call34 = call float @Dummy(i32 101)
+ %add35 = fadd float %call34, 2.000000e+00
+ br label %return
+
+sw.bb36: ; preds = %entry
+ %call37 = call float @Dummy(i32 102)
+ %add38 = fadd float %call37, 4.000000e+00
+ br label %return
+
+sw.bb39: ; preds = %entry
+ %call40 = call float @Dummy(i32 103)
+ %add41 = fadd float %call40, 8.000000e+00
+ br label %return
+
+sw.bb42: ; preds = %entry
+ %call43 = call float @Dummy(i32 -101)
+ %conv46 = fadd float %call43, 5.000000e-01
+ br label %return
+
+sw.bb47: ; preds = %entry
+ %call48 = call float @Dummy(i32 -102)
+ %conv49 = fpext float %call48 to double
+ %add50 = fadd double %conv49, 0x3FD5555555555555
+ %conv51 = fptrunc double %add50 to float
+ br label %return
+
+sw.bb52: ; preds = %entry
+ %call53 = call float @Dummy(i32 -103)
+ %conv56 = fadd float %call53, 2.500000e-01
+ br label %return
+
+sw.bb57: ; preds = %entry
+ %call58 = call float @Dummy(i32 110)
+ %add59 = fadd float %call58, 0x7FF8000000000000
+ br label %return
+
+sw.bb60: ; preds = %entry
+ %call61 = call float @Dummy(i32 -110)
+ %add62 = fadd float %call61, 0xFFF8000000000000
+ br label %return
+
+return: ; preds = %entry, %sw.bb60, %sw.bb57, %sw.bb52, %sw.bb47, %sw.bb42, %sw.bb39, %sw.bb36, %sw.bb33, %sw.bb30, %sw.bb27, %sw.bb24, %sw.bb19, %sw.bb14, %sw.bb10, %sw.bb7, %sw.bb4, %sw.bb1, %sw.bb
+ %retval.0 = phi float [ %add62, %sw.bb60 ], [ %add59, %sw.bb57 ], [ %conv56, %sw.bb52 ], [ %conv51, %sw.bb47 ], [ %conv46, %sw.bb42 ], [ %add41, %sw.bb39 ], [ %add38, %sw.bb36 ], [ %add35, %sw.bb33 ], [ %add32, %sw.bb30 ], [ %add29, %sw.bb27 ], [ %add26, %sw.bb24 ], [ %conv23, %sw.bb19 ], [ %conv18, %sw.bb14 ], [ %conv13, %sw.bb10 ], [ %add9, %sw.bb7 ], [ %add6, %sw.bb4 ], [ %add3, %sw.bb1 ], [ %add, %sw.bb ], [ 0.000000e+00, %entry ]
+ ret float %retval.0
+}
+
+define internal double @FpLookup3(i32 %Arg) {
+entry:
+ switch i32 %Arg, label %return [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb5
+ i32 3, label %sw.bb9
+ i32 -1, label %sw.bb13
+ i32 -2, label %sw.bb17
+ i32 -3, label %sw.bb21
+ i32 10, label %sw.bb25
+ i32 -10, label %sw.bb29
+ i32 100, label %sw.bb33
+ i32 101, label %sw.bb37
+ i32 102, label %sw.bb41
+ i32 103, label %sw.bb45
+ i32 -101, label %sw.bb49
+ i32 -102, label %sw.bb53
+ i32 -103, label %sw.bb57
+ i32 110, label %sw.bb61
+ i32 -110, label %sw.bb65
+ ]
+
+sw.bb: ; preds = %entry
+ %call = call float @Dummy(i32 0)
+ %add = fadd float %call, 1.000000e+00
+ %conv = fpext float %add to double
+ br label %return
+
+sw.bb1: ; preds = %entry
+ %call2 = call float @Dummy(i32 1)
+ %add3 = fadd float %call2, 2.000000e+00
+ %conv4 = fpext float %add3 to double
+ br label %return
+
+sw.bb5: ; preds = %entry
+ %call6 = call float @Dummy(i32 2)
+ %add7 = fadd float %call6, 4.000000e+00
+ %conv8 = fpext float %add7 to double
+ br label %return
+
+sw.bb9: ; preds = %entry
+ %call10 = call float @Dummy(i32 3)
+ %add11 = fadd float %call10, 8.000000e+00
+ %conv12 = fpext float %add11 to double
+ br label %return
+
+sw.bb13: ; preds = %entry
+ %call14 = call float @Dummy(i32 -1)
+ %conv15 = fpext float %call14 to double
+ %add16 = fadd double %conv15, 5.000000e-01
+ br label %return
+
+sw.bb17: ; preds = %entry
+ %call18 = call float @Dummy(i32 -2)
+ %conv19 = fpext float %call18 to double
+ %add20 = fadd double %conv19, 0x3FD5555555555555
+ br label %return
+
+sw.bb21: ; preds = %entry
+ %call22 = call float @Dummy(i32 -3)
+ %conv23 = fpext float %call22 to double
+ %add24 = fadd double %conv23, 2.500000e-01
+ br label %return
+
+sw.bb25: ; preds = %entry
+ %call26 = call float @Dummy(i32 10)
+ %conv27 = fpext float %call26 to double
+ %add28 = fadd double %conv27, 0x7FF8000000000000
+ br label %return
+
+sw.bb29: ; preds = %entry
+ %call30 = call float @Dummy(i32 -10)
+ %conv31 = fpext float %call30 to double
+ %add32 = fadd double %conv31, 0xFFF8000000000000
+ br label %return
+
+sw.bb33: ; preds = %entry
+ %call34 = call float @Dummy(i32 100)
+ %add35 = fadd float %call34, 1.000000e+00
+ %conv36 = fpext float %add35 to double
+ br label %return
+
+sw.bb37: ; preds = %entry
+ %call38 = call float @Dummy(i32 101)
+ %add39 = fadd float %call38, 2.000000e+00
+ %conv40 = fpext float %add39 to double
+ br label %return
+
+sw.bb41: ; preds = %entry
+ %call42 = call float @Dummy(i32 102)
+ %add43 = fadd float %call42, 4.000000e+00
+ %conv44 = fpext float %add43 to double
+ br label %return
+
+sw.bb45: ; preds = %entry
+ %call46 = call float @Dummy(i32 103)
+ %add47 = fadd float %call46, 8.000000e+00
+ %conv48 = fpext float %add47 to double
+ br label %return
+
+sw.bb49: ; preds = %entry
+ %call50 = call float @Dummy(i32 -101)
+ %conv51 = fpext float %call50 to double
+ %add52 = fadd double %conv51, 5.000000e-01
+ br label %return
+
+sw.bb53: ; preds = %entry
+ %call54 = call float @Dummy(i32 -102)
+ %conv55 = fpext float %call54 to double
+ %add56 = fadd double %conv55, 0x3FD5555555555555
+ br label %return
+
+sw.bb57: ; preds = %entry
+ %call58 = call float @Dummy(i32 -103)
+ %conv59 = fpext float %call58 to double
+ %add60 = fadd double %conv59, 2.500000e-01
+ br label %return
+
+sw.bb61: ; preds = %entry
+ %call62 = call float @Dummy(i32 110)
+ %conv63 = fpext float %call62 to double
+ %add64 = fadd double %conv63, 0x7FF8000000000000
+ br label %return
+
+sw.bb65: ; preds = %entry
+ %call66 = call float @Dummy(i32 -110)
+ %conv67 = fpext float %call66 to double
+ %add68 = fadd double %conv67, 0xFFF8000000000000
+ br label %return
+
+return: ; preds = %entry, %sw.bb65, %sw.bb61, %sw.bb57, %sw.bb53, %sw.bb49, %sw.bb45, %sw.bb41, %sw.bb37, %sw.bb33, %sw.bb29, %sw.bb25, %sw.bb21, %sw.bb17, %sw.bb13, %sw.bb9, %sw.bb5, %sw.bb1, %sw.bb
+ %retval.0 = phi double [ %add68, %sw.bb65 ], [ %add64, %sw.bb61 ], [ %add60, %sw.bb57 ], [ %add56, %sw.bb53 ], [ %add52, %sw.bb49 ], [ %conv48, %sw.bb45 ], [ %conv44, %sw.bb41 ], [ %conv40, %sw.bb37 ], [ %conv36, %sw.bb33 ], [ %add32, %sw.bb29 ], [ %add28, %sw.bb25 ], [ %add24, %sw.bb21 ], [ %add20, %sw.bb17 ], [ %add16, %sw.bb13 ], [ %conv12, %sw.bb9 ], [ %conv8, %sw.bb5 ], [ %conv4, %sw.bb1 ], [ %conv, %sw.bb ], [ 0.000000e+00, %entry ]
+ ret double %retval.0
+}
+
+define internal double @FpLookup4(i32 %Arg) {
+entry:
+ switch i32 %Arg, label %return [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb5
+ i32 3, label %sw.bb9
+ i32 -1, label %sw.bb13
+ i32 -2, label %sw.bb17
+ i32 -3, label %sw.bb21
+ i32 10, label %sw.bb25
+ i32 -10, label %sw.bb29
+ i32 100, label %sw.bb33
+ i32 101, label %sw.bb37
+ i32 102, label %sw.bb41
+ i32 103, label %sw.bb45
+ i32 -101, label %sw.bb49
+ i32 -102, label %sw.bb53
+ i32 -103, label %sw.bb57
+ i32 110, label %sw.bb61
+ i32 -110, label %sw.bb65
+ ]
+
+sw.bb: ; preds = %entry
+ %call = call float @Dummy(i32 0)
+ %add = fadd float %call, 1.000000e+00
+ %conv = fpext float %add to double
+ br label %return
+
+sw.bb1: ; preds = %entry
+ %call2 = call float @Dummy(i32 1)
+ %add3 = fadd float %call2, 2.000000e+00
+ %conv4 = fpext float %add3 to double
+ br label %return
+
+sw.bb5: ; preds = %entry
+ %call6 = call float @Dummy(i32 2)
+ %add7 = fadd float %call6, 4.000000e+00
+ %conv8 = fpext float %add7 to double
+ br label %return
+
+sw.bb9: ; preds = %entry
+ %call10 = call float @Dummy(i32 3)
+ %add11 = fadd float %call10, 8.000000e+00
+ %conv12 = fpext float %add11 to double
+ br label %return
+
+sw.bb13: ; preds = %entry
+ %call14 = call float @Dummy(i32 -1)
+ %conv15 = fpext float %call14 to double
+ %add16 = fadd double %conv15, 5.000000e-01
+ br label %return
+
+sw.bb17: ; preds = %entry
+ %call18 = call float @Dummy(i32 -2)
+ %conv19 = fpext float %call18 to double
+ %add20 = fadd double %conv19, 0x3FD5555555555555
+ br label %return
+
+sw.bb21: ; preds = %entry
+ %call22 = call float @Dummy(i32 -3)
+ %conv23 = fpext float %call22 to double
+ %add24 = fadd double %conv23, 2.500000e-01
+ br label %return
+
+sw.bb25: ; preds = %entry
+ %call26 = call float @Dummy(i32 10)
+ %conv27 = fpext float %call26 to double
+ %add28 = fadd double %conv27, 0x7FF8000000000000
+ br label %return
+
+sw.bb29: ; preds = %entry
+ %call30 = call float @Dummy(i32 -10)
+ %conv31 = fpext float %call30 to double
+ %add32 = fadd double %conv31, 0xFFF8000000000000
+ br label %return
+
+sw.bb33: ; preds = %entry
+ %call34 = call float @Dummy(i32 100)
+ %add35 = fadd float %call34, 1.000000e+00
+ %conv36 = fpext float %add35 to double
+ br label %return
+
+sw.bb37: ; preds = %entry
+ %call38 = call float @Dummy(i32 101)
+ %add39 = fadd float %call38, 2.000000e+00
+ %conv40 = fpext float %add39 to double
+ br label %return
+
+sw.bb41: ; preds = %entry
+ %call42 = call float @Dummy(i32 102)
+ %add43 = fadd float %call42, 4.000000e+00
+ %conv44 = fpext float %add43 to double
+ br label %return
+
+sw.bb45: ; preds = %entry
+ %call46 = call float @Dummy(i32 103)
+ %add47 = fadd float %call46, 8.000000e+00
+ %conv48 = fpext float %add47 to double
+ br label %return
+
+sw.bb49: ; preds = %entry
+ %call50 = call float @Dummy(i32 -101)
+ %conv51 = fpext float %call50 to double
+ %add52 = fadd double %conv51, 5.000000e-01
+ br label %return
+
+sw.bb53: ; preds = %entry
+ %call54 = call float @Dummy(i32 -102)
+ %conv55 = fpext float %call54 to double
+ %add56 = fadd double %conv55, 0x3FD5555555555555
+ br label %return
+
+sw.bb57: ; preds = %entry
+ %call58 = call float @Dummy(i32 -103)
+ %conv59 = fpext float %call58 to double
+ %add60 = fadd double %conv59, 2.500000e-01
+ br label %return
+
+sw.bb61: ; preds = %entry
+ %call62 = call float @Dummy(i32 110)
+ %conv63 = fpext float %call62 to double
+ %add64 = fadd double %conv63, 0x7FF8000000000000
+ br label %return
+
+sw.bb65: ; preds = %entry
+ %call66 = call float @Dummy(i32 -110)
+ %conv67 = fpext float %call66 to double
+ %add68 = fadd double %conv67, 0xFFF8000000000000
+ br label %return
+
+return: ; preds = %entry, %sw.bb65, %sw.bb61, %sw.bb57, %sw.bb53, %sw.bb49, %sw.bb45, %sw.bb41, %sw.bb37, %sw.bb33, %sw.bb29, %sw.bb25, %sw.bb21, %sw.bb17, %sw.bb13, %sw.bb9, %sw.bb5, %sw.bb1, %sw.bb
+ %retval.0 = phi double [ %add68, %sw.bb65 ], [ %add64, %sw.bb61 ], [ %add60, %sw.bb57 ], [ %add56, %sw.bb53 ], [ %add52, %sw.bb49 ], [ %conv48, %sw.bb45 ], [ %conv44, %sw.bb41 ], [ %conv40, %sw.bb37 ], [ %conv36, %sw.bb33 ], [ %add32, %sw.bb29 ], [ %add28, %sw.bb25 ], [ %add24, %sw.bb21 ], [ %add20, %sw.bb17 ], [ %add16, %sw.bb13 ], [ %conv12, %sw.bb9 ], [ %conv8, %sw.bb5 ], [ %conv4, %sw.bb1 ], [ %conv, %sw.bb ], [ 0.000000e+00, %entry ]
+ ret double %retval.0
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/global.ll b/tests_lit/llvm2ice_tests/global.ll
new file mode 100644
index 0000000..22dbad4
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/global.ll
@@ -0,0 +1,23 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+@intern_global = global i32 12, align 4
+@extern_global = external global i32
+
+define i32 @test_intern_global() {
+; CHECK: define i32 @test_intern_global
+entry:
+ %v0 = load i32* @intern_global, align 1
+ ret i32 %v0
+}
+
+define i32 @test_extern_global() {
+; CHECK: define i32 @test_extern_global
+entry:
+ %v0 = load i32* @extern_global, align 1
+ ret i32 %v0
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/icmp-simple.ll b/tests_lit/llvm2ice_tests/icmp-simple.ll
new file mode 100644
index 0000000..69e0adb
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/icmp-simple.ll
@@ -0,0 +1,18 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @dummy_icmp(i64 %foo, i64 %bar) {
+; CHECK: define void @dummy_icmp
+entry:
+ %r1 = icmp eq i64 %foo, %bar
+ %r2 = icmp slt i64 %foo, %bar
+ ret void
+; CHECK: entry:
+; CHECK-NEXT: %r1 = icmp eq i64 %foo, %bar
+; CHECK-NEXT: %r2 = icmp slt i64 %foo, %bar
+; CHECK-NEXT: ret void
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/inttoptr.ll b/tests_lit/llvm2ice_tests/inttoptr.ll
new file mode 100644
index 0000000..67781d7
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/inttoptr.ll
@@ -0,0 +1,13 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @dummy_inttoptr(i32 %addr_arg) {
+entry:
+ %ptr = inttoptr i32 %addr_arg to i32*
+ ret void
+; CHECK: %ptr = i32 %addr_arg
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/load.ll b/tests_lit/llvm2ice_tests/load.ll
new file mode 100644
index 0000000..31b0624
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/load.ll
@@ -0,0 +1,50 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @load_i64(i32 %addr_arg) {
+entry:
+ %ptr64 = inttoptr i32 %addr_arg to i64*
+ %iv = load i64* %ptr64, align 1
+ ret void
+
+; CHECK: %ptr64 = i32 %addr_arg
+; CHECK-NEXT: %iv = load i64* {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+define void @load_i32(i32 %addr_arg) {
+entry:
+ %ptr32 = inttoptr i32 %addr_arg to i32*
+ %iv = load i32* %ptr32, align 1
+ ret void
+
+; CHECK: %ptr32 = i32 %addr_arg
+; CHECK-NEXT: %iv = load i32* {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+define void @load_i16(i32 %addr_arg) {
+entry:
+ %ptr16 = inttoptr i32 %addr_arg to i16*
+ %iv = load i16* %ptr16, align 1
+ ret void
+
+; CHECK: %ptr16 = i32 %addr_arg
+; CHECK-NEXT: %iv = load i16* {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+define void @load_i8(i32 %addr_arg) {
+entry:
+ %ptr8 = inttoptr i32 %addr_arg to i8*
+ %iv = load i8* %ptr8, align 1
+ ret void
+
+; CHECK: %ptr8 = i32 %addr_arg
+; CHECK-NEXT: %iv = load i8* {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/return-int-arg.ll b/tests_lit/llvm2ice_tests/return-int-arg.ll
new file mode 100644
index 0000000..66f6a3b
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/return-int-arg.ll
@@ -0,0 +1,20 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i32 @func_single_arg(i32 %a) {
+; CHECK: define i32 @func_single_arg
+entry:
+ ret i32 %a
+; CHECK: ret i32 %a
+}
+
+define i32 @func_multiple_args(i32 %a, i32 %b, i32 %c) {
+; CHECK: func_multiple_args
+entry:
+ ret i32 %c
+; CHECK: ret i32 %c
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/select-opt.ll b/tests_lit/llvm2ice_tests/select-opt.ll
new file mode 100644
index 0000000..e47dd3c
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/select-opt.ll
@@ -0,0 +1,28 @@
+; RUIN: %llvm2ice %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @testSelect(i32 %a, i32 %b) {
+entry:
+ %cmp = icmp slt i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ tail call void @useInt(i32 %cond)
+ %cmp1 = icmp sgt i32 %a, %b
+ %cond2 = select i1 %cmp1, i32 10, i32 20
+ tail call void @useInt(i32 %cond2)
+ ret void
+}
+
+declare void @useInt(i32)
+
+; CHECK: .globl testSelect
+; CHECK: cmp
+; CHECK: cmp
+; CHECK: call useInt
+; CHECK: cmp
+; CHECK: cmp
+; CHECK: call useInt
+; CHECK: ret
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/shift.ll b/tests_lit/llvm2ice_tests/shift.ll
new file mode 100644
index 0000000..45d295d
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/shift.ll
@@ -0,0 +1,33 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+@i1 = common global i32 0, align 4
+@i2 = common global i32 0, align 4
+@u1 = common global i32 0, align 4
+@u2 = common global i32 0, align 4
+
+define void @conv1() {
+entry:
+ %v0 = load i32* @u1, align 1
+ %sext = shl i32 %v0, 24
+ %v1 = ashr i32 %sext, 24
+ store i32 %v1, i32* @i1, align 1
+ ret void
+ ; CHECK: shl eax, 24
+ ; CHECK-NEXT: sar eax, 24
+}
+
+define void @conv2() {
+entry:
+ %v0 = load i32* @u1, align 1
+ %sext1 = shl i32 %v0, 16
+ %v1 = ashr i32 %sext1, 16
+ store i32 %v1, i32* @i2, align 1
+ ret void
+ ; CHECK: shl eax, 16
+ ; CHECK-NEXT: sar eax, 16
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/simple-arith.ll b/tests_lit/llvm2ice_tests/simple-arith.ll
new file mode 100644
index 0000000..0b109c6
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/simple-arith.ll
@@ -0,0 +1,34 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+
+define i64 @add_args_i64(i64 %arg1, i64 %arg2) {
+entry:
+ %add = add i64 %arg2, %arg1
+ ret i64 %add
+}
+
+; Checks for verbose instruction output
+
+; CHECK: define i64 @add_args
+; CHECK: %add = add i64 %arg2, %arg1
+; CHECK-NEXT: ret i64 %add
+
+define i32 @add_args_i32(i32 %arg1, i32 %arg2) {
+entry:
+ %add = add i32 %arg2, %arg1
+ ret i32 %add
+}
+
+; Checks for emitted assembly
+
+; CHECK: .globl add_args_i32
+; CHECK: mov eax, dword ptr [esp+4]
+; CHECK-NEXT: mov ecx, dword ptr [esp+8]
+; CHECK-NEXT: add ecx, eax
+; CHECK-NEXT: mov eax, ecx
+; CHECK-NEXT: ret
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/simple-cond.ll b/tests_lit/llvm2ice_tests/simple-cond.ll
new file mode 100644
index 0000000..cc1f583
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/simple-cond.ll
@@ -0,0 +1,30 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define internal i32 @simple_cond(i32 %a, i32 %n) {
+entry:
+ %cmp = icmp slt i32 %n, 0
+; CHECK: %cmp = icmp slt i32 %n, 0
+ br i1 %cmp, label %if.then, label %if.else
+; CHECK-NEXT: br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %sub = sub i32 1, %n
+ br label %if.end
+
+if.else:
+ %gep_array = mul i32 %n, 4
+ %gep = add i32 %a, %gep_array
+ %gep.asptr = inttoptr i32 %gep to i32*
+ %v0 = load i32* %gep.asptr, align 1
+ br label %if.end
+
+if.end:
+ %result.0 = phi i32 [ %sub, %if.then ], [ %v0, %if.else ]
+; CHECK: %result.0 = phi i32 [ %sub, %if.then ], [ %v0, %if.else ]
+ ret i32 %result.0
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/simple-loop.ll b/tests_lit/llvm2ice_tests/simple-loop.ll
new file mode 100644
index 0000000..4460834
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/simple-loop.ll
@@ -0,0 +1,52 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i32 @simple_loop(i32 %a, i32 %n) {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body:
+ %i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+ %gep_array = mul i32 %i.06, 4
+ %gep = add i32 %a, %gep_array
+ %gep.asptr = inttoptr i32 %gep to i32*
+ %v0 = load i32* %gep.asptr, align 1
+ %add = add i32 %v0, %sum.05
+ %inc = add i32 %i.06, 1
+ %cmp = icmp slt i32 %inc, %n
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ ret i32 %sum.0.lcssa
+}
+
+; Checks for verbose instruction output
+
+; CHECK: br i1 %cmp4, label %for.body, label %for.end
+; CHECK-NEXT: for.body
+; CHECK: %i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+; CHECK-NEXT: %sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+
+; Checks for emitted assembly
+
+; CHECK: .globl simple_loop
+
+; CHECK: mov ecx, dword ptr [esp+{{[0-9]+}}]
+; CHECK: cmp ecx, 0
+; CHECK-NEXT: jg {{.*}}for.body
+; CHECK-NEXT: jmp {{.*}}for.end
+
+; TODO: The mov from ebx to esi seems redundant here, so this check may
+; need to be modified later.
+
+; CHECK: add [[IREG:[a-z]+]], 1
+; CHECK-NEXT: mov [[ICMPREG:[a-z]+]], [[IREG]]
+; CHECK: cmp [[ICMPREG]], ecx
+; CHECK-NEXT: jl {{.*}}for.body
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/store.ll b/tests_lit/llvm2ice_tests/store.ll
new file mode 100644
index 0000000..edf2ff2
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/store.ll
@@ -0,0 +1,50 @@
+; RUIN: %llvm2ice %s -verbose inst | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define void @store_i64(i32 %addr_arg) {
+entry:
+ %ptr64 = inttoptr i32 %addr_arg to i64*
+ store i64 1, i64* %ptr64, align 1
+ ret void
+
+; CHECK: %ptr64 = i32 %addr_arg
+; CHECK-NEXT: store i64 1, {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+define void @store_i32(i32 %addr_arg) {
+entry:
+ %ptr32 = inttoptr i32 %addr_arg to i32*
+ store i32 1, i32* %ptr32, align 1
+ ret void
+
+; CHECK: %ptr32 = i32 %addr_arg
+; CHECK-NEXT: store i32 1, {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+define void @store_i16(i32 %addr_arg) {
+entry:
+ %ptr16 = inttoptr i32 %addr_arg to i16*
+ store i16 1, i16* %ptr16, align 1
+ ret void
+
+; CHECK: %ptr16 = i32 %addr_arg
+; CHECK-NEXT: store i16 1, {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+define void @store_i8(i32 %addr_arg) {
+entry:
+ %ptr8 = inttoptr i32 %addr_arg to i8*
+ store i8 1, i8* %ptr8, align 1
+ ret void
+
+; CHECK: %ptr8 = i32 %addr_arg
+; CHECK-NEXT: store i8 1, {{.*}}, align 1
+; CHECK-NEXT: ret void
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/struct-arith.pnacl.ll b/tests_lit/llvm2ice_tests/struct-arith.pnacl.ll
new file mode 100644
index 0000000..de68416
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/struct-arith.pnacl.ll
@@ -0,0 +1,55 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+; This file is lowered from C code that does some simple arithmetic with
+; struct members. It is also built with the PNaCl toolchain, so it uses the
+; stable ABI subset of LLVM IR (structs are gone, pointers are turned into
+; i32, geps are gone, etc.).
+
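+; As an illustrative sketch (not checked by this test), a C member access
+; such as "s->field" at byte offset 12 becomes an explicit i32 address
+; computation, an inttoptr, and a load in this subset; %s and %field are
+; hypothetical names used only in this comment:
+;
+;   %gep = add i32 %s, 12
+;   %gep.asptr = inttoptr i32 %gep to i32*
+;   %field = load i32* %gep.asptr, align 1
+;
+; The same pattern appears throughout compute_important_function below.
+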
+define internal i32 @compute_important_function(i32 %v1, i32 %v2) {
+entry:
+ %v1.asptr = inttoptr i32 %v1 to i32*
+ %_v0 = load i32* %v1.asptr, align 1
+
+; CHECK: entry:
+; CHECK-NEXT: %v1.asptr = i32 %v1
+; CHECK-NEXT: %_v0 = load i32* {{.*}}, align 1
+
+ %v2.asptr = inttoptr i32 %v2 to i32*
+ %_v1 = load i32* %v2.asptr, align 1
+ %gep = add i32 %v2, 12
+ %gep.asptr = inttoptr i32 %gep to i32*
+ %_v2 = load i32* %gep.asptr, align 1
+ %mul = mul i32 %_v2, %_v1
+ %gep6 = add i32 %v1, 4
+ %gep6.asptr = inttoptr i32 %gep6 to i32*
+ %_v3 = load i32* %gep6.asptr, align 1
+ %gep8 = add i32 %v2, 8
+ %gep8.asptr = inttoptr i32 %gep8 to i32*
+ %_v4 = load i32* %gep8.asptr, align 1
+ %gep10 = add i32 %v2, 4
+ %gep10.asptr = inttoptr i32 %gep10 to i32*
+ %_v5 = load i32* %gep10.asptr, align 1
+ %mul3 = mul i32 %_v5, %_v4
+ %gep12 = add i32 %v1, 8
+ %gep12.asptr = inttoptr i32 %gep12 to i32*
+ %_v6 = load i32* %gep12.asptr, align 1
+ %mul7 = mul i32 %_v6, %_v3
+ %mul9 = mul i32 %mul7, %_v6
+ %gep14 = add i32 %v1, 12
+ %gep14.asptr = inttoptr i32 %gep14 to i32*
+ %_v7 = load i32* %gep14.asptr, align 1
+ %mul11 = mul i32 %mul9, %_v7
+ %add4.neg = add i32 %mul, %_v0
+ %add = sub i32 %add4.neg, %_v3
+ %sub = sub i32 %add, %mul3
+ %sub12 = sub i32 %sub, %mul11
+ ret i32 %sub12
+
+; CHECK: %sub12 = sub i32 %sub, %mul11
+; CHECK-NEXT: ret i32 %sub12
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/switch-opt.ll b/tests_lit/llvm2ice_tests/switch-opt.ll
new file mode 100644
index 0000000..3d008d1
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/switch-opt.ll
@@ -0,0 +1,36 @@
+; RUIN: %llvm2ice %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define i32 @testSwitch(i32 %a) {
+entry:
+ switch i32 %a, label %sw.default [
+ i32 1, label %sw.epilog
+ i32 2, label %sw.epilog
+ i32 3, label %sw.epilog
+ i32 7, label %sw.bb1
+ i32 8, label %sw.bb1
+ i32 15, label %sw.bb2
+ i32 14, label %sw.bb2
+ ]
+
+sw.default: ; preds = %entry
+ %add = add i32 %a, 27
+ br label %sw.epilog
+
+sw.bb1: ; preds = %entry, %entry
+ %phitmp = sub i32 21, %a
+ br label %sw.bb2
+
+sw.bb2: ; preds = %sw.bb1, %entry, %entry
+ %result.0 = phi i32 [ 1, %entry ], [ 1, %entry ], [ %phitmp, %sw.bb1 ]
+ br label %sw.epilog
+
+sw.epilog: ; preds = %sw.bb2, %sw.default, %entry, %entry, %entry
+ %result.1 = phi i32 [ %add, %sw.default ], [ %result.0, %sw.bb2 ], [ 17, %entry ], [ 17, %entry ], [ 17, %entry ]
+ ret i32 %result.1
+}
+
+; CHECK-NOT: ICE translation error
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ
diff --git a/tests_lit/llvm2ice_tests/unreachable.ll b/tests_lit/llvm2ice_tests/unreachable.ll
new file mode 100644
index 0000000..9d49008
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/unreachable.ll
@@ -0,0 +1,19 @@
+; RUIN: %llvm2ice -verbose inst %s | FileCheck %s
+; RUIN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
+; RUN: %szdiff --llvm2ice=%llvm2ice %s | FileCheck --check-prefix=DUMP %s
+
+define internal i32 @divide(i32 %num, i32 %den) {
+entry:
+ %cmp = icmp ne i32 %den, 0
+ br i1 %cmp, label %return, label %abort
+
+abort: ; preds = %entry
+ unreachable
+
+return: ; preds = %entry
+ %div = sdiv i32 %num, %den
+ ret i32 %div
+}
+
+; ERRORS-NOT: ICE translation error
+; DUMP-NOT: SZ