Reactor: Copy new debug macros to Reactor.
Fix up all calls to `assert()` in [LLVM,Subzero]Reactor.cpp with an appropriate call to one of these macros.
Bug: b/127433389
Change-Id: I188add3929c46932b8de5acf2ac4b2ac83b0768b
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/29055
Presubmit-Ready: Ben Clayton <bclayton@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Tested-by: Ben Clayton <bclayton@google.com>
diff --git a/src/Reactor/Debug.cpp b/src/Reactor/Debug.cpp
index adfcabd..7f0d2cd 100644
--- a/src/Reactor/Debug.cpp
+++ b/src/Reactor/Debug.cpp
@@ -14,26 +14,61 @@
#include "Debug.hpp"
-#include <stdio.h>
+#include <string>
#include <stdarg.h>
namespace rr
{
-void trace(const char *format, ...)
+
+void tracev(const char *format, va_list args)
{
+#ifndef RR_DISABLE_TRACE
if(false)
{
- FILE *file = fopen("debug.txt", "a");
+ FILE *file = fopen(TRACE_OUTPUT_FILE, "a");
if(file)
{
- va_list vararg;
- va_start(vararg, format);
- vfprintf(file, format, vararg);
- va_end(vararg);
-
+ vfprintf(file, format, args);
fclose(file);
}
}
+#endif
}
-}
\ No newline at end of file
+
+void trace(const char *format, ...)
+{
+ va_list vararg;
+ va_start(vararg, format);
+ tracev(format, vararg);
+ va_end(vararg);
+}
+
+void warn(const char *format, ...)
+{
+ va_list vararg;
+ va_start(vararg, format);
+ tracev(format, vararg);
+ va_end(vararg);
+
+ va_start(vararg, format);
+ vfprintf(stderr, format, vararg);
+ va_end(vararg);
+}
+
+void abort(const char *format, ...)
+{
+ va_list vararg;
+
+ va_start(vararg, format);
+ tracev(format, vararg);
+ va_end(vararg);
+
+ va_start(vararg, format);
+ vfprintf(stderr, format, vararg);
+ va_end(vararg);
+
+ ::abort();
+}
+
+} // namespace rr
diff --git a/src/Reactor/Debug.hpp b/src/Reactor/Debug.hpp
index 9df0b38..d2b3a1f 100644
--- a/src/Reactor/Debug.hpp
+++ b/src/Reactor/Debug.hpp
@@ -12,41 +12,100 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef Debug_hpp
-#define Debug_hpp
+// Debug.hpp: Debugging utilities.
-#if defined(__ANDROID__) && !defined(ANDROID_NDK_BUILD)
-#include "DebugAndroid.hpp"
-#else
+#ifndef rr_DEBUG_H_
+#define rr_DEBUG_H_
+#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
-#undef min
-#undef max
+#if !defined(TRACE_OUTPUT_FILE)
+#define TRACE_OUTPUT_FILE "debug.txt"
+#endif
namespace rr
{
-void trace(const char *format, ...);
+ // Outputs text to the debugging log
+ void trace(const char *format, ...);
+ inline void trace() {}
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
- #define TRACE(format, ...) trace("[0x%0.8X]%s(" format ")\n", this, __FUNCTION__, ##__VA_ARGS__)
-#else
- #define TRACE(...) ((void)0)
-#endif
+ // Outputs text to the debugging log and prints to stderr.
+ void warn(const char *format, ...);
+ inline void warn() {}
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
- #define UNIMPLEMENTED() {trace("\t! Unimplemented: %s(%d)\n", __FUNCTION__, __LINE__); ASSERT(false);}
-#else
- #define UNIMPLEMENTED() ((void)0)
-#endif
-
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
- #define ASSERT(expression) {if(!(expression)) trace("\t! Assert failed in %s(%d): " #expression "\n", __FUNCTION__, __LINE__); assert(expression);}
-#else
- #define ASSERT assert
-#endif
+ // Outputs the message to the debugging log and stderr, and calls abort().
+ void abort(const char *format, ...);
}
-#endif // __ANDROID__
-#endif // Debug_hpp
+// A macro to output a trace of a function call and its arguments to the
+// debugging log. Disabled if RR_DISABLE_TRACE is defined.
+#if defined(RR_DISABLE_TRACE)
+#define TRACE(message, ...) (void(0))
+#else
+#define TRACE(message, ...) rr::trace("%s:%d TRACE: " message "\n", __FILE__, __LINE__, ##__VA_ARGS__)
+#endif
+
+// A macro to print a warning message to the debugging log and stderr to denote
+// an issue that needs fixing.
+#define FIXME(message, ...) rr::warn("%s:%d FIXME: " message "\n", __FILE__, __LINE__, ##__VA_ARGS__)
+
+// A macro to print a warning message to the debugging log and stderr.
+#define WARN(message, ...) rr::warn("%s:%d WARNING: " message "\n", __FILE__, __LINE__, ##__VA_ARGS__)
+
+// A macro that prints the message to the debugging log and stderr and
+// immediately aborts execution of the application.
+//
+// Note: This will terminate the application regardless of build flags!
+// Use with extreme caution!
+#undef ABORT
+#define ABORT(message, ...) rr::abort("%s:%d ABORT: " message "\n", __FILE__, __LINE__, ##__VA_ARGS__)
+
+// A macro that delegates to:
+// ABORT() in debug builds (!NDEBUG || DCHECK_ALWAYS_ON)
+// or
+// WARN() in release builds (NDEBUG && !DCHECK_ALWAYS_ON)
+#undef DABORT
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#define DABORT(message, ...) ABORT(message, ##__VA_ARGS__)
+#else
+#define DABORT(message, ...) WARN(message, ##__VA_ARGS__)
+#endif
+
+// A macro asserting a condition.
+// If the condition fails, the condition and message is passed to DABORT().
+#undef ASSERT_MSG
+#define ASSERT_MSG(expression, format, ...) do { \
+ if(!(expression)) { \
+ DABORT("ASSERT(%s): " format "\n", #expression, ##__VA_ARGS__); \
+ } } while(0)
+
+// A macro asserting a condition.
+// If the condition fails, the condition is passed to DABORT().
+#undef ASSERT
+#define ASSERT(expression) do { \
+ if(!(expression)) { \
+ DABORT("ASSERT(%s)\n", #expression); \
+ } } while(0)
+
+// A macro to indicate unimplemented functionality.
+#undef UNIMPLEMENTED
+#define UNIMPLEMENTED(format, ...) DABORT("UNIMPLEMENTED: " format, ##__VA_ARGS__)
+
+// A macro for code which is not expected to be reached under valid assumptions.
+#undef UNREACHABLE
+#define UNREACHABLE(format, ...) DABORT("UNREACHABLE: " format, ##__VA_ARGS__)
+
+// A macro asserting a condition and performing a return.
+#undef ASSERT_OR_RETURN
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#define ASSERT_OR_RETURN(expression) ASSERT(expression)
+#else
+#define ASSERT_OR_RETURN(expression) do { \
+ if(!(expression)) { \
+ return; \
+ } } while(0)
+#endif
+
+#endif // rr_DEBUG_H_
diff --git a/src/Reactor/LLVMReactor.cpp b/src/Reactor/LLVMReactor.cpp
index 418e66d..2069a77 100644
--- a/src/Reactor/LLVMReactor.cpp
+++ b/src/Reactor/LLVMReactor.cpp
@@ -13,6 +13,7 @@
// limitations under the License.
#include "Reactor.hpp"
+#include "Debug.hpp"
#include "x86.hpp"
#include "CPUID.hpp"
@@ -92,7 +93,7 @@
#if defined(__x86_64__) && defined(_WIN32)
extern "C" void X86CompilationCallback()
{
- assert(false); // UNIMPLEMENTED
+ UNIMPLEMENTED("X86CompilationCallback");
}
#endif
@@ -235,7 +236,7 @@
}
else
{
- assert(numBits <= 64);
+ ASSERT_MSG(numBits <= 64, "numBits: %d", int(numBits));
uint64_t maxVal = (numBits == 64) ? ~0ULL : (1ULL << numBits) - 1;
max = llvm::ConstantInt::get(extTy, maxVal, false);
min = llvm::ConstantInt::get(extTy, 0, false);
@@ -361,7 +362,7 @@
llvm::cast<llvm::IntegerType>(dstTy->getElementType());
uint64_t truncNumBits = dstElemTy->getIntegerBitWidth();
- assert(truncNumBits < 64 && "shift 64 must be handled separately");
+ ASSERT_MSG(truncNumBits < 64, "shift 64 must be handled separately. truncNumBits: %d", int(truncNumBits));
llvm::Constant *max, *min;
if (isSigned)
{
@@ -530,7 +531,7 @@
case SCCP: passManager->add(llvm::createSCCPPass()); break;
case ScalarReplAggregates: passManager->add(llvm::createScalarReplAggregatesPass()); break;
default:
- assert(false);
+ UNREACHABLE("optimization[pass]: %d, pass: %d", int(optimization[pass]), int(pass));
}
}
}
@@ -588,7 +589,8 @@
while (trimmed[0] == '_') { trimmed++; }
FunctionMap::const_iterator it = func_.find(trimmed);
- assert(it != func_.end()); // Missing functions will likely make the module fail in exciting non-obvious ways.
+ // Missing functions will likely make the module fail in exciting non-obvious ways.
+ ASSERT_MSG(it != func_.end(), "Missing external function: '%s'", name.c_str());
return it->second;
}
};
@@ -713,7 +715,7 @@
case SCCP: passManager->add(llvm::createSCCPPass()); break;
case ScalarReplAggregates: passManager->add(llvm::createSROAPass()); break;
default:
- assert(false);
+ UNREACHABLE("optimization[pass]: %d, pass: %d", int(optimization[pass]), int(pass));
}
}
@@ -773,7 +775,9 @@
case Type_v4i8: return T(Byte16::getType());
case Type_v2f32: return T(Float4::getType());
case Type_LLVM: return reinterpret_cast<llvm::Type*>(t);
- default: assert(false); return nullptr;
+ default:
+ UNREACHABLE("asInternalType(t): %d", int(asInternalType(t)));
+ return nullptr;
}
}
@@ -833,7 +837,7 @@
// At this point we should only have LLVM 'primitive' types.
unsigned int bits = t->getPrimitiveSizeInBits();
- assert(bits != 0);
+ ASSERT_MSG(bits != 0, "bits: %d", int(bits));
// TODO(capn): Booleans are 1 bit integers in LLVM's SSA type system,
// but are typically stored as one byte. The DataLayout structure should
@@ -842,7 +846,7 @@
}
break;
default:
- assert(false);
+ UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
return 0;
}
}
@@ -858,7 +862,9 @@
case Type_v4i8: return 4;
case Type_v2f32: return 2;
case Type_LLVM: return llvm::cast<llvm::VectorType>(T(type))->getNumElements();
- default: assert(false); return 0;
+ default:
+ UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
+ return 0;
}
}
@@ -881,7 +887,9 @@
case std::memory_order_release: return llvm::AtomicOrdering::Release;
case std::memory_order_acq_rel: return llvm::AtomicOrdering::AcquireRelease;
case std::memory_order_seq_cst: return llvm::AtomicOrdering::SequentiallyConsistent;
- default: assert(false); return llvm::AtomicOrdering::AcquireRelease;
+ default:
+ UNREACHABLE("memoryOrder: %d", int(memoryOrder));
+ return llvm::AtomicOrdering::AcquireRelease;
}
}
@@ -1281,14 +1289,15 @@
// Fallthrough to non-emulated case.
case Type_LLVM:
{
- assert(V(ptr)->getType()->getContainedType(0) == T(type));
+ ASSERT(V(ptr)->getType()->getContainedType(0) == T(type));
auto load = new llvm::LoadInst(V(ptr), "", isVolatile, alignment);
load->setAtomic(atomicOrdering(atomic, memoryOrder));
return V(::builder->Insert(load));
}
default:
- assert(false); return nullptr;
+ UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
+ return nullptr;
}
}
@@ -1319,20 +1328,21 @@
// Fallthrough to non-emulated case.
case Type_LLVM:
{
- assert(V(ptr)->getType()->getContainedType(0) == T(type));
+ ASSERT(V(ptr)->getType()->getContainedType(0) == T(type));
auto store = ::builder->Insert(new llvm::StoreInst(V(value), V(ptr), isVolatile, alignment));
store->setAtomic(atomicOrdering(atomic, memoryOrder));
return value;
}
default:
- assert(false); return nullptr;
+ UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
+ return nullptr;
}
}
Value *Nucleus::createGEP(Value *ptr, Type *type, Value *index, bool unsignedIndex)
{
- assert(V(ptr)->getType()->getContainedType(0) == T(type));
+ ASSERT(V(ptr)->getType()->getContainedType(0) == T(type));
if(sizeof(void*) == 8)
{
@@ -1559,7 +1569,7 @@
Value *Nucleus::createExtractElement(Value *vector, Type *type, int index)
{
- assert(V(vector)->getType()->getContainedType(0) == T(type));
+ ASSERT(V(vector)->getType()->getContainedType(0) == T(type));
return V(::builder->CreateExtractElement(V(vector), V(createConstantInt(index))));
}
@@ -1573,7 +1583,7 @@
int size = llvm::cast<llvm::VectorType>(V(v1)->getType())->getNumElements();
const int maxSize = 16;
llvm::Constant *swizzle[maxSize];
- assert(size <= maxSize);
+ ASSERT(size <= maxSize);
for(int i = 0; i < size; i++)
{
@@ -1668,10 +1678,10 @@
Value *Nucleus::createConstantVector(const int64_t *constants, Type *type)
{
- assert(llvm::isa<llvm::VectorType>(T(type)));
+ ASSERT(llvm::isa<llvm::VectorType>(T(type)));
const int numConstants = elementCount(type); // Number of provided constants for the (emulated) type.
const int numElements = llvm::cast<llvm::VectorType>(T(type))->getNumElements(); // Number of elements of the underlying vector type.
- assert(numElements <= 16 && numConstants <= numElements);
+ ASSERT(numElements <= 16 && numConstants <= numElements);
llvm::Constant *constantVector[16];
for(int i = 0; i < numElements; i++)
@@ -1684,10 +1694,10 @@
Value *Nucleus::createConstantVector(const double *constants, Type *type)
{
- assert(llvm::isa<llvm::VectorType>(T(type)));
+ ASSERT(llvm::isa<llvm::VectorType>(T(type)));
const int numConstants = elementCount(type); // Number of provided constants for the (emulated) type.
const int numElements = llvm::cast<llvm::VectorType>(T(type))->getNumElements(); // Number of elements of the underlying vector type.
- assert(numElements <= 8 && numConstants <= numElements);
+ ASSERT(numElements <= 8 && numConstants <= numElements);
llvm::Constant *constantVector[8];
for(int i = 0; i < numElements; i++)
@@ -3217,7 +3227,7 @@
RValue<UInt4> Ctlz(RValue<UInt4> v, bool isZeroUndef)
{
#if REACTOR_LLVM_VERSION < 7
- assert(false); // TODO: LLVM 3 does not support ctlz in a vector form.
+ UNIMPLEMENTED("LLVM 3 does not support ctlz in a vector form");
#endif
::llvm::SmallVector<::llvm::Type*, 2> paramTys;
paramTys.push_back(T(UInt4::getType()));
@@ -3232,7 +3242,7 @@
RValue<UInt4> Cttz(RValue<UInt4> v, bool isZeroUndef)
{
#if REACTOR_LLVM_VERSION < 7
- assert(false); // TODO: LLVM 3 does not support cttz in a vector form.
+ UNIMPLEMENTED("LLVM 3 does not support cttz in a vector form");
#endif
::llvm::SmallVector<::llvm::Type*, 2> paramTys;
paramTys.push_back(T(UInt4::getType()));
diff --git a/src/Reactor/SubzeroReactor.cpp b/src/Reactor/SubzeroReactor.cpp
index 27604a8..63d7bd6 100644
--- a/src/Reactor/SubzeroReactor.cpp
+++ b/src/Reactor/SubzeroReactor.cpp
@@ -13,6 +13,7 @@
// limitations under the License.
#include "Reactor.hpp"
+#include "Debug.hpp"
#include "Optimizer.hpp"
#include "ExecutableMemory.hpp"
@@ -51,7 +52,6 @@
#include <mutex>
#include <limits>
#include <iostream>
-#include <cassert>
namespace
{
@@ -191,7 +191,7 @@
case Type_v8i8: return 8;
case Type_v4i8: return 4;
case Type_v2f32: return 8;
- default: assert(false);
+ default: ASSERT(false);
}
}
@@ -229,7 +229,7 @@
uint32_t symtab_entries = symbolTable->sh_size / symbolTable->sh_entsize;
if(index >= symtab_entries)
{
- assert(index < symtab_entries && "Symbol Index out of range");
+ ASSERT(index < symtab_entries && "Symbol Index out of range");
return nullptr;
}
@@ -272,7 +272,7 @@
}
break;
default:
- assert(false && "Unsupported relocation type");
+ ASSERT(false && "Unsupported relocation type");
return nullptr;
}
}
@@ -290,7 +290,7 @@
// *patchSite = (int32_t)((intptr_t)symbolValue + *patchSite - (intptr_t)patchSite);
// break;
default:
- assert(false && "Unsupported relocation type");
+ ASSERT(false && "Unsupported relocation type");
return nullptr;
}
}
@@ -314,7 +314,7 @@
uint32_t symtab_entries = symbolTable->sh_size / symbolTable->sh_entsize;
if(index >= symtab_entries)
{
- assert(index < symtab_entries && "Symbol Index out of range");
+ ASSERT(index < symtab_entries && "Symbol Index out of range");
return nullptr;
}
@@ -352,7 +352,7 @@
*patchSite32 = (int32_t)((intptr_t)symbolValue + *patchSite32 + relocation.r_addend);
break;
default:
- assert(false && "Unsupported relocation type");
+ ASSERT(false && "Unsupported relocation type");
return nullptr;
}
@@ -369,17 +369,17 @@
}
// Expect ELF bitness to match platform
- assert(sizeof(void*) == 8 ? elfHeader->getFileClass() == ELFCLASS64 : elfHeader->getFileClass() == ELFCLASS32);
+ ASSERT(sizeof(void*) == 8 ? elfHeader->getFileClass() == ELFCLASS64 : elfHeader->getFileClass() == ELFCLASS32);
#if defined(__i386__)
- assert(sizeof(void*) == 4 && elfHeader->e_machine == EM_386);
+ ASSERT(sizeof(void*) == 4 && elfHeader->e_machine == EM_386);
#elif defined(__x86_64__)
- assert(sizeof(void*) == 8 && elfHeader->e_machine == EM_X86_64);
+ ASSERT(sizeof(void*) == 8 && elfHeader->e_machine == EM_X86_64);
#elif defined(__arm__)
- assert(sizeof(void*) == 4 && elfHeader->e_machine == EM_ARM);
+ ASSERT(sizeof(void*) == 4 && elfHeader->e_machine == EM_ARM);
#elif defined(__aarch64__)
- assert(sizeof(void*) == 8 && elfHeader->e_machine == EM_AARCH64);
+ ASSERT(sizeof(void*) == 8 && elfHeader->e_machine == EM_AARCH64);
#elif defined(__mips__)
- assert(sizeof(void*) == 4 && elfHeader->e_machine == EM_MIPS);
+ ASSERT(sizeof(void*) == 4 && elfHeader->e_machine == EM_MIPS);
#else
#error "Unsupported platform"
#endif
@@ -399,7 +399,7 @@
}
else if(sectionHeader[i].sh_type == SHT_REL)
{
- assert(sizeof(void*) == 4 && "UNIMPLEMENTED"); // Only expected/implemented for 32-bit code
+ ASSERT(sizeof(void*) == 4 && "UNIMPLEMENTED"); // Only expected/implemented for 32-bit code
for(Elf32_Word index = 0; index < sectionHeader[i].sh_size / sectionHeader[i].sh_entsize; index++)
{
@@ -409,7 +409,7 @@
}
else if(sectionHeader[i].sh_type == SHT_RELA)
{
- assert(sizeof(void*) == 8 && "UNIMPLEMENTED"); // Only expected/implemented for 64-bit code
+ ASSERT(sizeof(void*) == 8 && "UNIMPLEMENTED"); // Only expected/implemented for 64-bit code
for(Elf32_Word index = 0; index < sectionHeader[i].sh_size / sectionHeader[i].sh_entsize; index++)
{
@@ -477,7 +477,7 @@
buffer[position] = Value;
position++;
}
- else assert(false && "UNIMPLEMENTED");
+ else ASSERT(false && "UNIMPLEMENTED");
}
void writeBytes(llvm::StringRef Bytes) override
@@ -590,7 +590,7 @@
optimize();
::function->translate();
- assert(!::function->hasError());
+ ASSERT(!::function->hasError());
auto globals = ::function->getGlobalInits();
@@ -648,7 +648,7 @@
void Nucleus::setInsertBlock(BasicBlock *basicBlock)
{
- // assert(::basicBlock->getInsts().back().getTerminatorEdges().size() >= 0 && "Previous basic block must have a terminator");
+ // ASSERT(::basicBlock->getInsts().back().getTerminatorEdges().size() >= 0 && "Previous basic block must have a terminator");
Variable::materializeAll();
@@ -734,7 +734,7 @@
static Value *createArithmetic(Ice::InstArithmetic::OpKind op, Value *lhs, Value *rhs)
{
- assert(lhs->getType() == rhs->getType() || llvm::isa<Ice::Constant>(rhs));
+ ASSERT(lhs->getType() == rhs->getType() || llvm::isa<Ice::Constant>(rhs));
bool swapOperands = llvm::isa<Ice::Constant>(lhs) && isCommutative(op);
@@ -865,8 +865,8 @@
Value *Nucleus::createLoad(Value *ptr, Type *type, bool isVolatile, unsigned int align, bool atomic, std::memory_order memoryOrder)
{
- assert(!atomic); // Unimplemented
- assert(memoryOrder == std::memory_order_relaxed); // Unimplemented
+ ASSERT(!atomic); // Unimplemented
+ ASSERT(memoryOrder == std::memory_order_relaxed); // Unimplemented
int valueType = (int)reinterpret_cast<intptr_t>(type);
Ice::Variable *result = ::function->makeVariable(T(type));
@@ -899,7 +899,7 @@
auto bitcast = Ice::InstCast::create(::function, Ice::InstCast::Bitcast, result, vector.loadValue());
::basicBlock->appendInst(bitcast);
}
- else assert(false);
+ else UNREACHABLE("typeSize(type): %d", int(typeSize(type)));
}
else
{
@@ -922,8 +922,8 @@
Value *Nucleus::createStore(Value *value, Value *ptr, Type *type, bool isVolatile, unsigned int align, bool atomic, std::memory_order memoryOrder)
{
- assert(!atomic); // Unimplemented
- assert(memoryOrder == std::memory_order_relaxed); // Unimplemented
+ ASSERT(!atomic); // Unimplemented
+ ASSERT(memoryOrder == std::memory_order_relaxed); // Unimplemented
#if __has_feature(memory_sanitizer)
// Mark all (non-stack) memory writes as initialized by calling __msan_unpoison
@@ -968,7 +968,7 @@
Int y = Extract(v, 1);
*Pointer<Int>(pointer + 4) = y;
}
- else assert(false);
+ else UNREACHABLE("typeSize(type): %d", int(typeSize(type)));
}
else
{
@@ -983,7 +983,7 @@
}
else
{
- assert(value->getType() == T(type));
+ ASSERT(value->getType() == T(type));
auto store = Ice::InstStore::create(::function, value, ptr, align);
::basicBlock->appendInst(store);
@@ -994,7 +994,7 @@
Value *Nucleus::createGEP(Value *ptr, Type *type, Value *index, bool unsignedIndex)
{
- assert(index->getType() == Ice::IceType_i32);
+ ASSERT(index->getType() == Ice::IceType_i32);
if(auto *constant = llvm::dyn_cast<Ice::ConstantInteger32>(index))
{
@@ -1030,7 +1030,8 @@
Value *Nucleus::createAtomicAdd(Value *ptr, Value *value)
{
- assert(false && "UNIMPLEMENTED"); return nullptr;
+ UNIMPLEMENTED("createAtomicAdd");
+ return nullptr;
}
static Value *createCast(Ice::InstCast::OpKind op, Value *v, Type *destType)
@@ -1108,7 +1109,7 @@
static Value *createIntCompare(Ice::InstIcmp::ICond condition, Value *lhs, Value *rhs)
{
- assert(lhs->getType() == rhs->getType());
+ ASSERT(lhs->getType() == rhs->getType());
auto result = ::function->makeVariable(Ice::isScalarIntegerType(lhs->getType()) ? Ice::IceType_i1 : lhs->getType());
auto cmp = Ice::InstIcmp::create(::function, condition, result, lhs, rhs);
@@ -1169,8 +1170,8 @@
static Value *createFloatCompare(Ice::InstFcmp::FCond condition, Value *lhs, Value *rhs)
{
- assert(lhs->getType() == rhs->getType());
- assert(Ice::isScalarFloatingType(lhs->getType()) || lhs->getType() == Ice::IceType_v4f32);
+ ASSERT(lhs->getType() == rhs->getType());
+ ASSERT(Ice::isScalarFloatingType(lhs->getType()) || lhs->getType() == Ice::IceType_v4f32);
auto result = ::function->makeVariable(Ice::isScalarFloatingType(lhs->getType()) ? Ice::IceType_i1 : Ice::IceType_v4i32);
auto cmp = Ice::InstFcmp::create(::function, condition, result, lhs, rhs);
@@ -1269,7 +1270,7 @@
Value *Nucleus::createShuffleVector(Value *V1, Value *V2, const int *select)
{
- assert(V1->getType() == V2->getType());
+ ASSERT(V1->getType() == V2->getType());
int size = Ice::typeNumElements(V1->getType());
auto result = ::function->makeVariable(V1->getType());
@@ -1287,7 +1288,7 @@
Value *Nucleus::createSelect(Value *C, Value *ifTrue, Value *ifFalse)
{
- assert(ifTrue->getType() == ifFalse->getType());
+ ASSERT(ifTrue->getType() == ifFalse->getType());
auto result = ::function->makeVariable(ifTrue->getType());
auto *select = Ice::InstSelect::create(::function, result, C, ifTrue, ifFalse);
@@ -1331,7 +1332,7 @@
{
if(Ice::isVectorType(T(Ty)))
{
- assert(Ice::typeNumElements(T(Ty)) <= 16);
+ ASSERT(Ice::typeNumElements(T(Ty)) <= 16);
int64_t c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
return createConstantVector(c, Ty);
}
@@ -1394,7 +1395,7 @@
Value *Nucleus::createConstantVector(const int64_t *constants, Type *type)
{
const int vectorSize = 16;
- assert(Ice::typeWidthInBytes(T(type)) == vectorSize);
+ ASSERT(Ice::typeWidthInBytes(T(type)) == vectorSize);
const int alignment = vectorSize;
auto globalPool = ::function->getGlobalPool();
@@ -1471,7 +1472,7 @@
}
break;
default:
- assert(false && "Unknown constant vector type" && type);
+ UNREACHABLE("Unknown constant vector type: %d", (int)reinterpret_cast<intptr_t>(type));
}
auto name = Ice::GlobalString::createWithoutString(::context);
@@ -1839,7 +1840,7 @@
Short4::Short4(RValue<Float4> cast)
{
- assert(false && "UNIMPLEMENTED");
+ UNIMPLEMENTED("Short4::Short4(RValue<Float4> cast)");
}
RValue<Short4> operator<<(RValue<Short4> lhs, unsigned char rhs)
@@ -2317,7 +2318,8 @@
RValue<UShort4> Average(RValue<UShort4> x, RValue<UShort4> y)
{
- assert(false && "UNIMPLEMENTED"); return RValue<UShort4>(V(nullptr));
+ UNIMPLEMENTED("RValue<UShort4> Average(RValue<UShort4> x, RValue<UShort4> y)");
+ return UShort4(0);
}
Type *UShort4::getType()
@@ -2381,12 +2383,14 @@
RValue<Int4> MulAdd(RValue<Short8> x, RValue<Short8> y)
{
- assert(false && "UNIMPLEMENTED"); return RValue<Int4>(V(nullptr));
+ UNIMPLEMENTED("RValue<Int4> MulAdd(RValue<Short8> x, RValue<Short8> y)");
+ return Int4(0);
}
RValue<Short8> MulHigh(RValue<Short8> x, RValue<Short8> y)
{
- assert(false && "UNIMPLEMENTED"); return RValue<Short8>(V(nullptr));
+ UNIMPLEMENTED("RValue<Short8> MulHigh(RValue<Short8> x, RValue<Short8> y)");
+ return Short8(0);
}
Type *Short8::getType()
@@ -2450,18 +2454,20 @@
RValue<UShort8> Swizzle(RValue<UShort8> x, char select0, char select1, char select2, char select3, char select4, char select5, char select6, char select7)
{
- assert(false && "UNIMPLEMENTED"); return RValue<UShort8>(V(nullptr));
+ UNIMPLEMENTED("RValue<UShort8> Swizzle(RValue<UShort8> x, char select0, char select1, char select2, char select3, char select4, char select5, char select6, char select7)");
+ return UShort8(0);
}
RValue<UShort8> MulHigh(RValue<UShort8> x, RValue<UShort8> y)
{
- assert(false && "UNIMPLEMENTED"); return RValue<UShort8>(V(nullptr));
+ UNIMPLEMENTED("RValue<UShort8> MulHigh(RValue<UShort8> x, RValue<UShort8> y)");
+ return UShort8(0);
}
// FIXME: Implement as Shuffle(x, y, Select(i0, ..., i16)) and Shuffle(x, y, SELECT_PACK_REPEAT(element))
// RValue<UShort8> PackRepeat(RValue<Byte16> x, RValue<Byte16> y, int element)
// {
-// assert(false && "UNIMPLEMENTED"); return RValue<UShort8>(V(nullptr));
+// ASSERT(false && "UNIMPLEMENTED"); return RValue<UShort8>(V(nullptr));
// }
Type *UShort8::getType()
@@ -2569,7 +2575,7 @@
// RValue<UInt> RoundUInt(RValue<Float> cast)
// {
-// assert(false && "UNIMPLEMENTED"); return RValue<UInt>(V(nullptr));
+// ASSERT(false && "UNIMPLEMENTED"); return RValue<UInt>(V(nullptr));
// }
Type *UInt::getType()
@@ -3366,16 +3372,12 @@
RValue<Long> Ticks()
{
- assert(false && "UNIMPLEMENTED"); return RValue<Long>(V(nullptr));
+ UNIMPLEMENTED("RValue<Long> Ticks()");
+ return Long(Int(0));
}
// Below are functions currently unimplemented for the Subzero backend.
// They are stubbed to satisfy the linker.
- #ifdef UNIMPLEMENTED
- #undef UNIMPLEMENTED
- #endif
- #define UNIMPLEMENTED(msg) assert(((void)(msg), false))
-
RValue<Float4> Sin(RValue<Float4> x) { UNIMPLEMENTED("Subzero Sin()"); return Float4(0); }
RValue<Float4> Cos(RValue<Float4> x) { UNIMPLEMENTED("Subzero Cos()"); return Float4(0); }
RValue<Float4> Tan(RValue<Float4> x) { UNIMPLEMENTED("Subzero Tan()"); return Float4(0); }