blob: c9569b9e9733ba93ce227c2e6eec3344759407f1 [file] [log] [blame]
// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "LLVMReactor.hpp"
#include "CPUID.hpp"
#include "Debug.hpp"
#include "EmulatedIntrinsics.hpp"
#include "LLVMReactorDebugInfo.hpp"
#include "Print.hpp"
#include "Reactor.hpp"
#include "x86.hpp"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Transforms/Coroutines.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include <fstream>
#include <iostream>
#include <mutex>
#include <numeric>
#include <thread>
#include <unordered_map>
#if defined(__i386__) || defined(__x86_64__)
# include <xmmintrin.h>
#endif
#include <math.h>
#if defined(__x86_64__) && defined(_WIN32)
extern "C" void X86CompilationCallback()
{
UNIMPLEMENTED_NO_BUG("X86CompilationCallback");
}
#endif
#if !LLVM_ENABLE_THREADS
# error "LLVM_ENABLE_THREADS needs to be enabled"
#endif
#if LLVM_VERSION_MAJOR < 11
namespace llvm {
using FixedVectorType = VectorType;
} // namespace llvm
#endif
namespace {
// Used to automatically invoke llvm_shutdown() when driver is unloaded
llvm::llvm_shutdown_obj llvmShutdownObj;
// This has to be a raw pointer because glibc 2.17 doesn't support __cxa_thread_atexit_impl
// for destructing objects at exit. See crbug.com/1074222
thread_local rr::JITBuilder *jit = nullptr;
// Default configuration settings. Must be accessed under mutex lock.
std::mutex defaultConfigLock;  // Guards all access to defaultConfig() below.
// Returns the mutable process-wide default Config. Callers must hold
// ::defaultConfigLock (see Nucleus::setDefaultConfig and friends).
rr::Config &defaultConfig()
{
	// This uses a static in a function to avoid the cost of a global static
	// initializer. See http://neugierig.org/software/chromium/notes/2011/08/static-initializers.html
	static rr::Config config = rr::Config::Edit()
	                               .add(rr::Optimization::Pass::ScalarReplAggregates)
	                               .add(rr::Optimization::Pass::InstructionCombining)
	                               .apply({});
	return config;
}
// Emulates a packed unsigned rounding average: (x + y + 1) >> 1 per lane,
// computed in a widened element type so the intermediate sum cannot overflow.
llvm::Value *lowerPAVG(llvm::Value *x, llvm::Value *y)
{
	llvm::VectorType *origTy = llvm::cast<llvm::VectorType>(x->getType());
	llvm::VectorType *wideTy =
	    llvm::VectorType::getExtendedElementVectorType(origTy);
	llvm::Value *wideX = jit->builder->CreateZExt(x, wideTy);
	llvm::Value *wideY = jit->builder->CreateZExt(y, wideTy);
	// (x + y + 1) >> 1
	llvm::Constant *one = llvm::ConstantInt::get(wideTy, 1);
	llvm::Value *sum = jit->builder->CreateAdd(wideX, wideY);
	llvm::Value *rounded = jit->builder->CreateAdd(sum, one);
	rounded = jit->builder->CreateLShr(rounded, one);
	return jit->builder->CreateTrunc(rounded, origTy);
}
// Emulates a packed integer min/max: per lane, selects x where pred(x, y)
// holds, otherwise y.
llvm::Value *lowerPMINMAX(llvm::Value *x, llvm::Value *y,
                          llvm::ICmpInst::Predicate pred)
{
	return jit->builder->CreateSelect(jit->builder->CreateICmp(pred, x, y), x, y);
}
// Emulates a packed integer compare: each lane of the result is all-ones
// where the predicate holds and all-zeros where it does not.
llvm::Value *lowerPCMP(llvm::ICmpInst::Predicate pred, llvm::Value *x,
                       llvm::Value *y, llvm::Type *dstTy)
{
	llvm::Value *cond = jit->builder->CreateICmp(pred, x, y);
	return jit->builder->CreateSExt(cond, dstTy, "");
}
#if defined(__i386__) || defined(__x86_64__)
// Emulates x86 pmovsx/pmovzx: takes the low dstTy-element-count lanes of op
// and sign- or zero-extends them to the destination element type.
llvm::Value *lowerPMOV(llvm::Value *op, llvm::Type *dstType, bool sext)
{
	llvm::VectorType *srcTy = llvm::cast<llvm::VectorType>(op->getType());
	llvm::FixedVectorType *dstTy = llvm::cast<llvm::FixedVectorType>(dstType);

	llvm::Value *undef = llvm::UndefValue::get(srcTy);
	// Shuffle mask 0..N-1 selects the low N lanes of op.
	llvm::SmallVector<uint32_t, 16> mask(dstTy->getNumElements());
	std::iota(mask.begin(), mask.end(), 0);
	llvm::Value *v = jit->builder->CreateShuffleVector(op, undef, mask);

	return sext ? jit->builder->CreateSExt(v, dstTy)
	            : jit->builder->CreateZExt(v, dstTy);
}
// Emulates packed absolute value: per lane, v if v > 0, else -v.
// Note INT_MIN negates to itself, matching x86 pabs* semantics.
llvm::Value *lowerPABS(llvm::Value *v)
{
	llvm::Value *zero = llvm::Constant::getNullValue(v->getType());
	llvm::Value *cmp = jit->builder->CreateICmp(llvm::ICmpInst::ICMP_SGT, v, zero);
	llvm::Value *neg = jit->builder->CreateNeg(v);
	return jit->builder->CreateSelect(cmp, v, neg);
}
#endif // defined(__i386__) || defined(__x86_64__)
#if !defined(__i386__) && !defined(__x86_64__)
// Emulates a packed float min/max: per lane, selects x where pred(x, y)
// holds, otherwise y (NaN behavior follows the chosen FCmp predicate).
llvm::Value *lowerPFMINMAX(llvm::Value *x, llvm::Value *y,
                           llvm::FCmpInst::Predicate pred)
{
	return jit->builder->CreateSelect(jit->builder->CreateFCmp(pred, x, y), x, y);
}
// Rounds to integral value using the llvm.nearbyint intrinsic
// (current rounding mode, typically round-to-nearest-even).
llvm::Value *lowerRound(llvm::Value *x)
{
	llvm::Function *nearbyint = llvm::Intrinsic::getDeclaration(
	    jit->module.get(), llvm::Intrinsic::nearbyint, { x->getType() });
	return jit->builder->CreateCall(nearbyint, { x });
}
// Rounds x to integral (nearbyint) and converts the result to a signed
// integer of type ty.
llvm::Value *lowerRoundInt(llvm::Value *x, llvm::Type *ty)
{
	llvm::Value *rounded = lowerRound(x);
	return jit->builder->CreateFPToSI(rounded, ty);
}
// Emits the llvm.floor intrinsic (round toward negative infinity).
llvm::Value *lowerFloor(llvm::Value *x)
{
	llvm::Function *floor = llvm::Intrinsic::getDeclaration(
	    jit->module.get(), llvm::Intrinsic::floor, { x->getType() });
	return jit->builder->CreateCall(floor, { x });
}

// Emits the llvm.trunc intrinsic (round toward zero).
llvm::Value *lowerTrunc(llvm::Value *x)
{
	llvm::Function *trunc = llvm::Intrinsic::getDeclaration(
	    jit->module.get(), llvm::Intrinsic::trunc, { x->getType() });
	return jit->builder->CreateCall(trunc, { x });
}

// Emits the llvm.sqrt intrinsic.
llvm::Value *lowerSQRT(llvm::Value *x)
{
	llvm::Function *sqrt = llvm::Intrinsic::getDeclaration(
	    jit->module.get(), llvm::Intrinsic::sqrt, { x->getType() });
	return jit->builder->CreateCall(sqrt, { x });
}
// Emulates a reciprocal as an exact 1.0 / x division, for either a scalar
// or a vector operand (vectors get a splatted 1.0 constant).
llvm::Value *lowerRCP(llvm::Value *x)
{
	llvm::Type *ty = x->getType();
	llvm::Constant *one;
	if(llvm::FixedVectorType *vectorTy = llvm::dyn_cast<llvm::FixedVectorType>(ty))
	{
		one = llvm::ConstantVector::getSplat(
#	if LLVM_VERSION_MAJOR >= 11
		    vectorTy->getElementCount(),
#	else
		    vectorTy->getNumElements(),
#	endif
		    llvm::ConstantFP::get(vectorTy->getElementType(), 1));
	}
	else
	{
		one = llvm::ConstantFP::get(ty, 1);
	}
	return jit->builder->CreateFDiv(one, x);
}
// Emulates reciprocal square root as 1.0 / sqrt(x).
llvm::Value *lowerRSQRT(llvm::Value *x)
{
	llvm::Value *root = lowerSQRT(x);
	return lowerRCP(root);
}
// Shifts every lane of x left by the immediate scalarY (splatted constant).
llvm::Value *lowerVectorShl(llvm::Value *x, uint64_t scalarY)
{
	llvm::FixedVectorType *ty = llvm::cast<llvm::FixedVectorType>(x->getType());
	llvm::Value *y = llvm::ConstantVector::getSplat(
#	if LLVM_VERSION_MAJOR >= 11
	    ty->getElementCount(),
#	else
	    ty->getNumElements(),
#	endif
	    llvm::ConstantInt::get(ty->getElementType(), scalarY));
	return jit->builder->CreateShl(x, y);
}

// Arithmetic (sign-preserving) right shift of every lane by scalarY.
llvm::Value *lowerVectorAShr(llvm::Value *x, uint64_t scalarY)
{
	llvm::FixedVectorType *ty = llvm::cast<llvm::FixedVectorType>(x->getType());
	llvm::Value *y = llvm::ConstantVector::getSplat(
#	if LLVM_VERSION_MAJOR >= 11
	    ty->getElementCount(),
#	else
	    ty->getNumElements(),
#	endif
	    llvm::ConstantInt::get(ty->getElementType(), scalarY));
	return jit->builder->CreateAShr(x, y);
}

// Logical (zero-filling) right shift of every lane by scalarY.
llvm::Value *lowerVectorLShr(llvm::Value *x, uint64_t scalarY)
{
	llvm::FixedVectorType *ty = llvm::cast<llvm::FixedVectorType>(x->getType());
	llvm::Value *y = llvm::ConstantVector::getSplat(
#	if LLVM_VERSION_MAJOR >= 11
	    ty->getElementCount(),
#	else
	    ty->getNumElements(),
#	endif
	    llvm::ConstantInt::get(ty->getElementType(), scalarY));
	return jit->builder->CreateLShr(x, y);
}
// Emulates x86 pmaddwd: sign-extends both operands, multiplies lane-wise,
// then adds each even/odd product pair, halving the lane count.
llvm::Value *lowerMulAdd(llvm::Value *x, llvm::Value *y)
{
	llvm::FixedVectorType *ty = llvm::cast<llvm::FixedVectorType>(x->getType());
	llvm::VectorType *extTy = llvm::VectorType::getExtendedElementVectorType(ty);

	llvm::Value *extX = jit->builder->CreateSExt(x, extTy);
	llvm::Value *extY = jit->builder->CreateSExt(y, extTy);
	llvm::Value *mult = jit->builder->CreateMul(extX, extY);

	llvm::Value *undef = llvm::UndefValue::get(extTy);

	// Split the products into even-indexed and odd-indexed lanes,
	// so adjacent pairs can be summed with a single vector add.
	llvm::SmallVector<uint32_t, 16> evenIdx;
	llvm::SmallVector<uint32_t, 16> oddIdx;
	for(uint64_t i = 0, n = ty->getNumElements(); i < n; i += 2)
	{
		evenIdx.push_back(i);
		oddIdx.push_back(i + 1);
	}

	llvm::Value *lhs = jit->builder->CreateShuffleVector(mult, undef, evenIdx);
	llvm::Value *rhs = jit->builder->CreateShuffleVector(mult, undef, oddIdx);
	return jit->builder->CreateAdd(lhs, rhs);
}
// Emulates the x86 pack instructions: saturates each lane of x and y to the
// range of the narrower destination element type, truncates, and
// concatenates the two truncated vectors into one result.
llvm::Value *lowerPack(llvm::Value *x, llvm::Value *y, bool isSigned)
{
	llvm::FixedVectorType *srcTy = llvm::cast<llvm::FixedVectorType>(x->getType());
	llvm::VectorType *dstTy = llvm::VectorType::getTruncatedElementVectorType(srcTy);

	llvm::IntegerType *dstElemTy =
	    llvm::cast<llvm::IntegerType>(dstTy->getElementType());

	uint64_t truncNumBits = dstElemTy->getIntegerBitWidth();
	ASSERT_MSG(truncNumBits < 64, "shift 64 must be handled separately. truncNumBits: %d", int(truncNumBits));
	llvm::Constant *max, *min;
	if(isSigned)
	{
		max = llvm::ConstantInt::get(srcTy, (1LL << (truncNumBits - 1)) - 1, true);
		// Negate the positive power of two rather than left-shifting -1:
		// shifting a negative value is undefined behavior before C++20.
		min = llvm::ConstantInt::get(srcTy, -(1LL << (truncNumBits - 1)), true);
	}
	else
	{
		max = llvm::ConstantInt::get(srcTy, (1ULL << truncNumBits) - 1, false);
		min = llvm::ConstantInt::get(srcTy, 0, false);
	}

	// Clamp both inputs to [min, max] using signed comparisons (the source
	// lanes are signed in both pack variants).
	x = lowerPMINMAX(x, min, llvm::ICmpInst::ICMP_SGT);
	x = lowerPMINMAX(x, max, llvm::ICmpInst::ICMP_SLT);
	y = lowerPMINMAX(y, min, llvm::ICmpInst::ICMP_SGT);
	y = lowerPMINMAX(y, max, llvm::ICmpInst::ICMP_SLT);

	x = jit->builder->CreateTrunc(x, dstTy);
	y = jit->builder->CreateTrunc(y, dstTy);

	// Shuffle mask 0..2N-1 concatenates the lanes of x followed by y.
	llvm::SmallVector<uint32_t, 16> index(srcTy->getNumElements() * 2);
	std::iota(index.begin(), index.end(), 0);

	return jit->builder->CreateShuffleVector(x, y, index);
}
// Emulates x86 pmovmskb/movmskps: packs the sign bit of each integer lane
// of x into the corresponding bit of a scalar of type retTy.
llvm::Value *lowerSignMask(llvm::Value *x, llvm::Type *retTy)
{
	llvm::FixedVectorType *ty = llvm::cast<llvm::FixedVectorType>(x->getType());
	llvm::Constant *zero = llvm::ConstantInt::get(ty, 0);
	// A lane's sign bit is set iff the lane is signed-less-than zero.
	llvm::Value *cmp = jit->builder->CreateICmpSLT(x, zero);

	llvm::Value *ret = jit->builder->CreateZExt(
	    jit->builder->CreateExtractElement(cmp, static_cast<uint64_t>(0)), retTy);
	for(uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
	{
		// OR lane i's bit into position i of the accumulated mask.
		llvm::Value *elem = jit->builder->CreateZExt(
		    jit->builder->CreateExtractElement(cmp, i), retTy);
		ret = jit->builder->CreateOr(ret, jit->builder->CreateShl(elem, i));
	}
	return ret;
}

// Float variant of lowerSignMask, using an unordered less-than-zero compare
// per lane.
llvm::Value *lowerFPSignMask(llvm::Value *x, llvm::Type *retTy)
{
	llvm::FixedVectorType *ty = llvm::cast<llvm::FixedVectorType>(x->getType());
	llvm::Constant *zero = llvm::ConstantFP::get(ty, 0);
	llvm::Value *cmp = jit->builder->CreateFCmpULT(x, zero);

	llvm::Value *ret = jit->builder->CreateZExt(
	    jit->builder->CreateExtractElement(cmp, static_cast<uint64_t>(0)), retTy);
	for(uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
	{
		llvm::Value *elem = jit->builder->CreateZExt(
		    jit->builder->CreateExtractElement(cmp, i), retTy);
		ret = jit->builder->CreateOr(ret, jit->builder->CreateShl(elem, i));
	}
	return ret;
}
#endif // !defined(__i386__) && !defined(__x86_64__)
// Unsigned saturating add (llvm.uadd.sat).
llvm::Value *lowerPUADDSAT(llvm::Value *x, llvm::Value *y)
{
	return jit->builder->CreateBinaryIntrinsic(llvm::Intrinsic::uadd_sat, x, y);
}

// Signed saturating add (llvm.sadd.sat).
llvm::Value *lowerPSADDSAT(llvm::Value *x, llvm::Value *y)
{
	return jit->builder->CreateBinaryIntrinsic(llvm::Intrinsic::sadd_sat, x, y);
}

// Unsigned saturating subtract (llvm.usub.sat).
llvm::Value *lowerPUSUBSAT(llvm::Value *x, llvm::Value *y)
{
	return jit->builder->CreateBinaryIntrinsic(llvm::Intrinsic::usub_sat, x, y);
}

// Signed saturating subtract (llvm.ssub.sat).
llvm::Value *lowerPSSUBSAT(llvm::Value *x, llvm::Value *y)
{
	return jit->builder->CreateBinaryIntrinsic(llvm::Intrinsic::ssub_sat, x, y);
}
// Emulates pmulhw/pmulhuw: widens each lane, multiplies, and returns the
// high half of each product.
llvm::Value *lowerMulHigh(llvm::Value *x, llvm::Value *y, bool sext)
{
	llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
	llvm::VectorType *extTy = llvm::VectorType::getExtendedElementVectorType(ty);

	llvm::Value *extX, *extY;
	if(sext)
	{
		extX = jit->builder->CreateSExt(x, extTy);
		extY = jit->builder->CreateSExt(y, extTy);
	}
	else
	{
		extX = jit->builder->CreateZExt(x, extTy);
		extY = jit->builder->CreateZExt(y, extTy);
	}

	llvm::Value *mult = jit->builder->CreateMul(extX, extY);

	llvm::IntegerType *intTy = llvm::cast<llvm::IntegerType>(ty->getElementType());
	// AShr is used for both the signed and unsigned case; the sign bits it
	// shifts in land above the original element width and are discarded by
	// the trunc below, so the result equals an LShr for the unsigned case.
	llvm::Value *mulh = jit->builder->CreateAShr(mult, intTy->getBitWidth());
	return jit->builder->CreateTrunc(mulh, ty);
}
} // namespace
namespace rr {
// Human-readable name of this Reactor backend, including the LLVM version.
std::string BackendName()
{
	std::string name{ "LLVM " };
	name += LLVM_VERSION_STRING;
	return name;
}
// Capabilities advertised by the LLVM backend.
const Capabilities Caps = {
	true,  // CoroutinesSupported
};
// The abstract Type* types are implemented as LLVM types, except that
// 64-bit vectors are emulated using 128-bit ones to avoid use of MMX in x86
// and VFP in ARM, and eliminate the overhead of converting them to explicit
// 128-bit ones. LLVM types are pointers, so we can represent emulated types
// as abstract pointers with small enum values.
enum InternalType : uintptr_t
{
	// Emulated types:
	Type_v2i32,
	Type_v4i16,
	Type_v2i16,
	Type_v8i8,
	Type_v4i8,
	Type_v2f32,
	EmulatedTypeCount,
	// Returned by asInternalType() to indicate that the abstract Type*
	// should be interpreted as LLVM type pointer:
	Type_LLVM
};

// Classifies an abstract Type*: small values are the emulated-type enums
// above; anything else is a genuine llvm::Type pointer (Type_LLVM).
inline InternalType asInternalType(Type *type)
{
	InternalType t = static_cast<InternalType>(reinterpret_cast<uintptr_t>(type));
	return (t < EmulatedTypeCount) ? t : Type_LLVM;
}
// Maps an abstract Type* to its concrete llvm::Type. Emulated sub-128-bit
// vector types are widened to their 128-bit containers.
llvm::Type *T(Type *t)
{
	// Use 128-bit vectors to implement logically shorter ones.
	switch(asInternalType(t))
	{
	case Type_v2i32: return T(Int4::type());
	case Type_v4i16: return T(Short8::type());
	case Type_v2i16: return T(Short8::type());
	case Type_v8i8: return T(Byte16::type());
	case Type_v4i8: return T(Byte16::type());
	case Type_v2f32: return T(Float4::type());
	case Type_LLVM: return reinterpret_cast<llvm::Type *>(t);
	default:
		UNREACHABLE("asInternalType(t): %d", int(asInternalType(t)));
		return nullptr;
	}
}
// Wraps an emulated-type enum value as an abstract Type* handle.
Type *T(InternalType t)
{
	return reinterpret_cast<Type *>(t);
}

// Reinterprets a vector of abstract Type* as a vector of llvm::Type*
// (both are pointer-sized, so the layouts match).
inline const std::vector<llvm::Type *> &T(const std::vector<Type *> &t)
{
	return reinterpret_cast<const std::vector<llvm::Type *> &>(t);
}

// Converts between abstract and LLVM basic-block handles (both directions).
inline llvm::BasicBlock *B(BasicBlock *t)
{
	return reinterpret_cast<llvm::BasicBlock *>(t);
}

inline BasicBlock *B(llvm::BasicBlock *t)
{
	return reinterpret_cast<BasicBlock *>(t);
}
// Returns the storage size in bytes of an abstract Type*. Emulated vector
// types report their logical (sub-128-bit) size, not the container size.
static size_t typeSize(Type *type)
{
	switch(asInternalType(type))
	{
	case Type_v2i32: return 8;
	case Type_v4i16: return 8;
	case Type_v2i16: return 4;
	case Type_v8i8: return 8;
	case Type_v4i8: return 4;
	case Type_v2f32: return 8;
	case Type_LLVM:
		{
			llvm::Type *t = T(type);

			if(t->isPointerTy())
			{
				return sizeof(void *);
			}

			// At this point we should only have LLVM 'primitive' types.
			unsigned int bits = t->getPrimitiveSizeInBits();
			ASSERT_MSG(bits != 0, "bits: %d", int(bits));

			// TODO(capn): Booleans are 1 bit integers in LLVM's SSA type system,
			// but are typically stored as one byte. The DataLayout structure should
			// be used here and many other places if this assumption fails.
			return (bits + 7) / 8;
		}
		break;
	default:
		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
		return 0;
	}
}
// Returns the logical number of lanes of an abstract vector Type*.
// For Type_LLVM the type must be a fixed vector.
static unsigned int elementCount(Type *type)
{
	switch(asInternalType(type))
	{
	case Type_v2i32: return 2;
	case Type_v4i16: return 4;
	case Type_v2i16: return 2;
	case Type_v8i8: return 8;
	case Type_v4i8: return 4;
	case Type_v2f32: return 2;
	case Type_LLVM: return llvm::cast<llvm::FixedVectorType>(T(type))->getNumElements();
	default:
		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
		return 0;
	}
}
// Creates an empty function with the given return and parameter types in
// the current JIT module: external linkage (so the JIT can resolve its
// address), C calling convention, nounwind, and MSan-instrumented when
// building under MemorySanitizer.
static llvm::Function *createFunction(const char *name, llvm::Type *retTy, const std::vector<llvm::Type *> &params)
{
	llvm::FunctionType *functionType = llvm::FunctionType::get(retTy, params, false);
	// Create with the final linkage directly; previously the function was
	// created Internal and immediately switched to External, which was
	// redundant.
	auto func = llvm::Function::Create(functionType, llvm::GlobalValue::ExternalLinkage, name, jit->module.get());
	func->setDoesNotThrow();
	func->setCallingConv(llvm::CallingConv::C);

	if(__has_feature(memory_sanitizer))
	{
		func->addFnAttr(llvm::Attribute::SanitizeMemory);
	}

	return func;
}
// Starts a new routine build: creates the thread-local JITBuilder and the
// unmaterialized-variable registry. Only one Nucleus may be live per thread.
Nucleus::Nucleus()
{
#if !__has_feature(memory_sanitizer)
	// thread_local variables in shared libraries are initialized at load-time,
	// but this is not observed by MemorySanitizer if the loader itself was not
	// instrumented, leading to false-positive uninitialized variable errors.
	ASSERT(jit == nullptr);
	ASSERT(Variable::unmaterializedVariables == nullptr);
#endif

	jit = new JITBuilder(Nucleus::getDefaultConfig());
	Variable::unmaterializedVariables = new Variable::UnmaterializedVariables();
}

// Tears down the thread-local build state in the reverse order of creation.
Nucleus::~Nucleus()
{
	delete Variable::unmaterializedVariables;
	Variable::unmaterializedVariables = nullptr;

	delete jit;
	jit = nullptr;
}
// Replaces the process-wide default Config under the config mutex.
void Nucleus::setDefaultConfig(const Config &cfg)
{
	std::lock_guard<std::mutex> lock(::defaultConfigLock);
	::defaultConfig() = cfg;
}

// Applies an edit to the process-wide default Config under the config mutex.
void Nucleus::adjustDefaultConfig(const Config::Edit &cfgEdit)
{
	std::lock_guard<std::mutex> lock(::defaultConfigLock);
	auto &cfg = ::defaultConfig();
	cfg = cfgEdit.apply(cfg);
}

// Returns a snapshot of the process-wide default Config.
Config Nucleus::getDefaultConfig()
{
	std::lock_guard<std::mutex> lock(::defaultConfigLock);
	return ::defaultConfig();
}
// Finalizes the routine under construction: terminates the last block if
// needed, verifies and optimizes the module, and JIT-compiles it into a
// Routine. May run the work on a separate thread (JIT_IN_SEPARATE_THREAD).
std::shared_ptr<Routine> Nucleus::acquireRoutine(const char *name, const Config::Edit &cfgEdit /* = Config::Edit::None */)
{
	// If the user did not terminate the current block, fall back to a void
	// return, or an undefined value of the declared return type.
	if(jit->builder->GetInsertBlock()->empty() || !jit->builder->GetInsertBlock()->back().isTerminator())
	{
		llvm::Type *type = jit->function->getReturnType();

		if(type->isVoidTy())
		{
			createRetVoid();
		}
		else
		{
			createRet(V(llvm::UndefValue::get(type)));
		}
	}

	std::shared_ptr<Routine> routine;

	auto acquire = [&](rr::JITBuilder *jit) {
		// ::jit is thread-local, so when this is executed on a separate thread (see JIT_IN_SEPARATE_THREAD)
		// it needs to only use the jit variable passed in as an argument.

		auto cfg = cfgEdit.apply(jit->config);

#ifdef ENABLE_RR_DEBUG_INFO
		if(jit->debugInfo != nullptr)
		{
			jit->debugInfo->Finalize();
		}
#endif  // ENABLE_RR_DEBUG_INFO

		// Flip to true when debugging to dump the unoptimized IR to a file.
		if(false)
		{
			std::error_code error;
			llvm::raw_fd_ostream file(std::string(name) + "-llvm-dump-unopt.txt", error);
			jit->module->print(file, 0);
		}

#if defined(ENABLE_RR_LLVM_IR_VERIFICATION) || !defined(NDEBUG)
		{
			// Catch malformed IR before handing it to the optimizer.
			llvm::legacy::PassManager pm;
			pm.add(llvm::createVerifierPass());
			pm.run(*jit->module);
		}
#endif  // defined(ENABLE_RR_LLVM_IR_VERIFICATION) || !defined(NDEBUG)

		jit->optimize(cfg);

		// Flip to true when debugging to dump the optimized IR to a file.
		if(false)
		{
			std::error_code error;
			llvm::raw_fd_ostream file(std::string(name) + "-llvm-dump-opt.txt", error);
			jit->module->print(file, 0);
		}

		routine = jit->acquireRoutine(name, &jit->function, 1, cfg);
	};

#ifdef JIT_IN_SEPARATE_THREAD
	// Perform optimizations and codegen in a separate thread to avoid stack overflow.
	// FIXME(b/149829034): This is not a long-term solution. Reactor has no control
	// over the threading and stack sizes of its users, so this should be addressed
	// at a higher level instead.
	std::thread thread(acquire, jit);
	thread.join();
#else
	acquire(jit);
#endif

	return routine;
}
// Emits an alloca for a local variable (optionally an array of arraySize
// elements) at the top of the entry block, using the data layout's
// preferred alignment for the type.
Value *Nucleus::allocateStackVariable(Type *type, int arraySize)
{
	// Need to allocate it in the entry block for mem2reg to work
	llvm::BasicBlock &entryBlock = jit->function->getEntryBlock();

	llvm::Instruction *declaration;

#if LLVM_VERSION_MAJOR >= 11
	auto align = jit->module->getDataLayout().getPrefTypeAlign(T(type));
#else
	auto align = llvm::MaybeAlign(jit->module->getDataLayout().getPrefTypeAlignment(T(type)));
#endif

	if(arraySize)
	{
		// Use the pointer-sized integer type for the element count.
		Value *size = (sizeof(size_t) == 8) ? Nucleus::createConstantLong(arraySize) : Nucleus::createConstantInt(arraySize);
		declaration = new llvm::AllocaInst(T(type), 0, V(size), align);
	}
	else
	{
		declaration = new llvm::AllocaInst(T(type), 0, (llvm::Value *)nullptr, align);
	}

	// Prepend so allocas stay ahead of any code already in the entry block.
	entryBlock.getInstList().push_front(declaration);

	return V(declaration);
}
// Appends a new, unnamed basic block to the current function.
BasicBlock *Nucleus::createBasicBlock()
{
	return B(llvm::BasicBlock::Create(*jit->context, "", jit->function));
}

// Returns the block new instructions are currently appended to.
BasicBlock *Nucleus::getInsertBlock()
{
	return B(jit->builder->GetInsertBlock());
}

// Redirects subsequent instruction emission to the given block.
void Nucleus::setInsertBlock(BasicBlock *basicBlock)
{
	//	assert(jit->builder->GetInsertBlock()->back().isTerminator());

	jit->builder->SetInsertPoint(B(basicBlock));
}
// Creates the routine's LLVM function with the given signature, attaches
// debug info when enabled, and positions the builder in a fresh entry block.
void Nucleus::createFunction(Type *ReturnType, const std::vector<Type *> &Params)
{
	jit->function = rr::createFunction("", T(ReturnType), T(Params));

#ifdef ENABLE_RR_DEBUG_INFO
	jit->debugInfo = std::make_unique<DebugInfo>(jit->builder.get(), jit->context.get(), jit->module.get(), jit->function);
#endif  // ENABLE_RR_DEBUG_INFO

	jit->builder->SetInsertPoint(llvm::BasicBlock::Create(*jit->context, "", jit->function));
}
// Returns the function argument at the given zero-based position.
Value *Nucleus::getArgument(unsigned int index)
{
	llvm::Function::arg_iterator arg = jit->function->arg_begin();

	for(unsigned int i = 0; i < index; i++)
	{
		arg++;
	}

	return V(&*arg);
}
// Emits a void return; the function's declared return type must be void.
void Nucleus::createRetVoid()
{
	RR_DEBUG_INFO_UPDATE_LOC();

	ASSERT_MSG(jit->function->getReturnType() == T(Void::type()), "Return type mismatch");

	// Code generated after this point is unreachable, so any variables
	// being read can safely return an undefined value. We have to avoid
	// materializing variables after the terminator ret instruction.
	Variable::killUnmaterialized();

	jit->builder->CreateRetVoid();
}

// Emits a return of v; its type must match the function's return type.
void Nucleus::createRet(Value *v)
{
	RR_DEBUG_INFO_UPDATE_LOC();

	ASSERT_MSG(jit->function->getReturnType() == V(v)->getType(), "Return type mismatch");

	// Code generated after this point is unreachable, so any variables
	// being read can safely return an undefined value. We have to avoid
	// materializing variables after the terminator ret instruction.
	Variable::killUnmaterialized();

	jit->builder->CreateRet(V(v));
}

// Emits an unconditional branch, materializing pending variables first so
// their values dominate the destination block.
void Nucleus::createBr(BasicBlock *dest)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	Variable::materializeAll();

	jit->builder->CreateBr(B(dest));
}

// Emits a conditional branch; cond must be an i1 value.
void Nucleus::createCondBr(Value *cond, BasicBlock *ifTrue, BasicBlock *ifFalse)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	Variable::materializeAll();
	jit->builder->CreateCondBr(V(cond), B(ifTrue), B(ifFalse));
}
// Thin wrappers over IRBuilder: each emits one instruction at the current
// insert point and records the debug location.

// Integer add.
Value *Nucleus::createAdd(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAdd(V(lhs), V(rhs)));
}

// Integer subtract.
Value *Nucleus::createSub(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateSub(V(lhs), V(rhs)));
}

// Integer multiply.
Value *Nucleus::createMul(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateMul(V(lhs), V(rhs)));
}

// Unsigned integer divide.
Value *Nucleus::createUDiv(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateUDiv(V(lhs), V(rhs)));
}

// Signed integer divide.
Value *Nucleus::createSDiv(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateSDiv(V(lhs), V(rhs)));
}

// Floating-point add.
Value *Nucleus::createFAdd(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFAdd(V(lhs), V(rhs)));
}

// Floating-point subtract.
Value *Nucleus::createFSub(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFSub(V(lhs), V(rhs)));
}

// Floating-point multiply.
Value *Nucleus::createFMul(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFMul(V(lhs), V(rhs)));
}

// Floating-point divide.
Value *Nucleus::createFDiv(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFDiv(V(lhs), V(rhs)));
}

// Unsigned integer remainder.
Value *Nucleus::createURem(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateURem(V(lhs), V(rhs)));
}

// Signed integer remainder.
Value *Nucleus::createSRem(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateSRem(V(lhs), V(rhs)));
}

// Floating-point remainder.
Value *Nucleus::createFRem(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFRem(V(lhs), V(rhs)));
}

// Lane-wise floating-point remainder for Float4 operands.
RValue<Float4> operator%(RValue<Float4> lhs, RValue<Float4> rhs)
{
	return RValue<Float4>(Nucleus::createFRem(lhs.value(), rhs.value()));
}
// Shift left.
Value *Nucleus::createShl(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateShl(V(lhs), V(rhs)));
}

// Logical (zero-filling) shift right.
Value *Nucleus::createLShr(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateLShr(V(lhs), V(rhs)));
}

// Arithmetic (sign-preserving) shift right.
Value *Nucleus::createAShr(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAShr(V(lhs), V(rhs)));
}

// Bitwise AND.
Value *Nucleus::createAnd(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAnd(V(lhs), V(rhs)));
}

// Bitwise OR.
Value *Nucleus::createOr(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateOr(V(lhs), V(rhs)));
}

// Bitwise XOR.
Value *Nucleus::createXor(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateXor(V(lhs), V(rhs)));
}

// Integer negation (0 - v).
Value *Nucleus::createNeg(Value *v)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateNeg(V(v)));
}

// Floating-point negation.
Value *Nucleus::createFNeg(Value *v)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFNeg(V(v)));
}

// Bitwise complement.
Value *Nucleus::createNot(Value *v)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateNot(V(v)));
}
// Emits a load of `type` from `ptr`, handling emulated sub-128-bit vector
// types, and optionally volatile/atomic semantics with the given ordering.
Value *Nucleus::createLoad(Value *ptr, Type *type, bool isVolatile, unsigned int alignment, bool atomic, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	switch(asInternalType(type))
	{
	// 64-bit emulated vectors: load as a single Long, insert into lane 0 of
	// a <2 x i64> container, and bitcast to the requested 128-bit type.
	case Type_v2i32:
	case Type_v4i16:
	case Type_v8i8:
	case Type_v2f32:
		return createBitCast(
		    createInsertElement(
		        V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::type()), 2, false))),
		        createLoad(createBitCast(ptr, Pointer<Long>::type()), Long::type(), isVolatile, alignment, atomic, memoryOrder),
		        0),
		    type);
	// 32-bit emulated vectors: load as an Int, widen to Long, and place in
	// lane 0 of the 128-bit container.
	case Type_v2i16:
	case Type_v4i8:
		if(alignment != 0)  // Not a local variable (all vectors are 128-bit).
		{
			Value *u = V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::type()), 2, false)));
			Value *i = createLoad(createBitCast(ptr, Pointer<Int>::type()), Int::type(), isVolatile, alignment, atomic, memoryOrder);
			i = createZExt(i, Long::type());
			Value *v = createInsertElement(u, i, 0);
			return createBitCast(v, type);
		}
		// Fallthrough to non-emulated case.
	case Type_LLVM:
		{
			auto elTy = T(type);
			ASSERT(V(ptr)->getType()->getContainedType(0) == elTy);

			if(!atomic)
			{
				return V(jit->builder->CreateAlignedLoad(V(ptr), llvm::MaybeAlign(alignment), isVolatile));
			}
			else if(elTy->isIntegerTy() || elTy->isPointerTy())
			{
				// Integers and pointers can be atomically loaded by setting
				// the ordering constraint on the load instruction.
				auto load = jit->builder->CreateAlignedLoad(V(ptr), llvm::MaybeAlign(alignment), isVolatile);
				load->setAtomic(atomicOrdering(atomic, memoryOrder));
				return V(load);
			}
			else if(elTy->isFloatTy() || elTy->isDoubleTy())
			{
				// LLVM claims to support atomic loads of float types as
				// above, but certain backends cannot deal with this.
				// Load as an integer and bitcast. See b/136037244.
				auto size = jit->module->getDataLayout().getTypeStoreSize(elTy);
				auto elAsIntTy = llvm::IntegerType::get(*jit->context, size * 8);
				auto ptrCast = jit->builder->CreatePointerCast(V(ptr), elAsIntTy->getPointerTo());
				auto load = jit->builder->CreateAlignedLoad(ptrCast, llvm::MaybeAlign(alignment), isVolatile);
				load->setAtomic(atomicOrdering(atomic, memoryOrder));
				auto loadCast = jit->builder->CreateBitCast(load, elTy);
				return V(loadCast);
			}
			else
			{
				// More exotic types require falling back to the extern:
				// void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
				auto sizetTy = llvm::IntegerType::get(*jit->context, sizeof(size_t) * 8);
				auto intTy = llvm::IntegerType::get(*jit->context, sizeof(int) * 8);
				auto i8Ty = llvm::Type::getInt8Ty(*jit->context);
				auto i8PtrTy = i8Ty->getPointerTo();
				auto voidTy = llvm::Type::getVoidTy(*jit->context);
				auto funcTy = llvm::FunctionType::get(voidTy, { sizetTy, i8PtrTy, i8PtrTy, intTy }, false);
				auto func = jit->module->getOrInsertFunction("__atomic_load", funcTy);
				auto size = jit->module->getDataLayout().getTypeStoreSize(elTy);
				// Atomically copy into a stack temporary, then load from it.
				auto out = allocateStackVariable(type);
				jit->builder->CreateCall(func, {
				                                   llvm::ConstantInt::get(sizetTy, size),
				                                   jit->builder->CreatePointerCast(V(ptr), i8PtrTy),
				                                   jit->builder->CreatePointerCast(V(out), i8PtrTy),
				                                   llvm::ConstantInt::get(intTy, uint64_t(atomicOrdering(true, memoryOrder))),
				                               });
				return V(jit->builder->CreateLoad(V(out)));
			}
		}
	default:
		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
		return nullptr;
	}
}
// Emits a store of `value` (of abstract `type`) to `ptr`, handling emulated
// sub-128-bit vector types, MSan unpoisoning, and optional volatile/atomic
// semantics. Returns `value` for chaining.
Value *Nucleus::createStore(Value *value, Value *ptr, Type *type, bool isVolatile, unsigned int alignment, bool atomic, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	switch(asInternalType(type))
	{
	// 64-bit emulated vectors: store lane 0 of the <2 x i64> container as a
	// single Long.
	case Type_v2i32:
	case Type_v4i16:
	case Type_v8i8:
	case Type_v2f32:
		createStore(
		    createExtractElement(
		        createBitCast(value, T(llvm::VectorType::get(T(Long::type()), 2, false))), Long::type(), 0),
		    createBitCast(ptr, Pointer<Long>::type()),
		    Long::type(), isVolatile, alignment, atomic, memoryOrder);
		return value;
	// 32-bit emulated vectors: store the low Int of the container.
	case Type_v2i16:
	case Type_v4i8:
		if(alignment != 0)  // Not a local variable (all vectors are 128-bit).
		{
			createStore(
			    createExtractElement(createBitCast(value, Int4::type()), Int::type(), 0),
			    createBitCast(ptr, Pointer<Int>::type()),
			    Int::type(), isVolatile, alignment, atomic, memoryOrder);
			return value;
		}
		// Fallthrough to non-emulated case.
	case Type_LLVM:
		{
			auto elTy = T(type);
			ASSERT(V(ptr)->getType()->getContainedType(0) == elTy);

			if(__has_feature(memory_sanitizer) && !REACTOR_ENABLE_MEMORY_SANITIZER_INSTRUMENTATION)
			{
				// Mark all memory writes as initialized by calling __msan_unpoison
				// void __msan_unpoison(const volatile void *a, size_t size)
				auto voidTy = llvm::Type::getVoidTy(*jit->context);
				auto i8Ty = llvm::Type::getInt8Ty(*jit->context);
				auto voidPtrTy = i8Ty->getPointerTo();
				auto sizetTy = llvm::IntegerType::get(*jit->context, sizeof(size_t) * 8);
				auto funcTy = llvm::FunctionType::get(voidTy, { voidPtrTy, sizetTy }, false);
				auto func = jit->module->getOrInsertFunction("__msan_unpoison", funcTy);
				auto size = jit->module->getDataLayout().getTypeStoreSize(elTy);

				jit->builder->CreateCall(func, { jit->builder->CreatePointerCast(V(ptr), voidPtrTy),
				                                 llvm::ConstantInt::get(sizetTy, size) });
			}

			if(!atomic)
			{
				jit->builder->CreateAlignedStore(V(value), V(ptr), llvm::MaybeAlign(alignment), isVolatile);
			}
			else if(elTy->isIntegerTy() || elTy->isPointerTy())
			{
				// Integers and pointers can be atomically stored by setting
				// the ordering constraint on the store instruction.
				auto store = jit->builder->CreateAlignedStore(V(value), V(ptr), llvm::MaybeAlign(alignment), isVolatile);
				store->setAtomic(atomicOrdering(atomic, memoryOrder));
			}
			else if(elTy->isFloatTy() || elTy->isDoubleTy())
			{
				// LLVM claims to support atomic stores of float types as
				// above, but certain backends cannot deal with this.
				// Store as an bitcast integer. See b/136037244.
				auto size = jit->module->getDataLayout().getTypeStoreSize(elTy);
				auto elAsIntTy = llvm::IntegerType::get(*jit->context, size * 8);
				auto valCast = jit->builder->CreateBitCast(V(value), elAsIntTy);
				auto ptrCast = jit->builder->CreatePointerCast(V(ptr), elAsIntTy->getPointerTo());
				auto store = jit->builder->CreateAlignedStore(valCast, ptrCast, llvm::MaybeAlign(alignment), isVolatile);
				store->setAtomic(atomicOrdering(atomic, memoryOrder));
			}
			else
			{
				// More exotic types require falling back to the extern:
				// void __atomic_store(size_t size, void *ptr, void *val, int ordering)
				auto sizetTy = llvm::IntegerType::get(*jit->context, sizeof(size_t) * 8);
				auto intTy = llvm::IntegerType::get(*jit->context, sizeof(int) * 8);
				auto i8Ty = llvm::Type::getInt8Ty(*jit->context);
				auto i8PtrTy = i8Ty->getPointerTo();
				auto voidTy = llvm::Type::getVoidTy(*jit->context);
				auto funcTy = llvm::FunctionType::get(voidTy, { sizetTy, i8PtrTy, i8PtrTy, intTy }, false);
				auto func = jit->module->getOrInsertFunction("__atomic_store", funcTy);
				auto size = jit->module->getDataLayout().getTypeStoreSize(elTy);
				// Copy the value to a stack temporary, then atomically copy
				// from it to the destination.
				auto copy = allocateStackVariable(type);
				jit->builder->CreateStore(V(value), V(copy));
				jit->builder->CreateCall(func, {
				                                   llvm::ConstantInt::get(sizetTy, size),
				                                   jit->builder->CreatePointerCast(V(ptr), i8PtrTy),
				                                   jit->builder->CreatePointerCast(V(copy), i8PtrTy),
				                                   llvm::ConstantInt::get(intTy, uint64_t(atomicOrdering(true, memoryOrder))),
				                               });
			}

			return value;
		}
	default:
		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
		return nullptr;
	}
}
// Emits a masked vector load (llvm.masked.load): lanes whose mask element
// is non-zero are read from memory; disabled lanes yield zero when
// zeroMaskedLanes is set, undef otherwise.
Value *Nucleus::createMaskedLoad(Value *ptr, Type *elTy, Value *mask, unsigned int alignment, bool zeroMaskedLanes)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	ASSERT(V(ptr)->getType()->isPointerTy());
	ASSERT(V(mask)->getType()->isVectorTy());

	auto numEls = llvm::cast<llvm::FixedVectorType>(V(mask)->getType())->getNumElements();
	auto i1Ty = llvm::Type::getInt1Ty(*jit->context);
	auto i32Ty = llvm::Type::getInt32Ty(*jit->context);
	auto elVecTy = llvm::VectorType::get(T(elTy), numEls, false);
	auto elVecPtrTy = elVecTy->getPointerTo();
	// Renamed from i8Mask: this is an i1 vector, matching createMaskedStore.
	auto i1Mask = jit->builder->CreateIntCast(V(mask), llvm::VectorType::get(i1Ty, numEls, false), false);  // vec<int, int, ...> -> vec<bool, bool, ...>
	auto passthrough = zeroMaskedLanes ? llvm::Constant::getNullValue(elVecTy) : llvm::UndefValue::get(elVecTy);
	auto align = llvm::ConstantInt::get(i32Ty, alignment);
	auto func = llvm::Intrinsic::getDeclaration(jit->module.get(), llvm::Intrinsic::masked_load, { elVecTy, elVecPtrTy });
	return V(jit->builder->CreateCall(func, { V(ptr), align, i1Mask, passthrough }));
}
void Nucleus::createMaskedStore(Value *ptr, Value *val, Value *mask, unsigned int alignment)
{
	RR_DEBUG_INFO_UPDATE_LOC();

	ASSERT(V(ptr)->getType()->isPointerTy());
	ASSERT(V(val)->getType()->isVectorTy());
	ASSERT(V(mask)->getType()->isVectorTy());

	auto numEls = llvm::cast<llvm::FixedVectorType>(V(mask)->getType())->getNumElements();
	auto i1Ty = llvm::Type::getInt1Ty(*jit->context);
	auto i32Ty = llvm::Type::getInt32Ty(*jit->context);
	auto elVecTy = V(val)->getType();
	auto elVecPtrTy = elVecTy->getPointerTo();
	auto i1Mask = jit->builder->CreateIntCast(V(mask), llvm::VectorType::get(i1Ty, numEls, false), false);  // vec<int, int, ...> -> vec<bool, bool, ...>
	auto align = llvm::ConstantInt::get(i32Ty, alignment);
	auto func = llvm::Intrinsic::getDeclaration(jit->module.get(), llvm::Intrinsic::masked_store, { elVecTy, elVecPtrTy });
	jit->builder->CreateCall(func, { V(val), V(ptr), align, i1Mask });

	if(__has_feature(memory_sanitizer) && !REACTOR_ENABLE_MEMORY_SANITIZER_INSTRUMENTATION)
	{
		// Mark memory writes as initialized by calling __msan_unpoison
		// void __msan_unpoison(const volatile void *a, size_t size)
		// LLVM IR has no pointer-to-void type (PointerType::get rejects void),
		// so the 'void *' parameter is declared, and the address passed, as i8*.
		auto voidTy = llvm::Type::getVoidTy(*jit->context);
		auto i8PtrTy = llvm::Type::getInt8Ty(*jit->context)->getPointerTo();
		auto sizetTy = llvm::IntegerType::get(*jit->context, sizeof(size_t) * 8);
		auto funcTy = llvm::FunctionType::get(voidTy, { i8PtrTy, sizetTy }, false);
		auto func = jit->module->getOrInsertFunction("__msan_unpoison", funcTy);
		auto size = jit->module->getDataLayout().getTypeStoreSize(llvm::cast<llvm::VectorType>(elVecTy)->getElementType());
		auto i32Zero = llvm::ConstantInt::get(i32Ty, 0);

		for(unsigned i = 0; i < numEls; i++)
		{
			// Unpoison each element only when its mask lane is set.
			auto idx = llvm::ConstantInt::get(i32Ty, i);
			auto thenBlock = llvm::BasicBlock::Create(*jit->context, "", jit->function);
			auto mergeBlock = llvm::BasicBlock::Create(*jit->context, "", jit->function);
			jit->builder->CreateCondBr(jit->builder->CreateExtractElement(i1Mask, idx), thenBlock, mergeBlock);
			jit->builder->SetInsertPoint(thenBlock);

			// Address element i *within* the stored vector. A single-index GEP
			// on the vector pointer would stride by whole vectors; the extra
			// leading zero index steps into the pointee instead.
			auto elPtr = jit->builder->CreateGEP(V(ptr), llvm::ArrayRef<llvm::Value *>{ i32Zero, idx });
			jit->builder->CreateCall(func, { jit->builder->CreatePointerCast(elPtr, i8PtrTy),
			                                 llvm::ConstantInt::get(sizetTy, size) });
			jit->builder->CreateBr(mergeBlock);
			jit->builder->SetInsertPoint(mergeBlock);
		}
	}
}
// Loads one element of type elTy from (i8 *)base + offsets[i] for every lane
// whose mask element is set, producing a vector result. Masked-off lanes yield
// zero or undef depending on zeroMaskedLanes. 'offsets' are byte offsets.
static llvm::Value *createGather(llvm::Value *base, llvm::Type *elTy, llvm::Value *offsets, llvm::Value *mask, unsigned int alignment, bool zeroMaskedLanes)
{
	ASSERT(base->getType()->isPointerTy());
	ASSERT(offsets->getType()->isVectorTy());
	ASSERT(mask->getType()->isVectorTy());

	auto numEls = llvm::cast<llvm::FixedVectorType>(mask->getType())->getNumElements();
	auto i1Ty = llvm::Type::getInt1Ty(*jit->context);
	auto i32Ty = llvm::Type::getInt32Ty(*jit->context);
	auto i8Ty = llvm::Type::getInt8Ty(*jit->context);
	auto i8PtrTy = i8Ty->getPointerTo();
	auto elPtrTy = elTy->getPointerTo();
	auto elVecTy = llvm::VectorType::get(elTy, numEls, false);
	auto elPtrVecTy = llvm::VectorType::get(elPtrTy, numEls, false);
	// Form the per-lane addresses: byte-offset GEP off an i8* base, then cast
	// the resulting pointer vector back to pointers-to-elTy.
	auto i8Base = jit->builder->CreatePointerCast(base, i8PtrTy);
	auto i8Ptrs = jit->builder->CreateGEP(i8Base, offsets);
	auto elPtrs = jit->builder->CreatePointerCast(i8Ptrs, elPtrVecTy);
	auto i1Mask = jit->builder->CreateIntCast(mask, llvm::VectorType::get(i1Ty, numEls, false), false);  // vec<int, int, ...> -> vec<bool, bool, ...>
	auto passthrough = zeroMaskedLanes ? llvm::Constant::getNullValue(elVecTy) : llvm::UndefValue::get(elVecTy);

	if(!__has_feature(memory_sanitizer))
	{
		auto align = llvm::ConstantInt::get(i32Ty, alignment);
		auto func = llvm::Intrinsic::getDeclaration(jit->module.get(), llvm::Intrinsic::masked_gather, { elVecTy, elPtrVecTy });
		return jit->builder->CreateCall(func, { elPtrs, align, i1Mask, passthrough });
	}
	else  // __has_feature(memory_sanitizer)
	{
		// MemorySanitizer currently does not support instrumenting llvm::Intrinsic::masked_gather
		// Work around it by emulating gather with element-wise loads.
		// TODO(b/172238865): Remove when supported by MemorySanitizer.
		Value *result = Nucleus::allocateStackVariable(T(elVecTy));
		Nucleus::createStore(V(passthrough), result, T(elVecTy));

		for(unsigned i = 0; i < numEls; i++)
		{
			// Check mask for this element; load the lane only when set,
			// merging into the passthrough-initialized stack result.
			Value *elementMask = Nucleus::createExtractElement(V(i1Mask), T(i1Ty), i);

			If(RValue<Bool>(elementMask))
			{
				Value *elPtr = Nucleus::createExtractElement(V(elPtrs), T(elPtrTy), i);
				Value *el = Nucleus::createLoad(elPtr, T(elTy), /*isVolatile */ false, alignment, /* atomic */ false, std::memory_order_relaxed);
				Value *v = Nucleus::createLoad(result, T(elVecTy));
				v = Nucleus::createInsertElement(v, el, i);
				Nucleus::createStore(v, result, T(elVecTy));
			}
		}

		return V(Nucleus::createLoad(result, T(elVecTy)));
	}
}
RValue<Float4> Gather(RValue<Pointer<Float>> base, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment, bool zeroMaskedLanes /* = false */)
{
	// Masked gather of four floats from base plus per-lane byte offsets.
	llvm::Value *gathered = createGather(V(base.value()), T(Float::type()), V(offsets.value()), V(mask.value()), alignment, zeroMaskedLanes);
	return As<Float4>(V(gathered));
}
RValue<Int4> Gather(RValue<Pointer<Int>> base, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment, bool zeroMaskedLanes /* = false */)
{
	// Masked gather of four ints from base plus per-lane byte offsets.
	llvm::Value *gathered = createGather(V(base.value()), T(Int::type()), V(offsets.value()), V(mask.value()), alignment, zeroMaskedLanes);
	return As<Int4>(V(gathered));
}
// Stores each lane of 'val' to (i8 *)base + offsets[i] (byte offsets) for
// every lane whose mask element is set; masked-off lanes are not written.
static void createScatter(llvm::Value *base, llvm::Value *val, llvm::Value *offsets, llvm::Value *mask, unsigned int alignment)
{
	ASSERT(base->getType()->isPointerTy());
	ASSERT(val->getType()->isVectorTy());
	ASSERT(offsets->getType()->isVectorTy());
	ASSERT(mask->getType()->isVectorTy());

	auto numEls = llvm::cast<llvm::FixedVectorType>(mask->getType())->getNumElements();
	auto i1Ty = llvm::Type::getInt1Ty(*jit->context);
	auto i32Ty = llvm::Type::getInt32Ty(*jit->context);
	auto i8Ty = llvm::Type::getInt8Ty(*jit->context);
	auto i8PtrTy = i8Ty->getPointerTo();
	auto elVecTy = val->getType();
	auto elTy = llvm::cast<llvm::VectorType>(elVecTy)->getElementType();
	auto elPtrTy = elTy->getPointerTo();
	auto elPtrVecTy = llvm::VectorType::get(elPtrTy, numEls, false);
	// Form the per-lane destination addresses: byte-offset GEP off an i8*
	// base, cast back to pointers-to-element.
	auto i8Base = jit->builder->CreatePointerCast(base, i8PtrTy);
	auto i8Ptrs = jit->builder->CreateGEP(i8Base, offsets);
	auto elPtrs = jit->builder->CreatePointerCast(i8Ptrs, elPtrVecTy);
	auto i1Mask = jit->builder->CreateIntCast(mask, llvm::VectorType::get(i1Ty, numEls, false), false);  // vec<int, int, ...> -> vec<bool, bool, ...>

	if(!__has_feature(memory_sanitizer))
	{
		auto align = llvm::ConstantInt::get(i32Ty, alignment);
		auto func = llvm::Intrinsic::getDeclaration(jit->module.get(), llvm::Intrinsic::masked_scatter, { elVecTy, elPtrVecTy });
		jit->builder->CreateCall(func, { val, elPtrs, align, i1Mask });
	}
	else  // __has_feature(memory_sanitizer)
	{
		// MemorySanitizer currently does not support instrumenting llvm::Intrinsic::masked_scatter
		// Work around it by emulating scatter with element-wise stores.
		// TODO(b/172238865): Remove when supported by MemorySanitizer.
		for(unsigned i = 0; i < numEls; i++)
		{
			// Check mask for this element; branch around the store when clear.
			auto idx = llvm::ConstantInt::get(i32Ty, i);
			auto thenBlock = llvm::BasicBlock::Create(*jit->context, "", jit->function);
			auto mergeBlock = llvm::BasicBlock::Create(*jit->context, "", jit->function);
			jit->builder->CreateCondBr(jit->builder->CreateExtractElement(i1Mask, idx), thenBlock, mergeBlock);
			jit->builder->SetInsertPoint(thenBlock);

			auto el = jit->builder->CreateExtractElement(val, idx);
			auto elPtr = jit->builder->CreateExtractElement(elPtrs, idx);
			Nucleus::createStore(V(el), V(elPtr), T(elTy), /*isVolatile */ false, alignment, /* atomic */ false, std::memory_order_relaxed);

			jit->builder->CreateBr(mergeBlock);
			jit->builder->SetInsertPoint(mergeBlock);
		}
	}
}
void Scatter(RValue<Pointer<Float>> base, RValue<Float4> val, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment)
{
	// Masked scatter of four floats to base plus per-lane byte offsets.
	createScatter(V(base.value()), V(val.value()), V(offsets.value()), V(mask.value()), alignment);
}
void Scatter(RValue<Pointer<Int>> base, RValue<Int4> val, RValue<Int4> offsets, RValue<Int4> mask, unsigned int alignment)
{
	// Masked scatter of four ints to base plus per-lane byte offsets.
	createScatter(V(base.value()), V(val.value()), V(offsets.value()), V(mask.value()), alignment);
}
void Nucleus::createFence(std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// Emit a stand-alone memory fence with the LLVM ordering equivalent to
	// the given std::memory_order.
	const auto ordering = atomicOrdering(true, memoryOrder);
	jit->builder->CreateFence(ordering);
}
// Computes the address of element 'index' in an array of 'type' starting at
// 'ptr' (i.e. ptr + index * sizeof(type)). 'unsignedIndex' selects zero- vs
// sign-extension of 32-bit indices on 64-bit targets.
Value *Nucleus::createGEP(Value *ptr, Type *type, Value *index, bool unsignedIndex)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	ASSERT(V(ptr)->getType()->getContainedType(0) == T(type));

	if(sizeof(void *) == 8)
	{
		// LLVM manual: "When indexing into an array, pointer or vector,
		// integers of any width are allowed, and they are not required to
		// be constant. These integers are treated as signed values where
		// relevant."
		//
		// Thus if we want indexes to be treated as unsigned we have to
		// zero-extend them ourselves.
		//
		// Note that this is not because we want to address anywhere near
		// 4 GB of data. Instead this is important for performance because
		// x86 supports automatic zero-extending of 32-bit registers to
		// 64-bit. Thus when indexing into an array using a uint32 is
		// actually faster than an int32.
		index = unsignedIndex ? createZExt(index, Long::type()) : createSExt(index, Long::type());
	}

	// For non-emulated types we can rely on LLVM's GEP to calculate the
	// effective address correctly.
	if(asInternalType(type) == Type_LLVM)
	{
		return V(jit->builder->CreateGEP(V(ptr), V(index)));
	}

	// For emulated types we have to multiply the index by the intended
	// type size ourselves to obain the byte offset.
	index = (sizeof(void *) == 8) ? createMul(index, createConstantLong((int64_t)typeSize(type))) : createMul(index, createConstantInt((int)typeSize(type)));

	// Cast to a byte pointer, apply the byte offset, and cast back to the
	// original pointer type.
	return createBitCast(
	    V(jit->builder->CreateGEP(V(createBitCast(ptr, T(llvm::PointerType::get(T(Byte::type()), 0)))), V(index))),
	    T(llvm::PointerType::get(T(type), 0)));
}
// Atomic read-modify-write helpers. Each emits a single LLVM 'atomicrmw'
// instruction on *ptr with the requested operation and memory ordering, and
// returns the value previously stored at ptr.
Value *Nucleus::createAtomicAdd(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::Add, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicSub(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::Sub, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicAnd(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::And, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicOr(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::Or, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicXor(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::Xor, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}
// Atomic min/max (signed and unsigned) and exchange, continuing the atomicrmw
// wrappers above; each returns the previously stored value.
Value *Nucleus::createAtomicMin(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::Min, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicMax(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::Max, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicUMin(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::UMin, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicUMax(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::UMax, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}

Value *Nucleus::createAtomicExchange(Value *ptr, Value *value, std::memory_order memoryOrder)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, V(ptr), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                       llvm::MaybeAlign(),
#endif
	                                       atomicOrdering(true, memoryOrder)));
}
// Emits a 'cmpxchg': if *ptr equals 'compare', stores 'value'; returns the
// value read from ptr either way. Success and failure memory orderings are
// mapped separately from the two std::memory_order arguments.
Value *Nucleus::createAtomicCompareExchange(Value *ptr, Value *value, Value *compare, std::memory_order memoryOrderEqual, std::memory_order memoryOrderUnequal)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// Note: AtomicCmpXchgInstruction returns a 2-member struct containing {result, success-flag}, not the result directly.
	return V(jit->builder->CreateExtractValue(
	    jit->builder->CreateAtomicCmpXchg(V(ptr), V(compare), V(value),
#if LLVM_VERSION_MAJOR >= 11
	                                      llvm::MaybeAlign(),
#endif
	                                      atomicOrdering(true, memoryOrderEqual),
	                                      atomicOrdering(true, memoryOrderUnequal)),
	    llvm::ArrayRef<unsigned>(0u)));
}
// Conversion wrappers: each emits the corresponding LLVM cast instruction
// converting 'v' to 'destType'.
Value *Nucleus::createTrunc(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateTrunc(V(v), T(destType)));
}

Value *Nucleus::createZExt(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateZExt(V(v), T(destType)));
}

Value *Nucleus::createSExt(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateSExt(V(v), T(destType)));
}

Value *Nucleus::createFPToUI(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFPToUI(V(v), T(destType)));
}

Value *Nucleus::createFPToSI(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFPToSI(V(v), T(destType)));
}

Value *Nucleus::createSIToFP(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateSIToFP(V(v), T(destType)));
}

Value *Nucleus::createFPTrunc(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFPTrunc(V(v), T(destType)));
}

Value *Nucleus::createFPExt(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFPExt(V(v), T(destType)));
}
// Reinterprets the bits of 'v' as 'destType'.
Value *Nucleus::createBitCast(Value *v, Type *destType)
{
	RR_DEBUG_INFO_UPDATE_LOC();

	// Bitcasts must be between types of the same logical size. But with emulated narrow vectors we need
	// support for casting between scalars and wide vectors. Emulate them by writing to the stack and
	// reading back as the destination type.
	if(!V(v)->getType()->isVectorTy() && T(destType)->isVectorTy())
	{
		// Scalar -> vector: store the scalar through a pointer aliased to the
		// vector's stack slot, then load the whole vector.
		Value *readAddress = allocateStackVariable(destType);
		Value *writeAddress = createBitCast(readAddress, T(llvm::PointerType::get(V(v)->getType(), 0)));
		createStore(v, writeAddress, T(V(v)->getType()));
		return createLoad(readAddress, destType);
	}
	else if(V(v)->getType()->isVectorTy() && !T(destType)->isVectorTy())
	{
		// Vector -> scalar: store the vector, then read it back through a
		// pointer of the scalar destination type.
		Value *writeAddress = allocateStackVariable(T(V(v)->getType()));
		createStore(v, writeAddress, T(V(v)->getType()));
		Value *readAddress = createBitCast(writeAddress, T(llvm::PointerType::get(T(destType), 0)));
		return createLoad(readAddress, destType);
	}

	return V(jit->builder->CreateBitCast(V(v), T(destType)));
}
// Integer comparison wrappers: each emits an LLVM 'icmp' with the matching
// predicate (U* = unsigned, S* = signed), yielding i1 (or a vector of i1).
Value *Nucleus::createICmpEQ(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpEQ(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpNE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpNE(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpUGT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpUGT(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpUGE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpUGE(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpULT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpULT(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpULE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpULE(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpSGT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpSGT(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpSGE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpSGE(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpSLT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpSLT(V(lhs), V(rhs)));
}

Value *Nucleus::createICmpSLE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateICmpSLE(V(lhs), V(rhs)));
}
// Floating-point comparison wrappers, mapping directly onto LLVM 'fcmp'
// predicates: O* are ordered comparisons (false when either operand is NaN),
// U* are unordered (true when either operand is NaN). ORD/UNO test for the
// absence/presence of NaN operands themselves.
Value *Nucleus::createFCmpOEQ(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpOEQ(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpOGT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpOGT(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpOGE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpOGE(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpOLT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpOLT(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpOLE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpOLE(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpONE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpONE(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpORD(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpORD(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpUNO(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpUNO(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpUEQ(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpUEQ(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpUGT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpUGT(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpUGE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpUGE(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpULT(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpULT(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpULE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpULE(V(lhs), V(rhs)));
}

Value *Nucleus::createFCmpUNE(Value *lhs, Value *rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(jit->builder->CreateFCmpUNE(V(lhs), V(rhs)));
}
Value *Nucleus::createExtractElement(Value *vector, Type *type, int index)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// The caller-supplied element type must match the vector's element type.
	ASSERT(V(vector)->getType()->getContainedType(0) == T(type));
	llvm::Value *laneIndex = V(createConstantInt(index));
	return V(jit->builder->CreateExtractElement(V(vector), laneIndex));
}
Value *Nucleus::createInsertElement(Value *vector, Value *element, int index)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// Produce a copy of 'vector' with 'element' placed at lane 'index'.
	llvm::Value *laneIndex = V(createConstantInt(index));
	return V(jit->builder->CreateInsertElement(V(vector), V(element), laneIndex));
}
Value *Nucleus::createShuffleVector(Value *v1, Value *v2, const int *select)
{
	RR_DEBUG_INFO_UPDATE_LOC();

	// Build the constant swizzle vector expected by LLVM's shufflevector from
	// the caller-provided lane indices. 'select' must supply one index per
	// lane of v1's fixed-size vector type. Using a SmallVector removes the
	// previous hard 16-lane limit while still avoiding a heap allocation for
	// the common small cases.
	int size = llvm::cast<llvm::FixedVectorType>(V(v1)->getType())->getNumElements();
	llvm::SmallVector<llvm::Constant *, 16> swizzle(size);

	for(int i = 0; i < size; i++)
	{
		swizzle[i] = llvm::ConstantInt::get(llvm::Type::getInt32Ty(*jit->context), select[i]);
	}

	llvm::Value *shuffle = llvm::ConstantVector::get(swizzle);

	return V(jit->builder->CreateShuffleVector(V(v1), V(v2), shuffle));
}
Value *Nucleus::createSelect(Value *c, Value *ifTrue, Value *ifFalse)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// Lower to an LLVM select instruction: c ? ifTrue : ifFalse.
	auto *condition = V(c);
	return V(jit->builder->CreateSelect(condition, V(ifTrue), V(ifFalse)));
}
SwitchCases *Nucleus::createSwitch(Value *control, BasicBlock *defaultBranch, unsigned numCases)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// Emit an LLVM switch; the llvm::SwitchInst itself doubles as the opaque
	// SwitchCases handle that addSwitchCase() later populates.
	llvm::SwitchInst *sw = jit->builder->CreateSwitch(V(control), B(defaultBranch), numCases);
	return reinterpret_cast<SwitchCases *>(sw);
}
void Nucleus::addSwitchCase(SwitchCases *switchCases, int label, BasicBlock *branch)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// Append one 'case label: goto branch' entry to a switch previously
	// created by createSwitch().
	auto *sw = reinterpret_cast<llvm::SwitchInst *>(switchCases);
	auto *caseValue = llvm::ConstantInt::get(llvm::Type::getInt32Ty(*jit->context), label, true);
	sw->addCase(caseValue, B(branch));
}
// Terminates the current basic block with an 'unreachable' instruction.
void Nucleus::createUnreachable()
{
	RR_DEBUG_INFO_UPDATE_LOC();
	jit->builder->CreateUnreachable();
}
// Type introspection helpers.
Type *Nucleus::getType(Value *value)
{
	// The Reactor type of an existing value.
	return T(V(value)->getType());
}

Type *Nucleus::getContainedType(Type *vectorType)
{
	// The element type of a vector (or pointee of a pointer) type.
	return T(T(vectorType)->getContainedType(0));
}

Type *Nucleus::getPointerType(Type *ElementType)
{
	// Pointer-to-ElementType in the default address space.
	return T(llvm::PointerType::get(T(ElementType), 0));
}

static llvm::Type *getNaturalIntType()
{
	// LLVM integer type matching the host's 'int' width.
	return llvm::Type::getIntNTy(*jit->context, sizeof(int) * 8);
}
Type *Nucleus::getPrintfStorageType(Type *valueType)
{
	// Map a Reactor value type to the type it occupies when passed through
	// printf-style varargs: integers use the natural 'int' width, 'float'
	// promotes to double.
	llvm::Type *ty = T(valueType);

	if(ty->isIntegerTy())
	{
		return T(getNaturalIntType());
	}
	else if(ty->isFloatTy())
	{
		return T(llvm::Type::getDoubleTy(*jit->context));
	}

	UNIMPLEMENTED_NO_BUG("getPrintfStorageType: add more cases as needed");
	return {};
}
// Constant creation helpers: wrap LLVM constants of the matching width.
// Signed/unsigned overloads differ only in whether LLVM treats the incoming
// bit pattern as sign-extended.
Value *Nucleus::createNullValue(Type *Ty)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::Constant::getNullValue(T(Ty)));
}

Value *Nucleus::createConstantLong(int64_t i)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt64Ty(*jit->context), i, true));
}

Value *Nucleus::createConstantInt(int i)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt32Ty(*jit->context), i, true));
}

Value *Nucleus::createConstantInt(unsigned int i)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt32Ty(*jit->context), i, false));
}

Value *Nucleus::createConstantBool(bool b)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt1Ty(*jit->context), b));
}

Value *Nucleus::createConstantByte(signed char i)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt8Ty(*jit->context), i, true));
}

Value *Nucleus::createConstantByte(unsigned char i)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt8Ty(*jit->context), i, false));
}

Value *Nucleus::createConstantShort(short i)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt16Ty(*jit->context), i, true));
}

Value *Nucleus::createConstantShort(unsigned short i)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantInt::get(llvm::Type::getInt16Ty(*jit->context), i, false));
}

Value *Nucleus::createConstantFloat(float x)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantFP::get(T(Float::type()), x));
}

Value *Nucleus::createNullPointer(Type *Ty)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	return V(llvm::ConstantPointerNull::get(llvm::PointerType::get(T(Ty), 0)));
}
// Builds a constant integer vector. For emulated narrow types the underlying
// LLVM vector is wider than the logical Reactor type, so the provided
// constants are replicated (i % numConstants) to fill all hardware lanes.
Value *Nucleus::createConstantVector(const int64_t *constants, Type *type)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	ASSERT(llvm::isa<llvm::VectorType>(T(type)));
	const int numConstants = elementCount(type);                                             // Number of provided constants for the (emulated) type.
	const int numElements = llvm::cast<llvm::FixedVectorType>(T(type))->getNumElements();    // Number of elements of the underlying vector type.
	ASSERT(numElements <= 16 && numConstants <= numElements);
	llvm::Constant *constantVector[16];

	for(int i = 0; i < numElements; i++)
	{
		constantVector[i] = llvm::ConstantInt::get(T(type)->getContainedType(0), constants[i % numConstants]);
	}

	return V(llvm::ConstantVector::get(llvm::ArrayRef<llvm::Constant *>(constantVector, numElements)));
}
// Floating-point counterpart of the above: replicates the provided constants
// across all lanes of the underlying LLVM vector type.
Value *Nucleus::createConstantVector(const double *constants, Type *type)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	ASSERT(llvm::isa<llvm::VectorType>(T(type)));
	const int numConstants = elementCount(type);                                             // Number of provided constants for the (emulated) type.
	const int numElements = llvm::cast<llvm::FixedVectorType>(T(type))->getNumElements();    // Number of elements of the underlying vector type.
	ASSERT(numElements <= 8 && numConstants <= numElements);
	llvm::Constant *constantVector[8];

	for(int i = 0; i < numElements; i++)
	{
		constantVector[i] = llvm::ConstantFP::get(T(type)->getContainedType(0), constants[i % numConstants]);
	}

	return V(llvm::ConstantVector::get(llvm::ArrayRef<llvm::Constant *>(constantVector, numElements)));
}
// Emits a module-level NUL-terminated string and returns a pointer to it.
Value *Nucleus::createConstantString(const char *v)
{
	// NOTE: Do not call RR_DEBUG_INFO_UPDATE_LOC() here to avoid recursion when called from rr::Printv
	auto ptr = jit->builder->CreateGlobalStringPtr(v);
	return V(ptr);
}

void Nucleus::setOptimizerCallback(OptimizerCallback *callback)
{
	// The LLVM backend does not produce optimizer reports.
	(void)callback;
}
// Reactor scalar/small-vector type accessors. Scalar types map directly onto
// LLVM integer types; Byte4/SByte4 use the emulated Type_v4i8 representation.
Type *Void::type()
{
	return T(llvm::Type::getVoidTy(*jit->context));
}

Type *Bool::type()
{
	return T(llvm::Type::getInt1Ty(*jit->context));
}

Type *Byte::type()
{
	return T(llvm::Type::getInt8Ty(*jit->context));
}

Type *SByte::type()
{
	return T(llvm::Type::getInt8Ty(*jit->context));
}

Type *Short::type()
{
	return T(llvm::Type::getInt16Ty(*jit->context));
}

Type *UShort::type()
{
	return T(llvm::Type::getInt16Ty(*jit->context));
}

Type *Byte4::type()
{
	return T(Type_v4i8);
}

Type *SByte4::type()
{
	return T(Type_v4i8);
}
// Byte8 (8 x unsigned 8-bit) operations. On x86 these use the matching SSE
// intrinsics; other targets fall back to the generic lowering helpers.
RValue<Byte8> AddSat(RValue<Byte8> x, RValue<Byte8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::paddusb(x, y);
#else
	return As<Byte8>(V(lowerPUADDSAT(V(x.value()), V(y.value()))));
#endif
}

RValue<Byte8> SubSat(RValue<Byte8> x, RValue<Byte8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psubusb(x, y);
#else
	return As<Byte8>(V(lowerPUSUBSAT(V(x.value()), V(y.value()))));
#endif
}

// Returns an integer whose low bits collect the sign (top) bit of each byte.
RValue<Int> SignMask(RValue<Byte8> x)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmovmskb(x);
#else
	return As<Int>(V(lowerSignMask(V(x.value()), T(Int::type()))));
#endif
}

//	RValue<Byte8> CmpGT(RValue<Byte8> x, RValue<Byte8> y)
//	{
//#if defined(__i386__) || defined(__x86_64__)
//		return x86::pcmpgtb(x, y);   // FIXME: Signedness
//#else
//		return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value()), V(y.value()), T(Byte8::type()))));
//#endif
//	}

RValue<Byte8> CmpEQ(RValue<Byte8> x, RValue<Byte8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pcmpeqb(x, y);
#else
	return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value()), V(y.value()), T(Byte8::type()))));
#endif
}

Type *Byte8::type()
{
	return T(Type_v8i8);
}
// SByte8 (8 x signed 8-bit) operations, mirroring the Byte8 set above with
// signed-saturation variants.
RValue<SByte8> AddSat(RValue<SByte8> x, RValue<SByte8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::paddsb(x, y);
#else
	return As<SByte8>(V(lowerPSADDSAT(V(x.value()), V(y.value()))));
#endif
}

RValue<SByte8> SubSat(RValue<SByte8> x, RValue<SByte8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psubsb(x, y);
#else
	return As<SByte8>(V(lowerPSSUBSAT(V(x.value()), V(y.value()))));
#endif
}

// Returns an integer whose low bits collect the sign (top) bit of each byte.
RValue<Int> SignMask(RValue<SByte8> x)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmovmskb(As<Byte8>(x));
#else
	return As<Int>(V(lowerSignMask(V(x.value()), T(Int::type()))));
#endif
}

RValue<Byte8> CmpGT(RValue<SByte8> x, RValue<SByte8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pcmpgtb(x, y);
#else
	return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value()), V(y.value()), T(Byte8::type()))));
#endif
}

RValue<Byte8> CmpEQ(RValue<SByte8> x, RValue<SByte8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pcmpeqb(As<Byte8>(x), As<Byte8>(y));
#else
	return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value()), V(y.value()), T(Byte8::type()))));
#endif
}
// Vector type accessors: 8/16-byte and 2-short vectors. The Type_v* constants
// are the emulated narrow-vector representations used by this backend.
Type *SByte8::type()
{
	return T(Type_v8i8);
}

Type *Byte16::type()
{
	return T(llvm::VectorType::get(T(Byte::type()), 16, false));
}

Type *SByte16::type()
{
	return T(llvm::VectorType::get(T(SByte::type()), 16, false));
}

Type *Short2::type()
{
	return T(Type_v2i16);
}

Type *UShort2::type()
{
	return T(Type_v2i16);
}
// Truncating conversion from Int4: reinterpret the four i32 lanes as eight
// shorts and keep lanes 0,2,4,6 (the low 16 bits of each i32 on little-endian
// targets), then narrow the result to a 4-element vector.
Short4::Short4(RValue<Int4> cast)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	int select[8] = { 0, 2, 4, 6, 0, 2, 4, 6 };
	Value *short8 = Nucleus::createBitCast(cast.value(), Short8::type());
	Value *packed = Nucleus::createShuffleVector(short8, short8, select);

	Value *short4 = As<Short4>(Int2(As<Int4>(packed))).value();

	storeValue(short4);
}
//	Short4::Short4(RValue<Float> cast)
//	{
//	}

// Conversion from Float4: convert to 32-bit integers first, then narrow to
// 16 bits with signed saturation (packssdw on x86, generic pack elsewhere).
Short4::Short4(RValue<Float4> cast)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	Int4 v4i32 = Int4(cast);
#if defined(__i386__) || defined(__x86_64__)
	v4i32 = As<Int4>(x86::packssdw(v4i32, v4i32));
#else
	Value *v = v4i32.loadValue();
	v4i32 = As<Int4>(V(lowerPack(V(v), V(v), true)));
#endif

	storeValue(As<Short4>(Int2(v4i32)).value());
}
// Per-lane logical left shift by an immediate count.
RValue<Short4> operator<<(RValue<Short4> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	//	return RValue<Short4>(Nucleus::createShl(lhs.value(), rhs.value()));

	return x86::psllw(lhs, rhs);
#else
	return As<Short4>(V(lowerVectorShl(V(lhs.value()), rhs)));
#endif
}

// Per-lane arithmetic (sign-preserving) right shift by an immediate count.
RValue<Short4> operator>>(RValue<Short4> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psraw(lhs, rhs);
#else
	return As<Short4>(V(lowerVectorAShr(V(lhs.value()), rhs)));
#endif
}
// Per-lane signed maximum.
RValue<Short4> Max(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmaxsw(x, y);
#else
	return RValue<Short4>(V(lowerPMINMAX(V(x.value()), V(y.value()), llvm::ICmpInst::ICMP_SGT)));
#endif
}

// Per-lane signed minimum.
RValue<Short4> Min(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pminsw(x, y);
#else
	return RValue<Short4>(V(lowerPMINMAX(V(x.value()), V(y.value()), llvm::ICmpInst::ICMP_SLT)));
#endif
}
// Per-lane signed saturating addition.
RValue<Short4> AddSat(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::paddsw(x, y);
#else
	return As<Short4>(V(lowerPSADDSAT(V(x.value()), V(y.value()))));
#endif
}

// Per-lane signed saturating subtraction.
RValue<Short4> SubSat(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psubsw(x, y);
#else
	return As<Short4>(V(lowerPSSUBSAT(V(x.value()), V(y.value()))));
#endif
}
// Per-lane signed multiply keeping the high 16 bits of each 32-bit product.
RValue<Short4> MulHigh(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmulhw(x, y);
#else
	return As<Short4>(V(lowerMulHigh(V(x.value()), V(y.value()), true)));
#endif
}

// Multiply-accumulate (pmaddwd semantics): multiplies corresponding 16-bit
// lanes and sums adjacent pairs of products into 32-bit lanes.
RValue<Int2> MulAdd(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmaddwd(x, y);
#else
	return As<Int2>(V(lowerMulAdd(V(x.value()), V(y.value()))));
#endif
}
// Narrows two Short4s into eight bytes with signed saturation. After the
// pack, x's bytes land in i32 lane 0 and y's in i32 lane 2; the 0x0202
// swizzle gathers them into the low half of the vector.
RValue<SByte8> PackSigned(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	auto result = x86::packsswb(x, y);
#else
	auto result = V(lowerPack(V(x.value()), V(y.value()), true));
#endif
	return As<SByte8>(Swizzle(As<Int4>(result), 0x0202));
}

// Same as PackSigned, but saturating to the unsigned byte range.
RValue<Byte8> PackUnsigned(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	auto result = x86::packuswb(x, y);
#else
	auto result = V(lowerPack(V(x.value()), V(y.value()), false));
#endif
	return As<Byte8>(Swizzle(As<Int4>(result), 0x0202));
}
// Lane-wise signed greater-than compare; each lane yields all-ones (true) or zero.
RValue<Short4> CmpGT(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pcmpgtw(x, y);
#else
	return As<Short4>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value()), V(y.value()), T(Short4::type()))));
#endif
}
// Lane-wise equality compare; each lane yields all-ones (true) or zero.
RValue<Short4> CmpEQ(RValue<Short4> x, RValue<Short4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pcmpeqw(x, y);
#else
	return As<Short4>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value()), V(y.value()), T(Short4::type()))));
#endif
}
// Short4 is represented by the emulated 4 x i16 vector type.
Type *Short4::type()
{
	return T(Type_v4i16);
}
// Convert four floats to four unsigned 16-bit values, optionally clamping the
// inputs to [0, 0xFFFF] first (saturate); otherwise the conversion wraps.
UShort4::UShort4(RValue<Float4> cast, bool saturate)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	if(!saturate)
	{
		*this = Short4(Int4(cast));
		return;
	}
#if defined(__i386__) || defined(__x86_64__)
	if(CPUID::supportsSSE4_1())
	{
		// Only the upper bound needs explicit clamping; packusdw saturates
		// negative values to 0x0000 on its own.
		Int4 clamped(Min(cast, Float4(0xFFFF)));
		*this = As<Short4>(PackUnsigned(clamped, clamped));
		return;
	}
#endif
	// Portable path: clamp to [0, 0xFFFF] in floating point before converting.
	*this = Short4(Int4(Max(Min(cast, Float4(0xFFFF)), Float4(0x0000))));
}
// Lane-wise logical left shift of 4 x u16 by an immediate count.
RValue<UShort4> operator<<(RValue<UShort4> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	//	return RValue<Short4>(Nucleus::createShl(lhs.value(), rhs.value()));
	// Left shift is sign-agnostic, so reuse the signed-word shift.
	return As<UShort4>(x86::psllw(As<Short4>(lhs), rhs));
#else
	return As<UShort4>(V(lowerVectorShl(V(lhs.value()), rhs)));
#endif
}
// Lane-wise logical (zero-filling) right shift of 4 x u16 by an immediate count.
RValue<UShort4> operator>>(RValue<UShort4> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	//	return RValue<Short4>(Nucleus::createLShr(lhs.value(), rhs.value()));
	return x86::psrlw(lhs, rhs);
#else
	return As<UShort4>(V(lowerVectorLShr(V(lhs.value()), rhs)));
#endif
}
// Lane-wise unsigned maximum of two 4 x u16 vectors.
RValue<UShort4> Max(RValue<UShort4> x, RValue<UShort4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// No unsigned word max is available on the baseline path: bias both operands
	// into signed range (subtract 0x8000), take the signed max, then undo the bias.
	Short4 bias(0x8000u, 0x8000u, 0x8000u, 0x8000u);
	return RValue<UShort4>(Max(As<Short4>(x) - bias, As<Short4>(y) - bias) + bias);
}
// Lane-wise unsigned minimum of two 4 x u16 vectors.
RValue<UShort4> Min(RValue<UShort4> x, RValue<UShort4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	// No unsigned word min is available on the baseline path: bias both operands
	// into signed range (subtract 0x8000), take the signed min, then undo the bias.
	Short4 bias(0x8000u, 0x8000u, 0x8000u, 0x8000u);
	return RValue<UShort4>(Min(As<Short4>(x) - bias, As<Short4>(y) - bias) + bias);
}
// Lane-wise unsigned saturating addition: results are clamped to [0, 0xFFFF].
RValue<UShort4> AddSat(RValue<UShort4> x, RValue<UShort4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::paddusw(x, y);
#else
	return As<UShort4>(V(lowerPUADDSAT(V(x.value()), V(y.value()))));
#endif
}
// Lane-wise unsigned saturating subtraction: results are clamped at 0.
RValue<UShort4> SubSat(RValue<UShort4> x, RValue<UShort4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psubusw(x, y);
#else
	return As<UShort4>(V(lowerPUSUBSAT(V(x.value()), V(y.value()))));
#endif
}
// Lane-wise unsigned multiply returning the high 16 bits of each 32-bit product.
RValue<UShort4> MulHigh(RValue<UShort4> x, RValue<UShort4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmulhuw(x, y);
#else
	return As<UShort4>(V(lowerMulHigh(V(x.value()), V(y.value()), false)));  // false = unsigned
#endif
}
// Lane-wise rounding average of two 4 x u16 vectors (pavgw: (x + y + 1) >> 1).
RValue<UShort4> Average(RValue<UShort4> x, RValue<UShort4> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pavgw(x, y);
#else
	return As<UShort4>(V(lowerPAVG(V(x.value()), V(y.value()))));
#endif
}
// UShort4 shares the emulated 4 x i16 vector representation with Short4;
// signedness is carried by the operations, not the type.
Type *UShort4::type()
{
	return T(Type_v4i16);
}
// Lane-wise left shift of 8 x i16 by an immediate count.
RValue<Short8> operator<<(RValue<Short8> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psllw(lhs, rhs);
#else
	return As<Short8>(V(lowerVectorShl(V(lhs.value()), rhs)));
#endif
}
// Lane-wise arithmetic (sign-extending) right shift of 8 x i16 by an immediate count.
RValue<Short8> operator>>(RValue<Short8> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psraw(lhs, rhs);
#else
	return As<Short8>(V(lowerVectorAShr(V(lhs.value()), rhs)));
#endif
}
// Multiply corresponding i16 lanes and horizontally add adjacent i32 products
// (pmaddwd semantics), yielding four 32-bit results from eight 16-bit lanes.
RValue<Int4> MulAdd(RValue<Short8> x, RValue<Short8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmaddwd(x, y);
#else
	return As<Int4>(V(lowerMulAdd(V(x.value()), V(y.value()))));
#endif
}
// Lane-wise signed multiply returning the high 16 bits of each 32-bit product.
RValue<Short8> MulHigh(RValue<Short8> x, RValue<Short8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmulhw(x, y);
#else
	return As<Short8>(V(lowerMulHigh(V(x.value()), V(y.value()), true)));  // true = signed
#endif
}
// Short8 maps to a native fixed-width 8 x i16 LLVM vector (not scalable).
Type *Short8::type()
{
	return T(llvm::VectorType::get(T(Short::type()), 8, false));
}
// Lane-wise logical left shift of 8 x u16 by an immediate count.
RValue<UShort8> operator<<(RValue<UShort8> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	// Left shift is sign-agnostic, so reuse the signed-word shift.
	return As<UShort8>(x86::psllw(As<Short8>(lhs), rhs));
#else
	return As<UShort8>(V(lowerVectorShl(V(lhs.value()), rhs)));
#endif
}
// Lane-wise logical (zero-filling) right shift of 8 x u16 by an immediate count.
RValue<UShort8> operator>>(RValue<UShort8> lhs, unsigned char rhs)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::psrlw(lhs, rhs);  // FIXME: Fallback required
#else
	return As<UShort8>(V(lowerVectorLShr(V(lhs.value()), rhs)));
#endif
}
// Lane-wise unsigned multiply returning the high 16 bits of each 32-bit product.
RValue<UShort8> MulHigh(RValue<UShort8> x, RValue<UShort8> y)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::pmulhuw(x, y);
#else
	return As<UShort8>(V(lowerMulHigh(V(x.value()), V(y.value()), false)));  // false = unsigned
#endif
}
// UShort8 maps to a native fixed-width 8 x i16 LLVM vector (not scalable).
Type *UShort8::type()
{
	return T(llvm::VectorType::get(T(UShort::type()), 8, false));
}
// Post-increment: emits val = val + 1 and yields the value prior to the increment.
RValue<Int> operator++(Int &val, int)  // Post-increment
{
	RR_DEBUG_INFO_UPDATE_LOC();
	RValue<Int> previous = val;
	val.storeValue(Nucleus::createAdd(previous.value(), Nucleus::createConstantInt(1)));
	return previous;
}
// Pre-increment: emits val = val + 1 and yields the incremented variable.
const Int &operator++(Int &val)  // Pre-increment
{
	RR_DEBUG_INFO_UPDATE_LOC();
	val.storeValue(Nucleus::createAdd(val.loadValue(), Nucleus::createConstantInt(1)));
	return val;
}
// Post-decrement: emits val = val - 1 and yields the value prior to the decrement.
RValue<Int> operator--(Int &val, int)  // Post-decrement
{
	RR_DEBUG_INFO_UPDATE_LOC();
	RValue<Int> previous = val;
	val.storeValue(Nucleus::createSub(previous.value(), Nucleus::createConstantInt(1)));
	return previous;
}
// Pre-decrement: emits val = val - 1 and yields the decremented variable.
const Int &operator--(Int &val)  // Pre-decrement
{
	RR_DEBUG_INFO_UPDATE_LOC();
	val.storeValue(Nucleus::createSub(val.loadValue(), Nucleus::createConstantInt(1)));
	return val;
}
// Convert a float to an int using the current rounding mode
// (round-to-nearest-even by default, per cvtss2si).
RValue<Int> RoundInt(RValue<Float> cast)
{
	RR_DEBUG_INFO_UPDATE_LOC();
#if defined(__i386__) || defined(__x86_64__)
	return x86::cvtss2si(cast);
#else
	return RValue<Int>(V(lowerRoundInt(V(cast.value()), T(Int::type()))));
#endif
}
// Int maps to LLVM's i32.
Type *Int::type()
{
	return T(llvm::Type::getInt32Ty(*jit->context));
}
// Long maps to LLVM's i64.
Type *Long::type()
{
	return T(llvm::Type::getInt64Ty(*jit->context));
}
// Construct a UInt from a float via LLVM's fptoui conversion.
UInt::UInt(RValue<Float> cast)
{
	RR_DEBUG_INFO_UPDATE_LOC();
	storeValue(Nucleus::createFPToUI(cast.value(), UInt::type()));
}
RValue<UInt> operator++(UInt &val, int) // Post-increment
{