Make sure that all globals are internal, except for "start" functions.
The existing code, when run on a fuzzed example, triggers a runtime
assertion failure. The reason for this is that the input defines "memmove" as
an external global. However, the code generator can also generate calls to
"memmove", which it assumes is internal (see the PNaCl ABI). As a result,
the assertion that checks that global names are unique (for memmove)
fails.
This change fixes the problem by checking that globals are
internal, unless they are one of the "start" functions
or the function is an intrinsic. To allow for
non-PNaCl ABI input, a flag was added that permits functions to be
externally defined. However, even in such cases the external symbol
can't be one of Subzero's runtime helper functions.
BUG= https://code.google.com/p/nativeclient/issues/detail?id=4330
R=jpp@chromium.org, stichnot@chromium.org
Review URL: https://codereview.chromium.org/1387963002 .
diff --git a/src/IceClFlags.cpp b/src/IceClFlags.cpp
index a27fb3f..5ed9ea0 100644
--- a/src/IceClFlags.cpp
+++ b/src/IceClFlags.cpp
@@ -34,6 +34,12 @@
cl::desc("Allow error recovery when reading PNaCl bitcode."),
cl::init(false));
+cl::opt<bool> AllowExternDefinedSymbols(
+ "allow-externally-defined-symbols",
+ cl::desc("Allow global symbols to be externally defined (other than _start "
+ "and __pnacl_pso_root)."),
+ cl::init(false));
+
cl::opt<bool> AllowIacaMarks(
"allow-iaca-marks",
cl::desc("Allow IACA (Intel Architecture Code Analyzer) marks to be "
@@ -360,6 +366,7 @@
void ClFlags::resetClFlags(ClFlags &OutFlags) {
// bool fields
OutFlags.AllowErrorRecovery = false;
+ OutFlags.AllowExternDefinedSymbols = false;
OutFlags.AllowIacaMarks = false;
OutFlags.AllowUninitializedGlobals = false;
OutFlags.DataSections = false;
@@ -420,6 +427,8 @@
}
OutFlags.setAllowErrorRecovery(::AllowErrorRecovery);
+ OutFlags.setAllowExternDefinedSymbols(::AllowExternDefinedSymbols ||
+ ::DisableInternal);
OutFlags.setAllowIacaMarks(::AllowIacaMarks);
OutFlags.setAllowUninitializedGlobals(::AllowUninitializedGlobals);
OutFlags.setDataSections(::DataSections);
diff --git a/src/IceClFlags.h b/src/IceClFlags.h
index 87e16cd..0aba214 100644
--- a/src/IceClFlags.h
+++ b/src/IceClFlags.h
@@ -39,6 +39,13 @@
bool getAllowErrorRecovery() const { return AllowErrorRecovery; }
void setAllowErrorRecovery(bool NewValue) { AllowErrorRecovery = NewValue; }
+ bool getAllowExternDefinedSymbols() const {
+ return AllowExternDefinedSymbols;
+ }
+ void setAllowExternDefinedSymbols(bool NewValue) {
+ AllowExternDefinedSymbols = NewValue;
+ }
+
bool getAllowIacaMarks() const { return AllowIacaMarks; }
void setAllowIacaMarks(bool NewValue) { AllowIacaMarks = NewValue; }
@@ -238,6 +245,7 @@
private:
bool AllowErrorRecovery;
+ bool AllowExternDefinedSymbols;
bool AllowIacaMarks;
bool AllowUninitializedGlobals;
bool DataSections;
diff --git a/src/IceConverter.cpp b/src/IceConverter.cpp
index a4d4f53..349fde5 100644
--- a/src/IceConverter.cpp
+++ b/src/IceConverter.cpp
@@ -867,6 +867,13 @@
FunctionDeclaration *IceFunc = FunctionDeclaration::create(
Ctx, Signature, Func.getCallingConv(), Func.getLinkage(), Func.empty());
IceFunc->setName(Func.getName());
+ if (!IceFunc->verifyLinkageCorrect(Ctx)) {
+ std::string Buffer;
+ raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Function " << IceFunc->getName()
+ << " has incorrect linkage: " << IceFunc->getLinkageName();
+ report_fatal_error(StrBuf.str());
+ }
GlobalDeclarationMap[&Func] = IceFunc;
}
// Install global variable declarations.
@@ -879,6 +886,13 @@
Var->setAlignment(GV->getAlignment());
Var->setIsConstant(GV->isConstant());
Var->setLinkage(GV->getLinkage());
+ if (!Var->verifyLinkageCorrect(Ctx)) {
+ std::string Buffer;
+ raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Global " << Var->getName()
+ << " has incorrect linkage: " << Var->getLinkageName();
+ report_fatal_error(StrBuf.str());
+ }
GlobalDeclarationMap[GV] = Var;
}
}
diff --git a/src/IceELFSection.cpp b/src/IceELFSection.cpp
index 3e33c99..3768150 100644
--- a/src/IceELFSection.cpp
+++ b/src/IceELFSection.cpp
@@ -116,7 +116,12 @@
NewSymbol.Section = NullSection;
NewSymbol.Number = ELFSym::UnknownNumber;
bool Unique = GlobalSymbols.insert(std::make_pair(Name, NewSymbol)).second;
- assert(Unique);
+ if (!Unique) {
+ std::string Buffer;
+ llvm::raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Symbol external and defined: " << Name;
+ llvm::report_fatal_error(StrBuf.str());
+ }
(void)Unique;
}
diff --git a/src/IceGlobalInits.h b/src/IceGlobalInits.h
index 8f51db2..82e5cde 100644
--- a/src/IceGlobalInits.h
+++ b/src/IceGlobalInits.h
@@ -87,11 +87,29 @@
return getSuppressMangling() ? Name : Ctx->mangleName(Name);
}
+ /// Returns textual name of linkage.
+ const char *getLinkageName() const {
+ return isInternal() ? "internal" : "external";
+ }
+
protected:
GlobalDeclaration(GlobalDeclarationKind Kind,
llvm::GlobalValue::LinkageTypes Linkage)
: Kind(Kind), Linkage(Linkage) {}
+ /// Returns true if linkage is defined correctly for the global declaration,
+ /// based on default rules.
+ bool verifyLinkageDefault(const GlobalContext *Ctx) const {
+ switch (Linkage) {
+ default:
+ return false;
+ case llvm::GlobalValue::InternalLinkage:
+ return true;
+ case llvm::GlobalValue::ExternalLinkage:
+ return Ctx->getFlags().getAllowExternDefinedSymbols();
+ }
+ }
+
const GlobalDeclarationKind Kind;
IceString Name;
llvm::GlobalValue::LinkageTypes Linkage;
@@ -124,6 +142,13 @@
void dump(GlobalContext *Ctx, Ostream &Stream) const final;
bool getSuppressMangling() const final { return isExternal() && IsProto; }
+ /// Returns true if linkage is correct for the function declaration.
+ bool verifyLinkageCorrect(const GlobalContext *Ctx) const {
+ if (isPNaClABIExternalName() || isIntrinsicName(Ctx))
+ return Linkage == llvm::GlobalValue::ExternalLinkage;
+ return verifyLinkageDefault(Ctx);
+ }
+
private:
const Ice::FuncSigType Signature;
llvm::CallingConv::ID CallingConv;
@@ -134,6 +159,19 @@
llvm::GlobalValue::LinkageTypes Linkage, bool IsProto)
: GlobalDeclaration(FunctionDeclarationKind, Linkage),
Signature(Signature), CallingConv(CallingConv), IsProto(IsProto) {}
+
+ bool isPNaClABIExternalName() const {
+ const char *Name = getName().c_str();
+ return strcmp(Name, "_start") == 0 || strcmp(Name, "__pnacl_pso_root") == 0;
+ }
+
+ bool isIntrinsicName(const GlobalContext *Ctx) const {
+ if (!hasName())
+ return false;
+ bool BadIntrinsic;
+ return Ctx->getIntrinsicsInfo().find(getName(), BadIntrinsic) &&
+ !BadIntrinsic;
+ }
};
/// Models a global variable declaration, and its initializers.
@@ -309,6 +347,11 @@
/// initialization).
void dump(GlobalContext *Ctx, Ostream &Stream) const final;
+ /// Returns true if linkage is correct for the variable declaration.
+ bool verifyLinkageCorrect(const GlobalContext *Ctx) const {
+ return verifyLinkageDefault(Ctx);
+ }
+
static bool classof(const GlobalDeclaration *Addr) {
return Addr->getKind() == VariableDeclarationKind;
}
diff --git a/src/PNaClTranslator.cpp b/src/PNaClTranslator.cpp
index c1a5919..7124da0 100644
--- a/src/PNaClTranslator.cpp
+++ b/src/PNaClTranslator.cpp
@@ -475,7 +475,16 @@
// Converts function declarations into constant value IDs.
void createValueIDsForFunctions() {
+ Ice::GlobalContext *Ctx = getTranslator().getContext();
for (const Ice::FunctionDeclaration *Func : FunctionDeclarations) {
+ if (!Func->verifyLinkageCorrect(Ctx)) {
+ std::string Buffer;
+ raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Function " << Func->getName()
+ << " has incorrect linkage: " << Func->getLinkageName();
+ Error(StrBuf.str());
+ continue;
+ }
Ice::Constant *C = nullptr;
if (!isIRGenerationDisabled()) {
C = getConstantSym(Func->getName(), Func->getSuppressMangling(),
@@ -487,7 +496,15 @@
// Converts global variable declarations into constant value IDs.
void createValueIDsForGlobalVars() {
+ Ice::GlobalContext *Ctx = getTranslator().getContext();
for (const Ice::VariableDeclaration *Decl : *VariableDeclarations) {
+ if (!Decl->verifyLinkageCorrect(Ctx)) {
+ std::string Buffer;
+ raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Global " << Decl->getName()
+ << " has incorrect linkage: " << Decl->getLinkageName();
+ Error(StrBuf.str());
+ }
Ice::Constant *C = nullptr;
if (!isIRGenerationDisabled()) {
C = getConstantSym(Decl->getName(), Decl->getSuppressMangling(),
diff --git a/tests_lit/assembler/x86/immediate_encodings.ll b/tests_lit/assembler/x86/immediate_encodings.ll
index e16ade3..5a35285 100644
--- a/tests_lit/assembler/x86/immediate_encodings.ll
+++ b/tests_lit/assembler/x86/immediate_encodings.ll
@@ -250,7 +250,7 @@
; CHECK-LABEL: testMul32Imm16Neg
; CHECK: 69 c0 01 ff ff ff imul eax,eax,0xffffff01
-define i32 @testMul32Imm32ThreeAddress(i32 %a) {
+define internal i32 @testMul32Imm32ThreeAddress(i32 %a) {
entry:
%mul = mul i32 232, %a
%add = add i32 %mul, %a
@@ -259,7 +259,7 @@
; CHECK-LABEL: testMul32Imm32ThreeAddress
; CHECK: 69 c8 e8 00 00 00 imul ecx,eax,0xe8
-define i32 @testMul32Mem32Imm32ThreeAddress(i32 %addr_arg) {
+define internal i32 @testMul32Mem32Imm32ThreeAddress(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i32*
%a = load i32, i32* %__1, align 1
@@ -269,7 +269,7 @@
; CHECK-LABEL: testMul32Mem32Imm32ThreeAddress
; CHECK: 69 00 e8 00 00 00 imul eax,DWORD PTR [eax],0xe8
-define i32 @testMul32Imm8ThreeAddress(i32 %a) {
+define internal i32 @testMul32Imm8ThreeAddress(i32 %a) {
entry:
%mul = mul i32 127, %a
%add = add i32 %mul, %a
@@ -278,7 +278,7 @@
; CHECK-LABEL: testMul32Imm8ThreeAddress
; CHECK: 6b c8 7f imul ecx,eax,0x7f
-define i32 @testMul32Mem32Imm8ThreeAddress(i32 %addr_arg) {
+define internal i32 @testMul32Mem32Imm8ThreeAddress(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i32*
%a = load i32, i32* %__1, align 1
@@ -288,7 +288,7 @@
; CHECK-LABEL: testMul32Mem32Imm8ThreeAddress
; CHECK: 6b 00 7f imul eax,DWORD PTR [eax],0x7f
-define i32 @testMul16Imm16ThreeAddress(i32 %a) {
+define internal i32 @testMul16Imm16ThreeAddress(i32 %a) {
entry:
%arg_i16 = trunc i32 %a to i16
%mul = mul i16 232, %arg_i16
@@ -299,7 +299,7 @@
; CHECK-LABEL: testMul16Imm16ThreeAddress
; CHECK: 66 69 c8 e8 00 imul cx,ax,0xe8
-define i32 @testMul16Mem16Imm16ThreeAddress(i32 %addr_arg) {
+define internal i32 @testMul16Mem16Imm16ThreeAddress(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i16*
%a = load i16, i16* %__1, align 1
@@ -310,7 +310,7 @@
; CHECK-LABEL: testMul16Mem16Imm16ThreeAddress
; CHECK: 66 69 00 e8 00 imul ax,WORD PTR [eax],0xe8
-define i32 @testMul16Imm8ThreeAddress(i32 %a) {
+define internal i32 @testMul16Imm8ThreeAddress(i32 %a) {
entry:
%arg_i16 = trunc i32 %a to i16
%mul = mul i16 127, %arg_i16
@@ -321,7 +321,7 @@
; CHECK-LABEL: testMul16Imm8ThreeAddress
; CHECK: 66 6b c8 7f imul cx,ax,0x7f
-define i32 @testMul16Mem16Imm8ThreeAddress(i32 %addr_arg) {
+define internal i32 @testMul16Mem16Imm8ThreeAddress(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i16*
%a = load i16, i16* %__1, align 1
@@ -373,7 +373,7 @@
; Test a few register encodings of "test".
declare i64 @llvm.ctlz.i64(i64, i1)
-define i64 @test_via_ctlz_64(i64 %x, i64 %y, i64 %z, i64 %w) {
+define internal i64 @test_via_ctlz_64(i64 %x, i64 %y, i64 %z, i64 %w) {
entry:
%r = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
%r2 = call i64 @llvm.ctlz.i64(i64 %y, i1 false)
diff --git a/tests_lit/assembler/x86/jump_encodings.ll b/tests_lit/assembler/x86/jump_encodings.ll
index 5a12527..e8c9e1d 100644
--- a/tests_lit/assembler/x86/jump_encodings.ll
+++ b/tests_lit/assembler/x86/jump_encodings.ll
@@ -10,7 +10,7 @@
declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
-define void @test_near_backward(i32 %iptr, i32 %val) {
+define internal void @test_near_backward(i32 %iptr, i32 %val) {
entry:
br label %next
next:
@@ -34,7 +34,7 @@
; Test one of the backward branches being too large for 8 bits
; and one being just okay.
-define void @test_far_backward1(i32 %iptr, i32 %val) {
+define internal void @test_far_backward1(i32 %iptr, i32 %val) {
entry:
br label %next
next:
@@ -79,7 +79,7 @@
; Same as test_far_backward1, but with the conditional branch being
; the one that is too far.
-define void @test_far_backward2(i32 %iptr, i32 %val) {
+define internal void @test_far_backward2(i32 %iptr, i32 %val) {
entry:
br label %next
next:
@@ -126,7 +126,7 @@
; CHECK: 8c: 0f 8e 7a ff ff ff jle c
; CHECK-NEXT: 92: eb 82 jmp 16
-define void @test_near_forward(i32 %iptr, i32 %val) {
+define internal void @test_near_forward(i32 %iptr, i32 %val) {
entry:
br label %next1
next1:
@@ -157,7 +157,8 @@
; to make sure that the instruction size accounting for the forward
; branches are correct, by the time the backward branch is hit.
; A 64-bit compare happens to use local forward branches.
-define void @test_local_forward_then_back(i64 %val64, i32 %iptr, i32 %val) {
+define internal void @test_local_forward_then_back(i64 %val64, i32 %iptr,
+ i32 %val) {
entry:
br label %next
next:
@@ -180,7 +181,7 @@
; Test that backward local branches also work and are small.
; Some of the atomic instructions use a cmpxchg loop.
-define void @test_local_backward(i64 %val64, i32 %iptr, i32 %val) {
+define internal void @test_local_backward(i64 %val64, i32 %iptr, i32 %val) {
entry:
br label %next
next:
diff --git a/tests_lit/assembler/x86/opcode_register_encodings.ll b/tests_lit/assembler/x86/opcode_register_encodings.ll
index 57226e7..d926e8f 100644
--- a/tests_lit/assembler/x86/opcode_register_encodings.ll
+++ b/tests_lit/assembler/x86/opcode_register_encodings.ll
@@ -5,7 +5,7 @@
; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 -mattr=sse4.1 \
; RUN: -sandbox | FileCheck %s
-define <8 x i16> @test_mul_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_mul_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = mul <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -14,7 +14,10 @@
}
; Test register and address mode encoding.
-define <8 x i16> @test_mul_v8i16_more_regs(<8 x i1> %cond, <8 x i16> %arg0, <8 x i16> %arg1, <8 x i16> %arg2, <8 x i16> %arg3, <8 x i16> %arg4, <8 x i16> %arg5, <8 x i16> %arg6, <8 x i16> %arg7, <8 x i16> %arg8) {
+define internal <8 x i16> @test_mul_v8i16_more_regs(
+ <8 x i1> %cond, <8 x i16> %arg0, <8 x i16> %arg1, <8 x i16> %arg2,
+ <8 x i16> %arg3, <8 x i16> %arg4, <8 x i16> %arg5, <8 x i16> %arg6,
+ <8 x i16> %arg7, <8 x i16> %arg8) {
entry:
%res1 = sub <8 x i16> %arg0, %arg1
%res2 = sub <8 x i16> %arg0, %arg2
@@ -43,7 +46,7 @@
; CHECK-DAG: psubw xmm1,XMMWORD PTR [esp
}
-define <4 x i32> @test_mul_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_mul_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = mul <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -51,7 +54,10 @@
; CHECK: 66 0f 38 40 c1 pmulld xmm0,xmm1
}
-define <4 x i32> @test_mul_v4i32_more_regs(<4 x i1> %cond, <4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3, <4 x i32> %arg4, <4 x i32> %arg5, <4 x i32> %arg6, <4 x i32> %arg7, <4 x i32> %arg8) {
+define internal <4 x i32> @test_mul_v4i32_more_regs(
+ <4 x i1> %cond, <4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %arg2,
+ <4 x i32> %arg3, <4 x i32> %arg4, <4 x i32> %arg5, <4 x i32> %arg6,
+ <4 x i32> %arg7, <4 x i32> %arg8) {
entry:
%res1 = sub <4 x i32> %arg0, %arg1
%res2 = sub <4 x i32> %arg0, %arg2
@@ -83,7 +89,8 @@
; Test movq, which is used by atomic stores.
declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
-define void @test_atomic_store_64(i32 %iptr, i32 %iptr2, i32 %iptr3, i64 %v) {
+define internal void @test_atomic_store_64(i32 %iptr, i32 %iptr2,
+ i32 %iptr3, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%ptr2 = inttoptr i32 %iptr2 to i64*
@@ -99,7 +106,8 @@
; CHECK-DAG: 66 0f d6 0{{.*}} movq QWORD PTR [e{{.*}}],xmm0
; Test "movups" via vector stores and loads.
-define void @store_v16xI8(i32 %addr, i32 %addr2, i32 %addr3, <16 x i8> %v) {
+define internal void @store_v16xI8(i32 %addr, i32 %addr2, i32 %addr3,
+ <16 x i8> %v) {
%addr_v16xI8 = inttoptr i32 %addr to <16 x i8>*
%addr2_v16xI8 = inttoptr i32 %addr2 to <16 x i8>*
%addr3_v16xI8 = inttoptr i32 %addr3 to <16 x i8>*
@@ -111,7 +119,7 @@
; CHECK-LABEL: store_v16xI8
; CHECK: 0f 11 0{{.*}} movups XMMWORD PTR [e{{.*}}],xmm0
-define <16 x i8> @load_v16xI8(i32 %addr, i32 %addr2, i32 %addr3) {
+define internal <16 x i8> @load_v16xI8(i32 %addr, i32 %addr2, i32 %addr3) {
%addr_v16xI8 = inttoptr i32 %addr to <16 x i8>*
%addr2_v16xI8 = inttoptr i32 %addr2 to <16 x i8>*
%addr3_v16xI8 = inttoptr i32 %addr3 to <16 x i8>*
@@ -129,7 +137,7 @@
declare i8* @llvm.nacl.read.tp()
; Also test more address complex operands via address-mode-optimization.
-define i32 @test_nacl_read_tp_more_addressing() {
+define internal i32 @test_nacl_read_tp_more_addressing() {
entry:
%ptr = call i8* @llvm.nacl.read.tp()
%__1 = ptrtoint i8* %ptr to i32
@@ -162,7 +170,8 @@
; The 16-bit pinsrw/pextrw (SSE2) are quite different from
; the pinsr{b,d}/pextr{b,d} (SSE4.1).
-define <4 x i32> @test_pinsrd(<4 x i32> %vec, i32 %elt1, i32 %elt2, i32 %elt3, i32 %elt4) {
+define internal <4 x i32> @test_pinsrd(<4 x i32> %vec, i32 %elt1, i32 %elt2,
+ i32 %elt3, i32 %elt4) {
entry:
%elt12 = add i32 %elt1, %elt2
%elt34 = add i32 %elt3, %elt4
@@ -176,7 +185,8 @@
; CHECK-DAG: 66 0f 3a 22 c{{.*}} 02 pinsrd xmm0,e{{.*}}
; CHECK-DAG: 66 0f 3a 22 c{{.*}} 03 pinsrd xmm0,e{{.*}}
-define <16 x i8> @test_pinsrb(<16 x i8> %vec, i32 %elt1_w, i32 %elt2_w, i32 %elt3_w, i32 %elt4_w) {
+define internal <16 x i8> @test_pinsrb(<16 x i8> %vec, i32 %elt1_w, i32 %elt2_w,
+ i32 %elt3_w, i32 %elt4_w) {
entry:
%elt1 = trunc i32 %elt1_w to i8
%elt2 = trunc i32 %elt2_w to i8
@@ -194,7 +204,8 @@
; CHECK-DAG: 66 0f 3a 20 c{{.*}} 07 pinsrb xmm0,e{{.*}}
; CHECK-DAG: 66 0f 3a 20 c{{.*}} 0f pinsrb xmm0,e{{.*}}
-define <8 x i16> @test_pinsrw(<8 x i16> %vec, i32 %elt1_w, i32 %elt2_w, i32 %elt3_w, i32 %elt4_w) {
+define internal <8 x i16> @test_pinsrw(<8 x i16> %vec, i32 %elt1_w, i32 %elt2_w,
+ i32 %elt3_w, i32 %elt4_w) {
entry:
%elt1 = trunc i32 %elt1_w to i16
%elt2 = trunc i32 %elt2_w to i16
@@ -212,7 +223,8 @@
; CHECK-DAG: 66 0f c4 c{{.*}} 04 pinsrw xmm0,e{{.*}}
; CHECK-DAG: 66 0f c4 c{{.*}} 07 pinsrw xmm0,e{{.*}}
-define i32 @test_pextrd(i32 %c, <4 x i32> %vec1, <4 x i32> %vec2, <4 x i32> %vec3, <4 x i32> %vec4) {
+define internal i32 @test_pextrd(i32 %c, <4 x i32> %vec1, <4 x i32> %vec2,
+ <4 x i32> %vec3, <4 x i32> %vec4) {
entry:
switch i32 %c, label %three [i32 0, label %zero
i32 1, label %one
@@ -236,7 +248,8 @@
; CHECK-DAG: 66 0f 3a 16 d0 02 pextrd eax,xmm2
; CHECK-DAG: 66 0f 3a 16 d8 03 pextrd eax,xmm3
-define i32 @test_pextrb(i32 %c, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, <16 x i8> %vec4) {
+define internal i32 @test_pextrb(i32 %c, <16 x i8> %vec1, <16 x i8> %vec2,
+ <16 x i8> %vec3, <16 x i8> %vec4) {
entry:
switch i32 %c, label %three [i32 0, label %zero
i32 1, label %one
@@ -264,7 +277,8 @@
; CHECK-DAG: 66 0f 3a 14 d0 0c pextrb eax,xmm2
; CHECK-DAG: 66 0f 3a 14 d8 0f pextrb eax,xmm3
-define i32 @test_pextrw(i32 %c, <8 x i16> %vec1, <8 x i16> %vec2, <8 x i16> %vec3, <8 x i16> %vec4) {
+define internal i32 @test_pextrw(i32 %c, <8 x i16> %vec1, <8 x i16> %vec2,
+ <8 x i16> %vec3, <8 x i16> %vec4) {
entry:
switch i32 %c, label %three [i32 0, label %zero
i32 1, label %one
diff --git a/tests_lit/assembler/x86/sandboxing.ll b/tests_lit/assembler/x86/sandboxing.ll
index fc8dcc4..c66bb1a 100644
--- a/tests_lit/assembler/x86/sandboxing.ll
+++ b/tests_lit/assembler/x86/sandboxing.ll
@@ -4,6 +4,7 @@
; minimal use of registers and stack slots in the lowering sequence.
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
+; RUN: -allow-externally-defined-symbols \
; RUN: -ffunction-sections -sandbox | FileCheck %s
declare void @call_target()
@@ -12,7 +13,7 @@
@global_int = internal global [4 x i8] zeroinitializer
; A direct call sequence uses the right mask and register-call sequence.
-define void @test_direct_call() {
+define internal void @test_direct_call() {
entry:
call void @call_target()
ret void
@@ -23,7 +24,7 @@
; CHECK-NEXT: 20:
; An indirect call sequence uses the right mask and register-call sequence.
-define void @test_indirect_call(i32 %target) {
+define internal void @test_indirect_call(i32 %target) {
entry:
%__1 = inttoptr i32 %target to void ()*
call void %__1()
@@ -37,7 +38,7 @@
; CHECk-NEXT: 20:
; A return sequences uses the right pop / mask / jmp sequence.
-define void @test_ret() {
+define internal void @test_ret() {
entry:
ret void
}
@@ -47,7 +48,7 @@
; CHECK-NEXT: jmp ecx
; A perfectly packed bundle should not have nops at the end.
-define void @packed_bundle() {
+define internal void @packed_bundle() {
entry:
call void @call_target()
; bundle boundary
@@ -72,7 +73,7 @@
; CHECK-NEXT: 47: {{.*}} mov WORD PTR
; An imperfectly packed bundle should have one or more nops at the end.
-define void @nonpacked_bundle() {
+define internal void @nonpacked_bundle() {
entry:
call void @call_target()
; bundle boundary
@@ -95,7 +96,7 @@
; A zero-byte instruction (e.g. local label definition) at a bundle
; boundary should not trigger nop padding.
-define void @label_at_boundary(i32 %arg, float %farg1, float %farg2) {
+define internal void @label_at_boundary(i32 %arg, float %farg1, float %farg2) {
entry:
%argi8 = trunc i32 %arg to i8
call void @call_target()
@@ -123,7 +124,7 @@
; CHECK-NEXT: 40: {{.*}} mov WORD PTR
; Bundle lock without padding.
-define void @bundle_lock_without_padding() {
+define internal void @bundle_lock_without_padding() {
entry:
%addr_short = bitcast [2 x i8]* @global_short to i16*
store i16 0, i16* %addr_short, align 1 ; 9-byte instruction
@@ -136,7 +137,7 @@
; CHECK-NEXT: jmp ecx
; Bundle lock with padding.
-define void @bundle_lock_with_padding() {
+define internal void @bundle_lock_with_padding() {
entry:
call void @call_target()
; bundle boundary
@@ -167,7 +168,7 @@
; CHECK-NEXT: 43: {{.*}} jmp ecx
; Bundle lock align_to_end without any padding.
-define void @bundle_lock_align_to_end_padding_0() {
+define internal void @bundle_lock_align_to_end_padding_0() {
entry:
call void @call_target()
; bundle boundary
@@ -186,7 +187,7 @@
; CHECK-NEXT: 3b: {{.*}} call
; Bundle lock align_to_end with one bunch of padding.
-define void @bundle_lock_align_to_end_padding_1() {
+define internal void @bundle_lock_align_to_end_padding_1() {
entry:
call void @call_target()
; bundle boundary
@@ -206,7 +207,7 @@
; CHECK: 3b: {{.*}} call
; Bundle lock align_to_end with two bunches of padding.
-define void @bundle_lock_align_to_end_padding_2(i32 %target) {
+define internal void @bundle_lock_align_to_end_padding_2(i32 %target) {
entry:
call void @call_target()
; bundle boundary
@@ -242,7 +243,7 @@
; properly checkpointed and restored during the two passes, as
; observed by the stack adjustment for accessing stack-allocated
; variables.
-define void @checkpoint_restore_stack_adjustment(i32 %arg) {
+define internal void @checkpoint_restore_stack_adjustment(i32 %arg) {
entry:
call void @call_target()
; bundle boundary
diff --git a/tests_lit/llvm2ice_tests/64bit.pnacl.ll b/tests_lit/llvm2ice_tests/64bit.pnacl.ll
index f6c30fa..3807ba4 100644
--- a/tests_lit/llvm2ice_tests/64bit.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/64bit.pnacl.ll
@@ -3,11 +3,11 @@
; i32 operations on x86-32.
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -O2 \
+; RUN: --target x8632 -i %s --args -O2 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -Om1 \
+; RUN: --target x8632 -i %s --args -Om1 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck --check-prefix=OPTM1 %s
; TODO(jvoung): Stop skipping unimplemented parts (via --skip-unimplemented)
@@ -16,11 +16,13 @@
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble --disassemble --target arm32 \
; RUN: -i %s --args -Om1 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
diff --git a/tests_lit/llvm2ice_tests/8bit.pnacl.ll b/tests_lit/llvm2ice_tests/8bit.pnacl.ll
index 837cb0b..8f8f515 100644
--- a/tests_lit/llvm2ice_tests/8bit.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/8bit.pnacl.ll
@@ -1,7 +1,9 @@
; This tries to be a comprehensive test of i8 operations.
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 | FileCheck %s
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
declare void @useInt(i32 %x)
diff --git a/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll b/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll
index 19959b0..401359a 100644
--- a/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll
+++ b/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll
@@ -7,7 +7,8 @@
; REQUIRES: target_X8632
; REQUIRES: allow_dump
-; RUN: %p2i -i %s --args -O2 --verbose addropt | FileCheck %s
+; RUN: %p2i -i %s --args -O2 --verbose addropt \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
declare i32 @_calloc_r(i32, i32, i32)
diff --git a/tests_lit/llvm2ice_tests/address-mode-opt.ll b/tests_lit/llvm2ice_tests/address-mode-opt.ll
index 1c929f8..d5c2064 100644
--- a/tests_lit/llvm2ice_tests/address-mode-opt.ll
+++ b/tests_lit/llvm2ice_tests/address-mode-opt.ll
@@ -1,11 +1,11 @@
; This file checks support for address mode optimization.
; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
-; RUN: | FileCheck %s
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 -mattr=sse4.1 \
-; RUN: | FileCheck --check-prefix=SSE41 %s
+; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=SSE41 %s
-define float @load_arg_plus_200000(float* %arg) {
+define internal float @load_arg_plus_200000(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr.int = add i32 %arg.int, 200000
@@ -16,7 +16,7 @@
; CHECK: movss xmm0,DWORD PTR [eax+0x30d40]
}
-define float @load_200000_plus_arg(float* %arg) {
+define internal float @load_200000_plus_arg(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr.int = add i32 200000, %arg.int
@@ -27,7 +27,7 @@
; CHECK: movss xmm0,DWORD PTR [eax+0x30d40]
}
-define float @load_arg_minus_200000(float* %arg) {
+define internal float @load_arg_minus_200000(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr.int = sub i32 %arg.int, 200000
@@ -38,7 +38,7 @@
; CHECK: movss xmm0,DWORD PTR [eax-0x30d40]
}
-define float @load_200000_minus_arg(float* %arg) {
+define internal float @load_200000_minus_arg(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr.int = sub i32 200000, %arg.int
@@ -49,7 +49,7 @@
; CHECK: movss xmm0,DWORD PTR [e{{..}}]
}
-define <8 x i16> @load_mul_v8i16_mem(<8 x i16> %arg0, i32 %arg1_iptr) {
+define internal <8 x i16> @load_mul_v8i16_mem(<8 x i16> %arg0, i32 %arg1_iptr) {
entry:
%addr_sub = sub i32 %arg1_iptr, 200000
%addr_ptr = inttoptr i32 %addr_sub to <8 x i16>*
@@ -61,7 +61,7 @@
; CHECK-NOT: pmullw xmm{{.*}},XMMWORD PTR [e{{..}}-0x30d40]
}
-define <4 x i32> @load_mul_v4i32_mem(<4 x i32> %arg0, i32 %arg1_iptr) {
+define internal <4 x i32> @load_mul_v4i32_mem(<4 x i32> %arg0, i32 %arg1_iptr) {
entry:
%addr_sub = sub i32 %arg1_iptr, 200000
%addr_ptr = inttoptr i32 %addr_sub to <4 x i32>*
@@ -77,7 +77,7 @@
; SSE41-NOT: pmulld xmm{{.*}},XMMWORD PTR [e{{..}}-0x30d40]
}
-define float @address_mode_opt_chaining(float* %arg) {
+define internal float @address_mode_opt_chaining(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr1.int = add i32 12, %arg.int
@@ -89,7 +89,7 @@
; CHECK: movss xmm0,DWORD PTR [eax+0x8]
}
-define float @address_mode_opt_chaining_overflow(float* %arg) {
+define internal float @address_mode_opt_chaining_overflow(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr1.int = add i32 2147483640, %arg.int
@@ -102,7 +102,7 @@
; CHECK: movss xmm0,DWORD PTR [{{.*}}+0x7ffffffb]
}
-define float @address_mode_opt_chaining_overflow_sub(float* %arg) {
+define internal float @address_mode_opt_chaining_overflow_sub(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr1.int = sub i32 %arg.int, 2147483640
@@ -115,7 +115,7 @@
; CHECK: movss xmm0,DWORD PTR [{{.*}}-0x7ffffffb]
}
-define float @address_mode_opt_chaining_no_overflow(float* %arg) {
+define internal float @address_mode_opt_chaining_no_overflow(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr1.int = sub i32 %arg.int, 2147483640
@@ -127,7 +127,7 @@
; CHECK: movss xmm0,DWORD PTR [{{.*}}+0x3]
}
-define float @address_mode_opt_add_pos_min_int(float* %arg) {
+define internal float @address_mode_opt_add_pos_min_int(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr1.int = add i32 %arg.int, 2147483648
@@ -138,7 +138,7 @@
; CHECK: movss xmm0,DWORD PTR [{{.*}}-0x80000000]
}
-define float @address_mode_opt_sub_min_int(float* %arg) {
+define internal float @address_mode_opt_sub_min_int(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr1.int = sub i32 %arg.int, 2147483648
diff --git a/tests_lit/llvm2ice_tests/align-spill-locations.ll b/tests_lit/llvm2ice_tests/align-spill-locations.ll
index dc8410c..c0b1cda 100644
--- a/tests_lit/llvm2ice_tests/align-spill-locations.ll
+++ b/tests_lit/llvm2ice_tests/align-spill-locations.ll
@@ -1,7 +1,9 @@
; This checks to ensure that Subzero aligns spill slots.
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 | FileCheck %s
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; The location of the stack slot for a variable is inferred from the
; return sequence.
@@ -10,7 +12,7 @@
; multiple basic blocks (not an LLVM global variable) and "local"
; refers to a variable that is live in only a single basic block.
-define <4 x i32> @align_global_vector(i32 %arg) {
+define internal <4 x i32> @align_global_vector(i32 %arg) {
entry:
%vec.global = insertelement <4 x i32> undef, i32 %arg, i32 0
br label %block
@@ -23,7 +25,7 @@
; CHECK-NEXT: ret
}
-define <4 x i32> @align_local_vector(i32 %arg) {
+define internal <4 x i32> @align_local_vector(i32 %arg) {
entry:
br label %block
block:
@@ -38,7 +40,7 @@
declare void @ForceXmmSpills()
-define <4 x i32> @align_global_vector_ebp_based(i32 %arg) {
+define internal <4 x i32> @align_global_vector_ebp_based(i32 %arg) {
entry:
br label %eblock ; Disable alloca optimization
eblock:
@@ -55,7 +57,7 @@
; CHECK: ret
}
-define <4 x i32> @align_local_vector_ebp_based(i32 %arg) {
+define internal <4 x i32> @align_local_vector_ebp_based(i32 %arg) {
entry:
br label %eblock ; Disable alloca optimization
eblock:
@@ -70,7 +72,7 @@
; CHECK: ret
}
-define <4 x i32> @align_local_vector_and_global_float(i32 %arg) {
+define internal <4 x i32> @align_local_vector_and_global_float(i32 %arg) {
entry:
%float.global = sitofp i32 %arg to float
call void @ForceXmmSpillsAndUseFloat(float %float.global)
diff --git a/tests_lit/llvm2ice_tests/alloc.ll b/tests_lit/llvm2ice_tests/alloc.ll
index b342d1e..49faa7e 100644
--- a/tests_lit/llvm2ice_tests/alloc.ll
+++ b/tests_lit/llvm2ice_tests/alloc.ll
@@ -1,11 +1,11 @@
; This is a basic test of the alloca instruction.
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -O2 \
+; RUN: --target x8632 -i %s --args -O2 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -Om1 \
+; RUN: --target x8632 -i %s --args -Om1 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; TODO(jvoung): Stop skipping unimplemented parts (via --skip-unimplemented)
@@ -14,16 +14,18 @@
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -Om1 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
-define void @fixed_416_align_16(i32 %n) {
+define internal void @fixed_416_align_16(i32 %n) {
entry:
%array = alloca i8, i32 416, align 16
%__2 = ptrtoint i8* %array to i32
@@ -40,7 +42,7 @@
; ARM32: sub sp, sp, #416
; ARM32: bl {{.*}} R_{{.*}} f1
-define void @fixed_416_align_32(i32 %n) {
+define internal void @fixed_416_align_32(i32 %n) {
entry:
%array = alloca i8, i32 400, align 32
%__2 = ptrtoint i8* %array to i32
@@ -60,7 +62,7 @@
; ARM32: bl {{.*}} R_{{.*}} f1
; Show that the amount to allocate will be rounded up.
-define void @fixed_351_align_16(i32 %n) {
+define internal void @fixed_351_align_16(i32 %n) {
entry:
%array = alloca i8, i32 351, align 16
%__2 = ptrtoint i8* %array to i32
@@ -77,7 +79,7 @@
; ARM32: sub sp, sp, #352
; ARM32: bl {{.*}} R_{{.*}} f1
-define void @fixed_351_align_32(i32 %n) {
+define internal void @fixed_351_align_32(i32 %n) {
entry:
%array = alloca i8, i32 351, align 32
%__2 = ptrtoint i8* %array to i32
@@ -100,7 +102,7 @@
declare void @f2(i32 %ignored)
-define void @variable_n_align_16(i32 %n) {
+define internal void @variable_n_align_16(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 16
%__2 = ptrtoint i8* %array to i32
@@ -122,7 +124,7 @@
; ARM32: sub sp, sp, r0
; ARM32: bl {{.*}} R_{{.*}} f2
-define void @variable_n_align_32(i32 %n) {
+define internal void @variable_n_align_32(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 32
%__2 = ptrtoint i8* %array to i32
@@ -156,7 +158,7 @@
; ARM32: pop {fp, lr}
; Test alloca with default (0) alignment.
-define void @align0(i32 %n) {
+define internal void @align0(i32 %n) {
entry:
%array = alloca i8, i32 %n
%__2 = ptrtoint i8* %array to i32
@@ -175,7 +177,7 @@
; Test a large alignment where a mask might not fit in an immediate
; field of an instruction for some architectures.
-define void @align1MB(i32 %n) {
+define internal void @align1MB(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 1048576
%__2 = ptrtoint i8* %array to i32
@@ -202,7 +204,7 @@
; Test a large alignment where a mask might still fit in an immediate
; field of an instruction for some architectures.
-define void @align512MB(i32 %n) {
+define internal void @align512MB(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 536870912
%__2 = ptrtoint i8* %array to i32
@@ -223,7 +225,7 @@
; ARM32: sub sp, sp, r0
; Test that a simple alloca sequence doesn't trigger a frame pointer.
-define void @fixed_no_frameptr(i32 %arg) {
+define internal void @fixed_no_frameptr(i32 %arg) {
entry:
%a1 = alloca i8, i32 8, align 4
%a2 = alloca i8, i32 12, align 4
@@ -240,7 +242,7 @@
; CHECK-NOT: mov ebp,esp
; Test that a more complex alloca sequence does trigger a frame pointer.
-define void @var_with_frameptr(i32 %arg) {
+define internal void @var_with_frameptr(i32 %arg) {
entry:
%a1 = alloca i8, i32 8, align 4
%a2 = alloca i8, i32 12, align 4
diff --git a/tests_lit/llvm2ice_tests/arith-opt.ll b/tests_lit/llvm2ice_tests/arith-opt.ll
index 66be97f..e0cb0db 100644
--- a/tests_lit/llvm2ice_tests/arith-opt.ll
+++ b/tests_lit/llvm2ice_tests/arith-opt.ll
@@ -3,10 +3,11 @@
; REQUIRES: allow_dump
-; RUN: %p2i -i %s --filetype=asm --args --verbose inst -threads=0 | FileCheck %s
+; RUN: %p2i -i %s --filetype=asm --args --verbose inst -threads=0 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
-define i32 @Add(i32 %a, i32 %b) {
-; CHECK: define i32 @Add
+define internal i32 @Add(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Add
entry:
%add = add i32 %b, %a
; CHECK: add
@@ -17,8 +18,8 @@
declare void @Use(i32)
-define i32 @And(i32 %a, i32 %b) {
-; CHECK: define i32 @And
+define internal i32 @And(i32 %a, i32 %b) {
+; CHECK: define internal i32 @And
entry:
%and = and i32 %b, %a
; CHECK: and
@@ -27,8 +28,8 @@
ret i32 %and
}
-define i32 @Or(i32 %a, i32 %b) {
-; CHECK: define i32 @Or
+define internal i32 @Or(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Or
entry:
%or = or i32 %b, %a
; CHECK: or
@@ -37,8 +38,8 @@
ret i32 %or
}
-define i32 @Xor(i32 %a, i32 %b) {
-; CHECK: define i32 @Xor
+define internal i32 @Xor(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Xor
entry:
%xor = xor i32 %b, %a
; CHECK: xor
@@ -47,8 +48,8 @@
ret i32 %xor
}
-define i32 @Sub(i32 %a, i32 %b) {
-; CHECK: define i32 @Sub
+define internal i32 @Sub(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Sub
entry:
%sub = sub i32 %a, %b
; CHECK: sub
@@ -57,8 +58,8 @@
ret i32 %sub
}
-define i32 @Mul(i32 %a, i32 %b) {
-; CHECK: define i32 @Mul
+define internal i32 @Mul(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Mul
entry:
%mul = mul i32 %b, %a
; CHECK: imul
@@ -67,8 +68,8 @@
ret i32 %mul
}
-define i32 @Sdiv(i32 %a, i32 %b) {
-; CHECK: define i32 @Sdiv
+define internal i32 @Sdiv(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Sdiv
entry:
%div = sdiv i32 %a, %b
; CHECK: cdq
@@ -78,8 +79,8 @@
ret i32 %div
}
-define i32 @Srem(i32 %a, i32 %b) {
-; CHECK: define i32 @Srem
+define internal i32 @Srem(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Srem
entry:
%rem = srem i32 %a, %b
; CHECK: cdq
@@ -89,8 +90,8 @@
ret i32 %rem
}
-define i32 @Udiv(i32 %a, i32 %b) {
-; CHECK: define i32 @Udiv
+define internal i32 @Udiv(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Udiv
entry:
%div = udiv i32 %a, %b
; CHECK: div
@@ -99,8 +100,8 @@
ret i32 %div
}
-define i32 @Urem(i32 %a, i32 %b) {
-; CHECK: define i32 @Urem
+define internal i32 @Urem(i32 %a, i32 %b) {
+; CHECK: define internal i32 @Urem
entry:
%rem = urem i32 %a, %b
; CHECK: div
@@ -111,7 +112,7 @@
; Check for a valid addressing mode in the x86-32 mul instruction when
; the second source operand is an immediate.
-define i64 @MulImm() {
+define internal i64 @MulImm() {
entry:
%mul = mul i64 3, 4
ret i64 %mul
diff --git a/tests_lit/llvm2ice_tests/arith.ll b/tests_lit/llvm2ice_tests/arith.ll
index 3027ee2..0d4a0b8 100644
--- a/tests_lit/llvm2ice_tests/arith.ll
+++ b/tests_lit/llvm2ice_tests/arith.ll
@@ -23,7 +23,7 @@
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
-define i32 @Add(i32 %a, i32 %b) {
+define internal i32 @Add(i32 %a, i32 %b) {
entry:
%add = add i32 %b, %a
ret i32 %add
@@ -33,7 +33,7 @@
; ARM32-LABEL: Add
; ARM32: add r
-define i32 @And(i32 %a, i32 %b) {
+define internal i32 @And(i32 %a, i32 %b) {
entry:
%and = and i32 %b, %a
ret i32 %and
@@ -43,7 +43,7 @@
; ARM32-LABEL: And
; ARM32: and r
-define i32 @Or(i32 %a, i32 %b) {
+define internal i32 @Or(i32 %a, i32 %b) {
entry:
%or = or i32 %b, %a
ret i32 %or
@@ -53,7 +53,7 @@
; ARM32-LABEL: Or
; ARM32: orr r
-define i32 @Xor(i32 %a, i32 %b) {
+define internal i32 @Xor(i32 %a, i32 %b) {
entry:
%xor = xor i32 %b, %a
ret i32 %xor
@@ -63,7 +63,7 @@
; ARM32-LABEL: Xor
; ARM32: eor r
-define i32 @Sub(i32 %a, i32 %b) {
+define internal i32 @Sub(i32 %a, i32 %b) {
entry:
%sub = sub i32 %a, %b
ret i32 %sub
@@ -73,7 +73,7 @@
; ARM32-LABEL: Sub
; ARM32: sub r
-define i32 @Mul(i32 %a, i32 %b) {
+define internal i32 @Mul(i32 %a, i32 %b) {
entry:
%mul = mul i32 %b, %a
ret i32 %mul
@@ -85,7 +85,7 @@
; Check for a valid ARM mul instruction where operands have to be registers.
; On the other hand x86-32 does allow an immediate.
-define i32 @MulImm(i32 %a, i32 %b) {
+define internal i32 @MulImm(i32 %a, i32 %b) {
entry:
%mul = mul i32 %a, 99
ret i32 %mul
@@ -98,7 +98,7 @@
; Check for a valid addressing mode in the x86-32 mul instruction when
; the second source operand is an immediate.
-define i64 @MulImm64(i64 %a) {
+define internal i64 @MulImm64(i64 %a) {
entry:
%mul = mul i64 %a, 99
ret i64 %mul
@@ -119,7 +119,7 @@
; ARM32: umull r
; ARM32: add r
-define i32 @Sdiv(i32 %a, i32 %b) {
+define internal i32 @Sdiv(i32 %a, i32 %b) {
entry:
%div = sdiv i32 %a, %b
ret i32 %div
@@ -138,7 +138,7 @@
; ARM32HWDIV: bne
; ARM32HWDIV: sdiv
-define i32 @SdivConst(i32 %a) {
+define internal i32 @SdivConst(i32 %a) {
entry:
%div = sdiv i32 %a, 219
ret i32 %div
@@ -154,7 +154,7 @@
; ARM32HWDIV-NOT: tst
; ARM32HWDIV: sdiv
-define i32 @Srem(i32 %a, i32 %b) {
+define internal i32 @Srem(i32 %a, i32 %b) {
entry:
%rem = srem i32 %a, %b
ret i32 %rem
@@ -173,7 +173,7 @@
; ARM32HWDIV: sdiv
; ARM32HWDIV: mls
-define i32 @Udiv(i32 %a, i32 %b) {
+define internal i32 @Udiv(i32 %a, i32 %b) {
entry:
%div = udiv i32 %a, %b
ret i32 %div
@@ -190,7 +190,7 @@
; ARM32HWDIV: bne
; ARM32HWDIV: udiv
-define i32 @Urem(i32 %a, i32 %b) {
+define internal i32 @Urem(i32 %a, i32 %b) {
entry:
%rem = urem i32 %a, %b
ret i32 %rem
@@ -213,7 +213,7 @@
@G = internal global [4 x i8] zeroinitializer, align 4
-define i32 @ShlReloc(i32 %a) {
+define internal i32 @ShlReloc(i32 %a) {
entry:
%opnd = ptrtoint [4 x i8]* @G to i32
%result = shl i32 %a, %opnd
@@ -222,7 +222,7 @@
; CHECK-LABEL: ShlReloc
; CHECK: shl {{.*}},cl
-define i32 @LshrReloc(i32 %a) {
+define internal i32 @LshrReloc(i32 %a) {
entry:
%opnd = ptrtoint [4 x i8]* @G to i32
%result = lshr i32 %a, %opnd
@@ -231,7 +231,7 @@
; CHECK-LABEL: LshrReloc
; CHECK: shr {{.*}},cl
-define i32 @AshrReloc(i32 %a) {
+define internal i32 @AshrReloc(i32 %a) {
entry:
%opnd = ptrtoint [4 x i8]* @G to i32
%result = ashr i32 %a, %opnd
diff --git a/tests_lit/llvm2ice_tests/asm-verbose.ll b/tests_lit/llvm2ice_tests/asm-verbose.ll
index 176e90a..65b8970 100644
--- a/tests_lit/llvm2ice_tests/asm-verbose.ll
+++ b/tests_lit/llvm2ice_tests/asm-verbose.ll
@@ -10,7 +10,8 @@
; RUN: %p2i --target arm32 -i %s --filetype=asm --args -O2 -asm-verbose \
; RUN: | FileCheck %s
-define i32 @single_bb(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
+define internal i32 @single_bb(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3,
+ i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
b1:
%t1 = add i32 %arg0, %arg1
%t2 = add i32 %t1, %arg2
diff --git a/tests_lit/llvm2ice_tests/bool-folding.ll b/tests_lit/llvm2ice_tests/bool-folding.ll
index 584c797..9a89200 100644
--- a/tests_lit/llvm2ice_tests/bool-folding.ll
+++ b/tests_lit/llvm2ice_tests/bool-folding.ll
@@ -2,17 +2,19 @@
; variables are combined to implicitly use flags instead of explicitly using
; stack or register variables.
-; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 | FileCheck %s
+; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; RUN: %if --need=allow_dump --need=target_ARM32 --command %p2i --filetype=asm \
; RUN: --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --need=target_ARM32 --command FileCheck %s \
; RUN: --check-prefix=ARM32
declare void @use_value(i32)
; Basic cmp/branch folding.
-define i32 @fold_cmp_br(i32 %arg1, i32 %arg2) {
+define internal i32 @fold_cmp_br(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
br i1 %cmp1, label %branch1, label %branch2
@@ -31,7 +33,7 @@
; Cmp/branch folding with intervening instructions.
-define i32 @fold_cmp_br_intervening_insts(i32 %arg1, i32 %arg2) {
+define internal i32 @fold_cmp_br_intervening_insts(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
call void @use_value(i32 %arg1)
@@ -57,7 +59,7 @@
; Cmp/branch non-folding because of live-out.
-define i32 @no_fold_cmp_br_liveout(i32 %arg1, i32 %arg2) {
+define internal i32 @no_fold_cmp_br_liveout(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
br label %next
@@ -82,7 +84,7 @@
; Cmp/branch non-folding because of extra non-whitelisted uses.
-define i32 @no_fold_cmp_br_non_whitelist(i32 %arg1, i32 %arg2) {
+define internal i32 @no_fold_cmp_br_non_whitelist(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
%result = zext i1 %cmp1 to i32
@@ -110,7 +112,7 @@
; Basic cmp/select folding.
-define i32 @fold_cmp_select(i32 %arg1, i32 %arg2) {
+define internal i32 @fold_cmp_select(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
%result = select i1 %cmp1, i32 %arg1, i32 %arg2
@@ -128,7 +130,7 @@
; 64-bit cmp/select folding.
-define i64 @fold_cmp_select_64(i64 %arg1, i64 %arg2) {
+define internal i64 @fold_cmp_select_64(i64 %arg1, i64 %arg2) {
entry:
%arg1_trunc = trunc i64 %arg1 to i32
%arg2_trunc = trunc i64 %arg2 to i32
@@ -153,7 +155,7 @@
; ARM32: bx lr
-define i64 @fold_cmp_select_64_undef(i64 %arg1) {
+define internal i64 @fold_cmp_select_64_undef(i64 %arg1) {
entry:
%arg1_trunc = trunc i64 %arg1 to i32
%cmp1 = icmp slt i32 undef, %arg1_trunc
@@ -176,7 +178,7 @@
; Cmp/select folding with intervening instructions.
-define i32 @fold_cmp_select_intervening_insts(i32 %arg1, i32 %arg2) {
+define internal i32 @fold_cmp_select_intervening_insts(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
call void @use_value(i32 %arg1)
@@ -203,7 +205,7 @@
; Cmp/multi-select folding.
-define i32 @fold_cmp_select_multi(i32 %arg1, i32 %arg2) {
+define internal i32 @fold_cmp_select_multi(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
%a = select i1 %cmp1, i32 %arg1, i32 %arg2
@@ -241,7 +243,7 @@
; Cmp/multi-select non-folding because of live-out.
-define i32 @no_fold_cmp_select_multi_liveout(i32 %arg1, i32 %arg2) {
+define internal i32 @no_fold_cmp_select_multi_liveout(i32 %arg1, i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
%a = select i1 %cmp1, i32 %arg1, i32 %arg2
@@ -282,7 +284,8 @@
; ARM32: bx lr
; Cmp/multi-select non-folding because of extra non-whitelisted uses.
-define i32 @no_fold_cmp_select_multi_non_whitelist(i32 %arg1, i32 %arg2) {
+define internal i32 @no_fold_cmp_select_multi_non_whitelist(i32 %arg1,
+ i32 %arg2) {
entry:
%cmp1 = icmp slt i32 %arg1, %arg2
%a = select i1 %cmp1, i32 %arg1, i32 %arg2
diff --git a/tests_lit/llvm2ice_tests/bool-opt.ll b/tests_lit/llvm2ice_tests/bool-opt.ll
index a574758..c7a31e0 100644
--- a/tests_lit/llvm2ice_tests/bool-opt.ll
+++ b/tests_lit/llvm2ice_tests/bool-opt.ll
@@ -1,10 +1,11 @@
; Trivial smoke test of icmp without fused branch opportunity.
-; RUN: %p2i -i %s --filetype=obj --disassemble --args | FileCheck %s
+; RUN: %p2i -i %s --filetype=obj --disassemble --args \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; Check that correct addressing modes are used for comparing two
; immediates.
-define void @testIcmpImm() {
+define internal void @testIcmpImm() {
entry:
%cmp = icmp eq i32 1, 2
%cmp_ext = zext i1 %cmp to i32
diff --git a/tests_lit/llvm2ice_tests/branch-opt.ll b/tests_lit/llvm2ice_tests/branch-opt.ll
index 24b6adb..e243018 100644
--- a/tests_lit/llvm2ice_tests/branch-opt.ll
+++ b/tests_lit/llvm2ice_tests/branch-opt.ll
@@ -2,11 +2,11 @@
; optimizations under Om1).
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -O2 \
+; RUN: --target x8632 -i %s --args -O2 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck --check-prefix=O2 %s
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -Om1 \
+; RUN: --target x8632 -i %s --args -Om1 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck --check-prefix=OM1 %s
; TODO(jvoung): Stop skipping unimplemented parts (via --skip-unimplemented)
@@ -15,12 +15,14 @@
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32O2 %s
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -Om1 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck \
; RUN: --check-prefix ARM32OM1 %s
@@ -28,7 +30,7 @@
declare void @dummy()
; An unconditional branch to the next block should be removed.
-define void @testUncondToNextBlock() {
+define internal void @testUncondToNextBlock() {
entry:
call void @dummy()
br label %next
@@ -59,7 +61,7 @@
; For a conditional branch with a fallthrough to the next block, the
; fallthrough branch should be removed.
-define void @testCondFallthroughToNextBlock(i32 %arg) {
+define internal void @testCondFallthroughToNextBlock(i32 %arg) {
entry:
%cmp = icmp sge i32 %arg, 123
br i1 %cmp, label %target, label %fallthrough
@@ -116,7 +118,7 @@
; different block as the fallthrough, the branch condition should be
; inverted, the fallthrough block changed to the target, and the
; branch to the next block removed.
-define void @testCondTargetNextBlock(i32 %arg) {
+define internal void @testCondTargetNextBlock(i32 %arg) {
entry:
%cmp = icmp sge i32 %arg, 123
br i1 %cmp, label %fallthrough, label %target
@@ -172,7 +174,7 @@
; Unconditional branches to the block after a contracted block should be
; removed.
-define void @testUncondToBlockAfterContract() {
+define internal void @testUncondToBlockAfterContract() {
entry:
call void @dummy()
br label %target
diff --git a/tests_lit/llvm2ice_tests/branch-simple.ll b/tests_lit/llvm2ice_tests/branch-simple.ll
index 5f821d8..cbe495d 100644
--- a/tests_lit/llvm2ice_tests/branch-simple.ll
+++ b/tests_lit/llvm2ice_tests/branch-simple.ll
@@ -9,7 +9,7 @@
; RUN: %p2i -i %s --args -O2 --verbose inst -threads=0 | FileCheck %s
; RUN: %p2i -i %s --args -Om1 --verbose inst -threads=0 | FileCheck %s
-define i32 @simple_cond_branch(i32 %foo, i32 %bar) {
+define internal i32 @simple_cond_branch(i32 %foo, i32 %bar) {
entry:
%r1 = icmp eq i32 %foo, %bar
br i1 %r1, label %Equal, label %Unequal
diff --git a/tests_lit/llvm2ice_tests/cmp-opt.ll b/tests_lit/llvm2ice_tests/cmp-opt.ll
index 99c2efd..fefd226 100644
--- a/tests_lit/llvm2ice_tests/cmp-opt.ll
+++ b/tests_lit/llvm2ice_tests/cmp-opt.ll
@@ -1,11 +1,11 @@
; Simple test of non-fused compare/branch.
; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
-; RUN: | FileCheck %s
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 \
-; RUN: | FileCheck --check-prefix=OPTM1 %s
+; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=OPTM1 %s
-define void @testBool(i32 %a, i32 %b) {
+define internal void @testBool(i32 %a, i32 %b) {
entry:
%cmp = icmp slt i32 %a, %b
%cmp1 = icmp sgt i32 %a, %b
diff --git a/tests_lit/llvm2ice_tests/commutativity.ll b/tests_lit/llvm2ice_tests/commutativity.ll
index e90c035..cf5c672 100644
--- a/tests_lit/llvm2ice_tests/commutativity.ll
+++ b/tests_lit/llvm2ice_tests/commutativity.ll
@@ -6,7 +6,7 @@
; RUN: --target x8632 -i %s --args -O2 \
; RUN: | %if --need=target_X8632 --command FileCheck %s
-define i32 @integerAddLeft(i32 %a, i32 %b) {
+define internal i32 @integerAddLeft(i32 %a, i32 %b) {
entry:
%tmp = add i32 %a, %b
%result = add i32 %a, %tmp
@@ -18,7 +18,7 @@
; CHECK-NEXT: add {{e..}},{{e..}}
; CHECK-NEXT: add {{e..}},{{e..}}
-define i32 @integerAddRight(i32 %a, i32 %b) {
+define internal i32 @integerAddRight(i32 %a, i32 %b) {
entry:
%tmp = add i32 %a, %b
%result = add i32 %b, %tmp
@@ -30,7 +30,7 @@
; CHECK-NEXT: add {{e..}},{{e..}}
; CHECK-NEXT: add {{e..}},{{e..}}
-define i32 @integerMultiplyLeft(i32 %a, i32 %b) {
+define internal i32 @integerMultiplyLeft(i32 %a, i32 %b) {
entry:
%tmp = mul i32 %a, %b
%result = mul i32 %a, %tmp
@@ -42,7 +42,7 @@
; CHECK-NEXT: imul {{e..}},{{e..}}
; CHECK-NEXT: imul {{e..}},{{e..}}
-define i32 @integerMultiplyRight(i32 %a, i32 %b) {
+define internal i32 @integerMultiplyRight(i32 %a, i32 %b) {
entry:
%tmp = mul i32 %a, %b
%result = mul i32 %b, %tmp
@@ -54,7 +54,7 @@
; CHECK-NEXT: imul {{e..}},{{e..}}
; CHECK-NEXT: imul {{e..}},{{e..}}
-define float @floatAddLeft(float %a, float %b) {
+define internal float @floatAddLeft(float %a, float %b) {
entry:
%tmp = fadd float %a, %b
%result = fadd float %a, %tmp
@@ -66,7 +66,7 @@
; CHECK-NEXT: addss xmm1,xmm0
; CHECK-NEXT: addss xmm0,xmm1
-define float @floatAddRight(float %a, float %b) {
+define internal float @floatAddRight(float %a, float %b) {
entry:
%tmp = fadd float %a, %b
%result = fadd float %b, %tmp
@@ -78,7 +78,7 @@
; CHECK-NEXT: addss xmm0,xmm1
; CHECK-NEXT: addss xmm1,xmm0
-define float @floatMultiplyLeft(float %a, float %b) {
+define internal float @floatMultiplyLeft(float %a, float %b) {
entry:
%tmp = fmul float %a, %b
%result = fmul float %a, %tmp
@@ -90,7 +90,7 @@
; CHECK-NEXT: mulss xmm1,xmm0
; CHECK-NEXT: mulss xmm0,xmm1
-define float @floatMultiplyRight(float %a, float %b) {
+define internal float @floatMultiplyRight(float %a, float %b) {
entry:
%tmp = fmul float %a, %b
%result = fmul float %b, %tmp
diff --git a/tests_lit/llvm2ice_tests/cond-br-same-target.ll b/tests_lit/llvm2ice_tests/cond-br-same-target.ll
index 6863b1f..f4a3876 100644
--- a/tests_lit/llvm2ice_tests/cond-br-same-target.ll
+++ b/tests_lit/llvm2ice_tests/cond-br-same-target.ll
@@ -4,7 +4,7 @@
; RUN: %p2i -i %s --insts | FileCheck %s
-define void @f(i32 %foo, i32 %bar) {
+define internal void @f(i32 %foo, i32 %bar) {
entry:
%c = icmp ult i32 %foo, %bar
br i1 %c, label %block, label %block
@@ -14,7 +14,7 @@
; Note that the branch is converted to an unconditional branch.
-; CHECK: define void @f(i32 %foo, i32 %bar) {
+; CHECK: define internal void @f(i32 %foo, i32 %bar) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %c = icmp ult i32 %foo, %bar
; CHECK-NEXT: br label %block
diff --git a/tests_lit/llvm2ice_tests/contract.ll b/tests_lit/llvm2ice_tests/contract.ll
index 59e6491..05afb46 100644
--- a/tests_lit/llvm2ice_tests/contract.ll
+++ b/tests_lit/llvm2ice_tests/contract.ll
@@ -4,7 +4,7 @@
; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
; RUN: | FileCheck %s
-define void @SimpleBranch() {
+define internal void @SimpleBranch() {
label0:
br label %label2
label1:
diff --git a/tests_lit/llvm2ice_tests/convert.ll b/tests_lit/llvm2ice_tests/convert.ll
index ecdb683..5c8e5d2 100644
--- a/tests_lit/llvm2ice_tests/convert.ll
+++ b/tests_lit/llvm2ice_tests/convert.ll
@@ -32,7 +32,7 @@
@u32v = internal global [4 x i8] zeroinitializer, align 4
@u64v = internal global [8 x i8] zeroinitializer, align 8
-define void @from_int8() {
+define internal void @from_int8() {
entry:
%__0 = bitcast [1 x i8]* @i8v to i8*
%v0 = load i8, i8* %__0, align 1
@@ -73,7 +73,7 @@
; ARM32-DAG: str r{{.*}}, [r{{[0-9]+}}]
; ARM32-DAG: str r{{.*}}, [{{.*}}, #4]
-define void @from_int16() {
+define internal void @from_int16() {
entry:
%__0 = bitcast [2 x i8]* @i16v to i16*
%v0 = load i16, i16* %__0, align 1
@@ -110,7 +110,7 @@
; ARM32: movw {{.*}}i64v
; ARM32: str r
-define void @from_int32() {
+define internal void @from_int32() {
entry:
%__0 = bitcast [4 x i8]* @i32v to i32*
%v0 = load i32, i32* %__0, align 1
@@ -143,7 +143,7 @@
; ARM32: movw {{.*}}i64v
; ARM32: str r
-define void @from_int64() {
+define internal void @from_int64() {
entry:
%__0 = bitcast [8 x i8]* @i64v to i64*
%v0 = load i64, i64* %__0, align 1
@@ -174,7 +174,7 @@
; ARM32: movw {{.*}}i32v
; ARM32: str r
-define void @from_uint8() {
+define internal void @from_uint8() {
entry:
%__0 = bitcast [1 x i8]* @u8v to i8*
%v0 = load i8, i8* %__0, align 1
@@ -213,7 +213,7 @@
; ARM32: movw {{.*}}i64v
; ARM32: str r
-define void @from_uint16() {
+define internal void @from_uint16() {
entry:
%__0 = bitcast [2 x i8]* @u16v to i16*
%v0 = load i16, i16* %__0, align 1
@@ -250,7 +250,7 @@
; ARM32: movw {{.*}}i64v
; ARM32: str r
-define void @from_uint32() {
+define internal void @from_uint32() {
entry:
%__0 = bitcast [4 x i8]* @u32v to i32*
%v0 = load i32, i32* %__0, align 1
@@ -283,7 +283,7 @@
; ARM32: movw {{.*}}i64v
; ARM32: str r
-define void @from_uint64() {
+define internal void @from_uint64() {
entry:
%__0 = bitcast [8 x i8]* @u64v to i64*
%v0 = load i64, i64* %__0, align 1
diff --git a/tests_lit/llvm2ice_tests/div_legalization.ll b/tests_lit/llvm2ice_tests/div_legalization.ll
index dec388d..b4832d1 100644
--- a/tests_lit/llvm2ice_tests/div_legalization.ll
+++ b/tests_lit/llvm2ice_tests/div_legalization.ll
@@ -4,7 +4,7 @@
; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 | FileCheck %s
; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 | FileCheck %s
-define i32 @Sdiv_const8_b(i8 %a) {
+define internal i32 @Sdiv_const8_b(i8 %a) {
; CHECK-LABEL: Sdiv_const8_b
entry:
%div = sdiv i8 %a, 12
@@ -14,7 +14,7 @@
ret i32 %div_ext
}
-define i32 @Sdiv_const16_b(i16 %a) {
+define internal i32 @Sdiv_const16_b(i16 %a) {
; CHECK-LABEL: Sdiv_const16_b
entry:
%div = sdiv i16 %a, 1234
@@ -24,7 +24,7 @@
ret i32 %div_ext
}
-define i32 @Sdiv_const32_b(i32 %a) {
+define internal i32 @Sdiv_const32_b(i32 %a) {
; CHECK-LABEL: Sdiv_const32_b
entry:
%div = sdiv i32 %a, 1234
@@ -33,7 +33,7 @@
ret i32 %div
}
-define i32 @Srem_const_b(i32 %a) {
+define internal i32 @Srem_const_b(i32 %a) {
; CHECK-LABEL: Srem_const_b
entry:
%rem = srem i32 %a, 2345
@@ -42,7 +42,7 @@
ret i32 %rem
}
-define i32 @Udiv_const_b(i32 %a) {
+define internal i32 @Udiv_const_b(i32 %a) {
; CHECK-LABEL: Udiv_const_b
entry:
%div = udiv i32 %a, 3456
@@ -51,7 +51,7 @@
ret i32 %div
}
-define i32 @Urem_const_b(i32 %a) {
+define internal i32 @Urem_const_b(i32 %a) {
; CHECK-LABEL: Urem_const_b
entry:
%rem = urem i32 %a, 4567
diff --git a/tests_lit/llvm2ice_tests/ebp_args.ll b/tests_lit/llvm2ice_tests/ebp_args.ll
index f7dd792..0b690fc 100644
--- a/tests_lit/llvm2ice_tests/ebp_args.ll
+++ b/tests_lit/llvm2ice_tests/ebp_args.ll
@@ -3,11 +3,12 @@
; adjustment was incorrectly added to the stack/frame offset for
; ebp-based frames.
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
declare i32 @memcpy_helper2(i32 %buf, i32 %buf2, i32 %n)
-define i32 @memcpy_helper(i32 %buf, i32 %n) {
+define internal i32 @memcpy_helper(i32 %buf, i32 %n) {
entry:
br label %eblock ; Disable alloca optimization
eblock:
diff --git a/tests_lit/llvm2ice_tests/elf_container.ll b/tests_lit/llvm2ice_tests/elf_container.ll
index f34baa8..4d4a12f 100644
--- a/tests_lit/llvm2ice_tests/elf_container.ll
+++ b/tests_lit/llvm2ice_tests/elf_container.ll
@@ -4,10 +4,12 @@
; For the integrated ELF writer, we can't pipe the output because we need
; to seek backward and patch up the file headers. So, use a temporary file.
; RUN: %p2i -i %s --filetype=obj --args -O2 --verbose none -o %t \
+; RUN: -allow-externally-defined-symbols \
; RUN: && llvm-readobj -file-headers -sections -section-data \
; RUN: -relocations -symbols %t | FileCheck %s
; RUN: %if --need=allow_dump --command %p2i -i %s --args -O2 --verbose none \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --command llvm-mc -triple=i686-nacl \
; RUN: -filetype=obj -o - \
; RUN: | %if --need=allow_dump --command llvm-readobj -file-headers \
@@ -16,6 +18,7 @@
; Add a run that shows relocations in code inline.
; RUN: %p2i -i %s --filetype=obj --args -O2 --verbose none -o %t \
+; RUN: -allow-externally-defined-symbols \
; RUN: && le32-nacl-objdump -w -d -r -Mintel %t \
; RUN: | FileCheck --check-prefix=TEXT-RELOCS %s
diff --git a/tests_lit/llvm2ice_tests/fp.arm.call.ll b/tests_lit/llvm2ice_tests/fp.arm.call.ll
index 425d8ed..511a9d2 100644
--- a/tests_lit/llvm2ice_tests/fp.arm.call.ll
+++ b/tests_lit/llvm2ice_tests/fp.arm.call.ll
@@ -3,11 +3,13 @@
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck %s
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble --disassemble --target arm32 \
; RUN: -i %s --args -Om1 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck %s
@@ -57,7 +59,7 @@
float %p5, float %p6, float %p7, float %p8, float %p9,
float %p10, float %p11, float %p12, float %p13,
float %p14, float %p15, float %p16, float %p17)
-define void @floatHarness() nounwind {
+define internal void @floatHarness() nounwind {
; CHECK-LABEL: floatHarness
call void @float1(float 1.0)
; CHECK-DAG: vldr s0
@@ -312,7 +314,7 @@
declare void @double10(double %p0, double %p1, double %p2, double %p3,
double %p4, double %p5, double %p6, double %p7,
double %p8, double %p9)
-define void @doubleHarness() nounwind {
+define internal void @doubleHarness() nounwind {
; CHECK-LABEL: doubleHarness
call void @double1(double 1.0)
; CHECK-DAG: vldr d0
@@ -429,7 +431,7 @@
declare void @testFDDDDDDDDFDF(float %p0, double %p1, double %p2, double %p3,
double %p4, double %p5, double %p6, double %p7,
double %p8, float %p9, double %p10, float %p11)
-define void @packsFloats() nounwind {
+define internal void @packsFloats() nounwind {
; CHECK-LABEL: packsFloats
call void @testFDF(float 1.0, double 2.0, float 3.0)
; CHECK-DAG: vldr s0
diff --git a/tests_lit/llvm2ice_tests/fp.call_ret.ll b/tests_lit/llvm2ice_tests/fp.call_ret.ll
index f81b317..b305ae9 100644
--- a/tests_lit/llvm2ice_tests/fp.call_ret.ll
+++ b/tests_lit/llvm2ice_tests/fp.call_ret.ll
@@ -4,10 +4,10 @@
; there are no special OPTM1 match lines.
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -O2 \
+; RUN: --target x8632 -i %s --args -O2 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -Om1 \
+; RUN: --target x8632 -i %s --args -Om1 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; Can't test on ARM yet. Need to use several vpush {contiguous FP regs},
diff --git a/tests_lit/llvm2ice_tests/fp.cmp.ll b/tests_lit/llvm2ice_tests/fp.cmp.ll
index 7e566bd..d41017a 100644
--- a/tests_lit/llvm2ice_tests/fp.cmp.ll
+++ b/tests_lit/llvm2ice_tests/fp.cmp.ll
@@ -3,16 +3,20 @@
; that should be present regardless of the optimization level, so
; there are no special OPTM1 match lines.
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 | FileCheck %s
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; RUN: %if --need=allow_dump --need=target_ARM32 --command %p2i --filetype=asm \
; RUN: --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --need=target_ARM32 --command FileCheck %s \
; RUN: --check-prefix=ARM32
; RUN: %if --need=allow_dump --need=target_ARM32 --command %p2i --filetype=asm \
; RUN: --target arm32 -i %s --args -Om1 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --need=target_ARM32 --command FileCheck %s \
; RUN: --check-prefix=ARM32
diff --git a/tests_lit/llvm2ice_tests/fp_const_pool.ll b/tests_lit/llvm2ice_tests/fp_const_pool.ll
index c1fa9df..587e967 100644
--- a/tests_lit/llvm2ice_tests/fp_const_pool.ll
+++ b/tests_lit/llvm2ice_tests/fp_const_pool.ll
@@ -5,15 +5,15 @@
; REQUIRES: allow_dump
-define void @consume_float(float %f) {
+define internal void @consume_float(float %f) {
ret void
}
-define void @consume_double(double %d) {
+define internal void @consume_double(double %d) {
ret void
}
-define void @test_zeros() {
+define internal void @test_zeros() {
entry:
call void @consume_float(float 0.0)
call void @consume_float(float -0.0)
@@ -34,7 +34,7 @@
; ZERO-NEXT: call void @consume_double(double -0.0
-define void @test_nans() {
+define internal void @test_nans() {
entry:
call void @consume_float(float 0x7FF8000000000000)
call void @consume_float(float 0x7FF8000000000000)
diff --git a/tests_lit/llvm2ice_tests/fpcall.ll b/tests_lit/llvm2ice_tests/fpcall.ll
index 969ca8d..07d1830 100644
--- a/tests_lit/llvm2ice_tests/fpcall.ll
+++ b/tests_lit/llvm2ice_tests/fpcall.ll
@@ -3,8 +3,10 @@
; particular, the top-of-stack must be popped regardless of whether
; its value is used.
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 | FileCheck %s
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -Om1 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
declare float @dummy()
diff --git a/tests_lit/llvm2ice_tests/fpconst.pnacl.ll b/tests_lit/llvm2ice_tests/fpconst.pnacl.ll
index 80c6f9f..d8048d7 100644
--- a/tests_lit/llvm2ice_tests/fpconst.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/fpconst.pnacl.ll
@@ -7,15 +7,19 @@
; http://llvm.org/docs/LangRef.html#simple-constants .
; RUN: %p2i --assemble --disassemble --filetype=obj --dis-flags=-s \
-; RUN: -i %s --args -O2 --verbose none | FileCheck %s
+; RUN: -i %s --args -O2 --verbose none -allow-externally-defined-symbols \
+; RUN: | FileCheck %s
; RUN: %p2i --assemble --disassemble --filetype=obj --dis-flags=-s \
-; RUN: -i %s --args -Om1 --verbose none | FileCheck %s
+; RUN: -i %s --args -Om1 --verbose none -allow-externally-defined-symbols \
+; RUN: | FileCheck %s
; RUN: %if --need allow_dump --command %p2i --assemble --disassemble \
; RUN: --dis-flags=-s -i %s --args -O2 --verbose none \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need allow_dump --command FileCheck %s
; RUN: %if --need allow_dump --command %p2i --assemble --disassemble \
; RUN: --dis-flags=-s -i %s --args -Om1 --verbose none \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need allow_dump --command FileCheck %s
@__init_array_start = internal constant [0 x i8] zeroinitializer, align 4
diff --git a/tests_lit/llvm2ice_tests/function_aligned.ll b/tests_lit/llvm2ice_tests/function_aligned.ll
index 5093d85..10e3aac 100644
--- a/tests_lit/llvm2ice_tests/function_aligned.ll
+++ b/tests_lit/llvm2ice_tests/function_aligned.ll
@@ -19,7 +19,7 @@
; RUN: | %if --need=target_MIPS32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix MIPS32 %s
-define void @foo() {
+define internal void @foo() {
ret void
}
; CHECK-LABEL: foo
@@ -34,7 +34,7 @@
; MIPS32: 0: {{.*}} jr ra
; MIPS32-NEXT: 4: {{.*}} nop
-define void @bar() {
+define internal void @bar() {
ret void
}
; CHECK-LABEL: bar
diff --git a/tests_lit/llvm2ice_tests/globalinit.pnacl.ll b/tests_lit/llvm2ice_tests/globalinit.pnacl.ll
index b2c0908..ed87740 100644
--- a/tests_lit/llvm2ice_tests/globalinit.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/globalinit.pnacl.ll
@@ -4,25 +4,29 @@
; Test initializers with -filetype=asm.
; RUN: %if --need=target_X8632 --command %p2i --filetype=asm --target x8632 \
-; RUN: -i %s --args -O2 | %if --need=target_X8632 --command FileCheck %s
+; RUN: -i %s --args -O2 -allow-externally-defined-symbols \
+; RUN: | %if --need=target_X8632 --command FileCheck %s
; RUN: %if --need=target_ARM32 --command %p2i --filetype=asm --target arm32 \
; RUN: -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --command FileCheck %s
; Test instructions for materializing addresses.
; RUN: %if --need=target_X8632 --command %p2i --filetype=asm --target x8632 \
-; RUN: -i %s --args -O2 \
+; RUN: -i %s --args -O2 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s --check-prefix=X8632
; Test instructions with -filetype=obj and try to cross reference instructions
; w/ the symbol table.
; RUN: %if --need=target_X8632 --command %p2i --assemble --disassemble \
; RUN: --target x8632 -i %s --args --verbose none \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck --check-prefix=IAS %s
; RUN: %if --need=target_X8632 --command %p2i --assemble --disassemble \
; RUN: --dis-flags=-t --target x8632 -i %s --args --verbose none \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck --check-prefix=SYMTAB %s
; This is not really IAS, but we can switch when that is implemented.
@@ -30,12 +34,14 @@
; RUN: %if --need=target_ARM32 --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s \
; RUN: --args --verbose none --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --command FileCheck \
; RUN: --check-prefix=IASARM32 %s
; RUN: %if --need=target_ARM32 --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --dis-flags=-t --target arm32 -i %s \
; RUN: --args --verbose none --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --command FileCheck --check-prefix=SYMTAB %s
define internal i32 @main(i32 %argc, i32 %argv) {
diff --git a/tests_lit/llvm2ice_tests/globalrelocs.ll b/tests_lit/llvm2ice_tests/globalrelocs.ll
index 0d163e6..ceb22a1 100644
--- a/tests_lit/llvm2ice_tests/globalrelocs.ll
+++ b/tests_lit/llvm2ice_tests/globalrelocs.ll
@@ -258,9 +258,9 @@
; CHECK: .zero 2
; CHECK: .size short, 2
-define void @func() {
+define internal void @func() {
ret void
}
-; DUMP: define void @func() {
+; DUMP: define internal void @func() {
diff --git a/tests_lit/llvm2ice_tests/ias-multi-reloc.ll b/tests_lit/llvm2ice_tests/ias-multi-reloc.ll
index 11a708e..0fedb1e 100644
--- a/tests_lit/llvm2ice_tests/ias-multi-reloc.ll
+++ b/tests_lit/llvm2ice_tests/ias-multi-reloc.ll
@@ -2,6 +2,7 @@
; relocations.
; RUN: %if --need=allow_dump --command %p2i -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --command FileCheck %s
; char global_char;
diff --git a/tests_lit/llvm2ice_tests/int-arg.ll b/tests_lit/llvm2ice_tests/int-arg.ll
index 76c2abe..c59f0c8 100644
--- a/tests_lit/llvm2ice_tests/int-arg.ll
+++ b/tests_lit/llvm2ice_tests/int-arg.ll
@@ -2,7 +2,7 @@
; calling convention for integers.
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -O2 \
+; RUN: --target x8632 -i %s --args -O2 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; TODO(jvoung): Stop skipping unimplemented parts (via --skip-unimplemented)
@@ -11,6 +11,7 @@
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
@@ -21,7 +22,7 @@
; i32
-define i32 @test_returning32_arg0(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
+define internal i32 @test_returning32_arg0(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
entry:
ret i32 %arg0
}
@@ -31,7 +32,7 @@
; ARM32-LABEL: test_returning32_arg0
; ARM32-NEXT: bx lr
-define i32 @test_returning32_arg1(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
+define internal i32 @test_returning32_arg1(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
entry:
ret i32 %arg1
}
@@ -43,7 +44,7 @@
; ARM32-NEXT: bx lr
-define i32 @test_returning32_arg2(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
+define internal i32 @test_returning32_arg2(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
entry:
ret i32 %arg2
}
@@ -55,7 +56,7 @@
; ARM32-NEXT: bx lr
-define i32 @test_returning32_arg3(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
+define internal i32 @test_returning32_arg3(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
entry:
ret i32 %arg3
}
@@ -67,7 +68,7 @@
; ARM32-NEXT: bx lr
-define i32 @test_returning32_arg4(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
+define internal i32 @test_returning32_arg4(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
entry:
ret i32 %arg4
}
@@ -79,7 +80,7 @@
; ARM32-NEXT: bx lr
-define i32 @test_returning32_arg5(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
+define internal i32 @test_returning32_arg5(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7) {
entry:
ret i32 %arg5
}
@@ -92,7 +93,7 @@
; i64
-define i64 @test_returning64_arg0(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
+define internal i64 @test_returning64_arg0(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
entry:
ret i64 %arg0
}
@@ -103,7 +104,7 @@
; ARM32-LABEL: test_returning64_arg0
; ARM32-NEXT: bx lr
-define i64 @test_returning64_arg1(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
+define internal i64 @test_returning64_arg1(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
entry:
ret i64 %arg1
}
@@ -116,7 +117,7 @@
; ARM32-NEXT: mov r1, r3
; ARM32-NEXT: bx lr
-define i64 @test_returning64_arg2(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
+define internal i64 @test_returning64_arg2(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
entry:
ret i64 %arg2
}
@@ -130,7 +131,7 @@
; ARM32-NEXT: ldr r1, [sp, #4]
; ARM32-NEXT: bx lr
-define i64 @test_returning64_arg3(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
+define internal i64 @test_returning64_arg3(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3) {
entry:
ret i64 %arg3
}
@@ -146,7 +147,7 @@
; Test that on ARM, the i64 arguments start with an even register.
-define i64 @test_returning64_even_arg1(i32 %arg0, i64 %arg1, i64 %arg2) {
+define internal i64 @test_returning64_even_arg1(i32 %arg0, i64 %arg1, i64 %arg2) {
entry:
ret i64 %arg1
}
@@ -160,7 +161,7 @@
; ARM32-NEXT: mov r1, r3
; ARM32-NEXT: bx lr
-define i64 @test_returning64_even_arg1b(i32 %arg0, i32 %arg0b, i64 %arg1, i64 %arg2) {
+define internal i64 @test_returning64_even_arg1b(i32 %arg0, i32 %arg0b, i64 %arg1, i64 %arg2) {
entry:
ret i64 %arg1
}
@@ -173,7 +174,7 @@
; ARM32-NEXT: mov r1, r3
; ARM32-NEXT: bx lr
-define i64 @test_returning64_even_arg2(i64 %arg0, i32 %arg1, i64 %arg2) {
+define internal i64 @test_returning64_even_arg2(i64 %arg0, i32 %arg1, i64 %arg2) {
entry:
ret i64 %arg2
}
@@ -187,7 +188,7 @@
; ARM32-DAG: ldr r1, [sp, #4]
; ARM32-NEXT: bx lr
-define i64 @test_returning64_even_arg2b(i64 %arg0, i32 %arg1, i32 %arg1b, i64 %arg2) {
+define internal i64 @test_returning64_even_arg2b(i64 %arg0, i32 %arg1, i32 %arg1b, i64 %arg2) {
entry:
ret i64 %arg2
}
@@ -200,7 +201,7 @@
; ARM32-NEXT: ldr r1, [sp, #4]
; ARM32-NEXT: bx lr
-define i32 @test_returning32_even_arg2(i64 %arg0, i32 %arg1, i32 %arg2) {
+define internal i32 @test_returning32_even_arg2(i64 %arg0, i32 %arg1, i32 %arg2) {
entry:
ret i32 %arg2
}
@@ -211,7 +212,7 @@
; ARM32-NEXT: mov r0, r3
; ARM32-NEXT: bx lr
-define i32 @test_returning32_even_arg2b(i32 %arg0, i32 %arg1, i32 %arg2, i64 %arg3) {
+define internal i32 @test_returning32_even_arg2b(i32 %arg0, i32 %arg1, i32 %arg2, i64 %arg3) {
entry:
ret i32 %arg2
}
@@ -224,7 +225,7 @@
; The i64 won't fit in a pair of register, and consumes the last register so a
; following i32 can't use that free register.
-define i32 @test_returning32_even_arg4(i32 %arg0, i32 %arg1, i32 %arg2, i64 %arg3, i32 %arg4) {
+define internal i32 @test_returning32_even_arg4(i32 %arg0, i32 %arg1, i32 %arg2, i64 %arg3, i32 %arg4) {
entry:
ret i32 %arg4
}
@@ -244,7 +245,7 @@
declare void @killRegisters()
-define void @test_passing_integers(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6) {
+define internal void @test_passing_integers(i32 %arg0, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6) {
call void @killRegisters()
call void @IntArgs(i32 %arg6, i32 %arg5, i32 %arg4, i32 %arg3, i32 %arg2, i32 %arg1)
ret void
diff --git a/tests_lit/llvm2ice_tests/invalid.test b/tests_lit/llvm2ice_tests/invalid.test
index 8cca520..14413b7 100644
--- a/tests_lit/llvm2ice_tests/invalid.test
+++ b/tests_lit/llvm2ice_tests/invalid.test
@@ -7,6 +7,7 @@
; REQUIRES: no_minimal_build
RUN: %p2i --expect-fail --tbc -i %p/Input/no-terminator-inst.tbc --insts \
+RUN: --args -allow-externally-defined-symbols \
RUN: | FileCheck --check-prefix=NO-TERM-INST %s
; NO-TERM-INST: Last instruction in function not terminator
diff --git a/tests_lit/llvm2ice_tests/large_stack_offs.ll b/tests_lit/llvm2ice_tests/large_stack_offs.ll
index 6fba4df..7818336 100644
--- a/tests_lit/llvm2ice_tests/large_stack_offs.ll
+++ b/tests_lit/llvm2ice_tests/large_stack_offs.ll
@@ -8,6 +8,7 @@
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble --disassemble --target arm32 \
; RUN: -i %s --args -Om1 --skip-unimplemented --test-stack-extra 4096 \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
diff --git a/tests_lit/llvm2ice_tests/load.ll b/tests_lit/llvm2ice_tests/load.ll
index 7b88fb3..33520ab 100644
--- a/tests_lit/llvm2ice_tests/load.ll
+++ b/tests_lit/llvm2ice_tests/load.ll
@@ -4,7 +4,7 @@
; RUN: %p2i -i %s --args --verbose inst -threads=0 | FileCheck %s
-define void @load_i64(i32 %addr_arg) {
+define internal void @load_i64(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i64*
%iv = load i64, i64* %__1, align 1
@@ -16,7 +16,7 @@
; CHECK-NEXT: ret void
}
-define void @load_i32(i32 %addr_arg) {
+define internal void @load_i32(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i32*
%iv = load i32, i32* %__1, align 1
@@ -28,7 +28,7 @@
; CHECK-NEXT: ret void
}
-define void @load_i16(i32 %addr_arg) {
+define internal void @load_i16(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i16*
%iv = load i16, i16* %__1, align 1
@@ -40,7 +40,7 @@
; CHECK-NEXT: ret void
}
-define void @load_i8(i32 %addr_arg) {
+define internal void @load_i8(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i8*
%iv = load i8, i8* %__1, align 1
diff --git a/tests_lit/llvm2ice_tests/loop-nest-depth.ll b/tests_lit/llvm2ice_tests/loop-nest-depth.ll
index 16a017e..8baf435 100644
--- a/tests_lit/llvm2ice_tests/loop-nest-depth.ll
+++ b/tests_lit/llvm2ice_tests/loop-nest-depth.ll
@@ -6,7 +6,7 @@
; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 --verbose=loop \
; RUN: --threads=0 | FileCheck %s
-define void @test_single_loop(i32 %a32) {
+define internal void @test_single_loop(i32 %a32) {
entry:
%a = trunc i32 %a32 to i1
br label %loop0
@@ -31,7 +31,7 @@
; CHECK-NEXT: LoopNestDepth = 0
; CHECK-LABEL: Before RMW
-define void @test_single_loop_with_continue(i32 %a32, i32 %b32) {
+define internal void @test_single_loop_with_continue(i32 %a32, i32 %b32) {
entry:
%a = trunc i32 %a32 to i1
%b = trunc i32 %b32 to i1
@@ -61,7 +61,7 @@
; CHECK-NEXT: LoopNestDepth = 0
; CHECK-LABEL: Before RMW
-define void @test_multiple_exits(i32 %a32, i32 %b32) {
+define internal void @test_multiple_exits(i32 %a32, i32 %b32) {
entry:
%a = trunc i32 %a32 to i1
%b = trunc i32 %b32 to i1
@@ -91,7 +91,7 @@
; CHECK-NEXT: LoopNestDepth = 0
; CHECK-LABEL: Before RMW
-define void @test_two_nested_loops(i32 %a32, i32 %b32) {
+define internal void @test_two_nested_loops(i32 %a32, i32 %b32) {
entry:
%a = trunc i32 %a32 to i1
%b = trunc i32 %b32 to i1
@@ -125,7 +125,8 @@
; CHECK-NEXT: LoopNestDepth = 0
; CHECK-LABEL: Before RMW
-define void @test_two_nested_loops_with_continue(i32 %a32, i32 %b32, i32 %c32) {
+define internal void @test_two_nested_loops_with_continue(i32 %a32, i32 %b32,
+ i32 %c32) {
entry:
%a = trunc i32 %a32 to i1
%b = trunc i32 %b32 to i1
@@ -164,7 +165,7 @@
; CHECK-NEXT: LoopNestDepth = 0
; CHECK-LABEL: Before RMW
-define void @test_multiple_nested_loops(i32 %a32, i32 %b32) {
+define internal void @test_multiple_nested_loops(i32 %a32, i32 %b32) {
entry:
%a = trunc i32 %a32 to i1
%b = trunc i32 %b32 to i1
@@ -210,7 +211,7 @@
; CHECK-NEXT: LoopNestDepth = 0
; CHECK-LABEL: Before RMW
-define void @test_three_nested_loops(i32 %a32, i32 %b32, i32 %c32) {
+define internal void @test_three_nested_loops(i32 %a32, i32 %b32, i32 %c32) {
entry:
%a = trunc i32 %a32 to i1
%b = trunc i32 %b32 to i1
@@ -253,7 +254,7 @@
; CHECK-NEXT: LoopNestDepth = 0
; CHECK-LABEL: Before RMW
-define void @test_diamond(i32 %a32) {
+define internal void @test_diamond(i32 %a32) {
entry:
%a = trunc i32 %a32 to i1
br i1 %a, label %left, label %right
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-cmpxchg-optimization.ll b/tests_lit/llvm2ice_tests/nacl-atomic-cmpxchg-optimization.ll
index 7646c10..3e2e743 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-cmpxchg-optimization.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-cmpxchg-optimization.ll
@@ -1,9 +1,9 @@
; This tests the optimization of atomic cmpxchg w/ following cmp + branches.
; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
-; RUN: | FileCheck --check-prefix=O2 %s
+; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=O2 %s
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
-; RUN: | FileCheck --check-prefix=OM1 %s
+; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=OM1 %s
declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
@@ -14,7 +14,8 @@
; (Or if we had other means to detect the only use).
declare void @use_value(i32)
-define i32 @test_atomic_cmpxchg_loop(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_loop(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
br label %loop
@@ -45,7 +46,8 @@
; OM1: call
; Still works if the compare operands are flipped.
-define i32 @test_atomic_cmpxchg_loop2(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_loop2(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
br label %loop
@@ -67,7 +69,7 @@
; Still works if the compare operands are constants.
-define i32 @test_atomic_cmpxchg_loop_const(i32 %iptr, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_loop_const(i32 %iptr, i32 %desired) {
entry:
br label %loop
@@ -88,7 +90,8 @@
; This is a case where the flags cannot be reused (compare is for some
; other condition).
-define i32 @test_atomic_cmpxchg_no_opt(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_no_opt(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
br label %loop
@@ -110,7 +113,8 @@
; Another case where the flags cannot be reused (the comparison result
; is used somewhere else).
-define i32 @test_atomic_cmpxchg_no_opt2(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_no_opt2(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
br label %loop
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll b/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll
index 4f8ea1b..5d0a9bb 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll
@@ -23,7 +23,7 @@
;;; Check unexpected memory order parameter (release=4 and acq_rel=5
;;; are disallowed).
-define i32 @error_atomic_load_8(i32 %iptr) {
+define internal i32 @error_atomic_load_8(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 0)
@@ -32,7 +32,7 @@
}
; CHECK: Unexpected memory ordering for AtomicLoad
-define i32 @error_atomic_load_16(i32 %iptr) {
+define internal i32 @error_atomic_load_16(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i16*
%i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 4)
@@ -41,7 +41,7 @@
}
; CHECK: Unexpected memory ordering for AtomicLoad
-define i64 @error_atomic_load_64(i32 %iptr) {
+define internal i64 @error_atomic_load_64(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 5)
@@ -53,7 +53,7 @@
;;; Store
;;; consume=2, acquire=3, acq_rel=5 are disallowed
-define void @error_atomic_store_32(i32 %iptr, i32 %v) {
+define internal void @error_atomic_store_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 2)
@@ -61,7 +61,7 @@
}
; CHECK: Unexpected memory ordering for AtomicStore
-define void @error_atomic_store_64(i32 %iptr, i64 %v) {
+define internal void @error_atomic_store_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 3)
@@ -69,7 +69,7 @@
}
; CHECK: Unexpected memory ordering for AtomicStore
-define void @error_atomic_store_64_const(i32 %iptr) {
+define internal void @error_atomic_store_64_const(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 5)
@@ -81,7 +81,7 @@
;;; Test atomic memory order and operation.
;;; Modes 3:6 allowed.
-define i32 @error_atomic_rmw_add_8(i32 %iptr, i32 %v) {
+define internal i32 @error_atomic_rmw_add_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -91,7 +91,7 @@
}
; CHECK: Unexpected memory ordering for AtomicRMW
-define i64 @error_atomic_rmw_add_64(i32 %iptr, i64 %v) {
+define internal i64 @error_atomic_rmw_add_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 7)
@@ -99,7 +99,7 @@
}
; CHECK: Unexpected memory ordering for AtomicRMW
-define i32 @error_atomic_rmw_add_16(i32 %iptr, i32 %v) {
+define internal i32 @error_atomic_rmw_add_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -109,7 +109,7 @@
}
; CHECK: Unknown AtomicRMW operation
-define i32 @error_atomic_rmw_add_32(i32 %iptr, i32 %v) {
+define internal i32 @error_atomic_rmw_add_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 7, i32* %ptr, i32 %v, i32 6)
@@ -117,7 +117,7 @@
}
; CHECK: Unknown AtomicRMW operation
-define i32 @error_atomic_rmw_add_32_max(i32 %iptr, i32 %v) {
+define internal i32 @error_atomic_rmw_add_32_max(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4294967295, i32* %ptr, i32 %v, i32 6)
@@ -127,7 +127,8 @@
;;; Cmpxchg
-define i32 @error_atomic_cmpxchg_32_success(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @error_atomic_cmpxchg_32_success(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
@@ -136,7 +137,8 @@
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
-define i32 @error_atomic_cmpxchg_32_failure(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @error_atomic_cmpxchg_32_failure(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
@@ -145,7 +147,8 @@
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
-define i64 @error_atomic_cmpxchg_64_failure(i32 %iptr, i64 %expected, i64 %desired) {
+define internal i64 @error_atomic_cmpxchg_64_failure(i32 %iptr, i64 %expected,
+ i64 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
@@ -156,14 +159,14 @@
;;; Fence and is-lock-free.
-define void @error_atomic_fence() {
+define internal void @error_atomic_fence() {
entry:
call void @llvm.nacl.atomic.fence(i32 0)
ret void
}
; CHECK: Unexpected memory ordering for AtomicFence
-define i32 @error_atomic_is_lock_free_var(i32 %iptr, i32 %bs) {
+define internal i32 @error_atomic_is_lock_free_var(i32 %iptr, i32 %bs) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i1 @llvm.nacl.atomic.is.lock.free(i32 %bs, i8* %ptr)
@@ -175,7 +178,7 @@
;;; Test bad non-constant memory ordering values.
-define i32 @error_atomic_load_8_nonconst(i32 %iptr) {
+define internal i32 @error_atomic_load_8_nonconst(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 %iptr)
@@ -184,7 +187,7 @@
}
; CHECK: Unexpected memory ordering for AtomicLoad
-define void @error_atomic_store_32_nonconst(i32 %iptr, i32 %v) {
+define internal void @error_atomic_store_32_nonconst(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 %v)
@@ -192,7 +195,7 @@
}
; CHECK: Unexpected memory ordering for AtomicStore
-define i32 @error_atomic_rmw_add_8_nonconst(i32 %iptr, i32 %v) {
+define internal i32 @error_atomic_rmw_add_8_nonconst(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -202,7 +205,8 @@
}
; CHECK: Unexpected memory ordering for AtomicRMW
-define i32 @error_atomic_cmpxchg_32_success_nonconst_1(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @error_atomic_cmpxchg_32_success_nonconst_1(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
@@ -211,7 +215,8 @@
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
-define i32 @error_atomic_cmpxchg_32_success_nonconst_2(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @error_atomic_cmpxchg_32_success_nonconst_2(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
@@ -220,7 +225,7 @@
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
-define void @error_atomic_fence_nonconst(i32 %v) {
+define internal void @error_atomic_fence_nonconst(i32 %v) {
entry:
call void @llvm.nacl.atomic.fence(i32 %v)
ret void
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
index 01e2048..ac65d37 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
@@ -14,7 +14,7 @@
@g32_c = internal global [4 x i8] zeroinitializer, align 4
@g32_d = internal global [4 x i8] zeroinitializer, align 4
-define i32 @test_fused_load_sub_a() {
+define internal i32 @test_fused_load_sub_a() {
entry:
%p_alloca = alloca i8, i32 4, align 4
%p_alloca_bc = bitcast i8* %p_alloca to i32*
@@ -54,7 +54,7 @@
; CHECK: mov DWORD PTR
; Test with the fence moved up a bit.
-define i32 @test_fused_load_sub_b() {
+define internal i32 @test_fused_load_sub_b() {
entry:
%p_alloca = alloca i8, i32 4, align 4
%p_alloca_bc = bitcast i8* %p_alloca to i32*
@@ -95,7 +95,7 @@
; CHECK: mov DWORD PTR
; Test with the fence splitting a load/sub.
-define i32 @test_fused_load_sub_c() {
+define internal i32 @test_fused_load_sub_c() {
entry:
%p_alloca = alloca i8, i32 4, align 4
%p_alloca_bc = bitcast i8* %p_alloca to i32*
@@ -141,7 +141,7 @@
; Test where a bunch of i8 loads could have been fused into one
; i32 load, but a fence blocks that.
-define i32 @could_have_fused_loads() {
+define internal i32 @could_have_fused_loads() {
entry:
%ptr1 = bitcast [4 x i8]* @g32_d to i8*
%b1 = load i8, i8* %ptr1, align 1
@@ -183,7 +183,7 @@
; Test where an identical load from two branches could have been hoisted
; up, and then the code merged, but a fence prevents it.
-define i32 @could_have_hoisted_loads(i32 %x) {
+define internal i32 @could_have_hoisted_loads(i32 %x) {
entry:
%ptr = bitcast [4 x i8]* @g32_d to i32*
%cmp = icmp eq i32 %x, 1
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll b/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll
index 30a08b2..5c745be 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll
@@ -2,24 +2,27 @@
; size allowed.
; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
-; RUN: | FileCheck %s
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
-; RUN: | FileCheck --check-prefix=O2 %s
+; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=O2 %s
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
-; RUN: | FileCheck %s
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; RUN: %if --need=allow_dump --need=target_ARM32 --command %p2i --filetype=asm \
; RUN: --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --need=target_ARM32 --command FileCheck %s \
; RUN: --check-prefix=ARM32
; RUN: %if --need=allow_dump --need=target_ARM32 --command %p2i --filetype=asm \
; RUN: --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --need=target_ARM32 --command FileCheck %s \
; RUN: --check-prefix=ARM32O2
; RUN: %if --need=allow_dump --need=target_ARM32 --command %p2i --filetype=asm \
; RUN: --target arm32 -i %s --args -Om1 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_dump --need=target_ARM32 --command FileCheck %s \
; RUN: --check-prefix=ARM32
@@ -58,7 +61,7 @@
; x86 guarantees load/store to be atomic if naturally aligned.
; The PNaCl IR requires all atomic accesses to be naturally aligned.
-define i32 @test_atomic_load_8(i32 %iptr) {
+define internal i32 @test_atomic_load_8(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
; parameter value "6" is for the sequential consistency memory order.
@@ -74,7 +77,7 @@
; ARM32: ldrb r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define i32 @test_atomic_load_16(i32 %iptr) {
+define internal i32 @test_atomic_load_16(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i16*
%i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
@@ -89,7 +92,7 @@
; ARM32: ldrh r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define i32 @test_atomic_load_32(i32 %iptr) {
+define internal i32 @test_atomic_load_32(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
@@ -102,7 +105,7 @@
; ARM32: ldr r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define i64 @test_atomic_load_64(i32 %iptr) {
+define internal i64 @test_atomic_load_64(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
@@ -115,7 +118,7 @@
; ARM32: ldrexd r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define i32 @test_atomic_load_32_with_arith(i32 %iptr) {
+define internal i32 @test_atomic_load_32_with_arith(i32 %iptr) {
entry:
br label %next
@@ -137,7 +140,7 @@
; ARM32: ldr r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define i32 @test_atomic_load_32_ignored(i32 %iptr) {
+define internal i32 @test_atomic_load_32_ignored(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
@@ -153,7 +156,7 @@
; ARM32: ldr r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define i64 @test_atomic_load_64_ignored(i32 %iptr) {
+define internal i64 @test_atomic_load_64_ignored(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%ignored = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
@@ -168,7 +171,7 @@
;;; Store
-define void @test_atomic_store_8(i32 %iptr, i32 %v) {
+define internal void @test_atomic_store_8(i32 %iptr, i32 %v) {
entry:
%truncv = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -183,7 +186,7 @@
; ARM32: strb r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define void @test_atomic_store_16(i32 %iptr, i32 %v) {
+define internal void @test_atomic_store_16(i32 %iptr, i32 %v) {
entry:
%truncv = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -198,7 +201,7 @@
; ARM32: strh r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define void @test_atomic_store_32(i32 %iptr, i32 %v) {
+define internal void @test_atomic_store_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6)
@@ -212,7 +215,7 @@
; ARM32: str r{{[0-9]+}}, [r{{[0-9]+}}
; ARM32: dmb
-define void @test_atomic_store_64(i32 %iptr, i64 %v) {
+define internal void @test_atomic_store_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6)
@@ -230,7 +233,7 @@
; ARM32: bne
; ARM32: dmb
-define void @test_atomic_store_64_const(i32 %iptr) {
+define internal void @test_atomic_store_64_const(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6)
@@ -258,7 +261,7 @@
;; add
-define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -278,7 +281,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -297,7 +300,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
@@ -314,7 +317,7 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
@@ -345,7 +348,7 @@
; ARM32: dmb
; Same test as above, but with a global address to test FakeUse issues.
-define i64 @test_atomic_rmw_add_64_global(i64 %v) {
+define internal i64 @test_atomic_rmw_add_64_global(i64 %v) {
entry:
%ptr = bitcast [8 x i8]* @Global64 to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
@@ -365,7 +368,7 @@
; used to manage the stack frame, so it cannot be used as a register either.
declare void @use_ptr(i32 %iptr)
-define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) {
entry:
br label %eblock ; Disable alloca optimization
eblock:
@@ -402,7 +405,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
@@ -422,7 +425,7 @@
; Atomic RMW 64 needs to be expanded into its own loop.
; Make sure that works w/ non-trivial function bodies.
-define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) {
entry:
%x = icmp ult i64 %v, 100
br i1 %x, label %err, label %loop
@@ -462,7 +465,7 @@
;; sub
-define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -482,7 +485,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -502,7 +505,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
@@ -520,7 +523,7 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6)
@@ -545,7 +548,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
@@ -565,7 +568,7 @@
;; or
-define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -589,7 +592,7 @@
; ARM32: dmb
; Same test as above, but with a global address to test FakeUse issues.
-define i32 @test_atomic_rmw_or_8_global(i32 %v) {
+define internal i32 @test_atomic_rmw_or_8_global(i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = bitcast [1 x i8]* @Global8 to i8*
@@ -608,7 +611,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -630,7 +633,7 @@
; ARM32: dmb
; Same test as above, but with a global address to test FakeUse issues.
-define i32 @test_atomic_rmw_or_16_global(i32 %v) {
+define internal i32 @test_atomic_rmw_or_16_global(i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = bitcast [2 x i8]* @Global16 to i16*
@@ -649,7 +652,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
@@ -669,7 +672,7 @@
; ARM32: dmb
; Same test as above, but with a global address to test FakeUse issues.
-define i32 @test_atomic_rmw_or_32_global(i32 %v) {
+define internal i32 @test_atomic_rmw_or_32_global(i32 %v) {
entry:
%ptr = bitcast [4 x i8]* @Global32 to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
@@ -686,7 +689,7 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6)
@@ -711,7 +714,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
@@ -735,7 +738,7 @@
;; and
-define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -756,7 +759,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -777,7 +780,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
@@ -796,7 +799,7 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6)
@@ -821,7 +824,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
@@ -843,7 +846,7 @@
;; xor
-define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -864,7 +867,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -885,7 +888,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
@@ -904,7 +907,7 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6)
@@ -929,7 +932,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
@@ -950,7 +953,7 @@
;; exchange
-define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -968,7 +971,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -986,7 +989,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
@@ -1002,7 +1005,7 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6)
@@ -1024,7 +1027,7 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
@@ -1044,7 +1047,8 @@
;;;; Cmpxchg
-define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%trunc_exp = trunc i32 %expected to i8
%trunc_des = trunc i32 %desired to i8
@@ -1069,7 +1073,8 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%trunc_exp = trunc i32 %expected to i16
%trunc_des = trunc i32 %desired to i16
@@ -1092,7 +1097,8 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
@@ -1112,7 +1118,8 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
+define internal i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected,
+ i64 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
@@ -1141,7 +1148,7 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_cmpxchg_64_undef(i32 %iptr, i64 %desired) {
+define internal i64 @test_atomic_cmpxchg_64_undef(i32 %iptr, i64 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 undef,
@@ -1165,7 +1172,8 @@
; ARM32: dmb
; Test a case where %old really does need to be copied out of edx:eax.
-define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) {
+define internal void @test_atomic_cmpxchg_64_store(
+ i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
@@ -1199,7 +1207,8 @@
; Test with some more register pressure. When we have an alloca, ebp is
; used to manage the stack frame, so it cannot be used as a register either.
-define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) {
+define internal i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected,
+ i64 %desired) {
entry:
br label %eblock ; Disable alloca optimization
eblock:
@@ -1240,7 +1249,8 @@
; ARM32: bne
; ARM32: dmb
-define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected,
+ i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
@@ -1260,7 +1270,8 @@
; ARM32: bne
; ARM32: dmb
-define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) {
+define internal i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected,
+ i64 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
@@ -1288,7 +1299,7 @@
;;;; Fence and is-lock-free.
-define void @test_atomic_fence() {
+define internal void @test_atomic_fence() {
entry:
call void @llvm.nacl.atomic.fence(i32 6)
ret void
@@ -1298,7 +1309,7 @@
; ARM32-LABEL: test_atomic_fence
; ARM32: dmb sy
-define void @test_atomic_fence_all() {
+define internal void @test_atomic_fence_all() {
entry:
call void @llvm.nacl.atomic.fence.all()
ret void
@@ -1308,7 +1319,7 @@
; ARM32-LABEL: test_atomic_fence_all
; ARM32: dmb sy
-define i32 @test_atomic_is_lock_free(i32 %iptr) {
+define internal i32 @test_atomic_is_lock_free(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
@@ -1320,7 +1331,7 @@
; ARM32-LABEL: test_atomic_is_lock_free
; ARM32: movw {{.*}}, #1
-define i32 @test_not_lock_free(i32 %iptr) {
+define internal i32 @test_not_lock_free(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i1 @llvm.nacl.atomic.is.lock.free(i32 7, i8* %ptr)
@@ -1332,7 +1343,7 @@
; ARM32-LABEL: test_not_lock_free
; ARM32: mov {{.*}}, #0
-define i32 @test_atomic_is_lock_free_ignored(i32 %iptr) {
+define internal i32 @test_atomic_is_lock_free_ignored(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%ignored = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
@@ -1352,7 +1363,8 @@
; fact that nacl.atomic.is.lock.free will resolve to a constant
; (which adds DCE opportunities). Once we optimize, the test expectations
; for this case should change.
-define i32 @test_atomic_is_lock_free_can_dce(i32 %iptr, i32 %x, i32 %y) {
+define internal i32 @test_atomic_is_lock_free_can_dce(i32 %iptr, i32 %x,
+ i32 %y) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
@@ -1376,7 +1388,7 @@
; Make sure we model that the Src register is modified and therefore it can't
; share a register with an overlapping live range, even if the result of the
; xadd instruction is unused.
-define void @test_xadd_regalloc() {
+define internal void @test_xadd_regalloc() {
entry:
br label %body
body:
@@ -1397,7 +1409,7 @@
; O2: ret
; Do the same test for the xchg instruction instead of xadd.
-define void @test_xchg_regalloc() {
+define internal void @test_xchg_regalloc() {
entry:
br label %body
body:
@@ -1418,7 +1430,7 @@
; O2: ret
; Same test for cmpxchg.
-define void @test_cmpxchg_regalloc() {
+define internal void @test_cmpxchg_regalloc() {
entry:
br label %body
body:
@@ -1439,7 +1451,7 @@
; O2: ret
; Same test for cmpxchg8b.
-define void @test_cmpxchg8b_regalloc() {
+define internal void @test_cmpxchg8b_regalloc() {
entry:
br label %body
body:
diff --git a/tests_lit/llvm2ice_tests/nacl-mem-intrinsics.ll b/tests_lit/llvm2ice_tests/nacl-mem-intrinsics.ll
index 5e5d7b0..95f5807 100644
--- a/tests_lit/llvm2ice_tests/nacl-mem-intrinsics.ll
+++ b/tests_lit/llvm2ice_tests/nacl-mem-intrinsics.ll
@@ -20,7 +20,7 @@
declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1)
-define void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
+define internal void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -35,7 +35,7 @@
; ARM32-LABEL: test_memcpy
; ARM32: bl {{.*}} memcpy
-define void @test_memcpy_long_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memcpy_long_const_len(i32 %iptr_dst, i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -50,7 +50,8 @@
; ARM32-LABEL: test_memcpy_long_const_len
; ARM32: bl {{.*}} memcpy
-define void @test_memcpy_very_small_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memcpy_very_small_const_len(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -67,7 +68,7 @@
; ARM32-LABEL: test_memcpy_very_small_const_len
; ARM32: bl {{.*}} memcpy
-define void @test_memcpy_const_len_3(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memcpy_const_len_3(i32 %iptr_dst, i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -86,7 +87,7 @@
; ARM32-LABEL: test_memcpy_const_len_3
; ARM32: bl {{.*}} memcpy
-define void @test_memcpy_mid_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memcpy_mid_const_len(i32 %iptr_dst, i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -105,7 +106,8 @@
; ARM32-LABEL: test_memcpy_mid_const_len
; ARM32: bl {{.*}} memcpy
-define void @test_memcpy_mid_const_len_overlap(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memcpy_mid_const_len_overlap(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -124,7 +126,8 @@
; ARM32-LABEL: test_memcpy_mid_const_len_overlap
; ARM32: bl {{.*}} memcpy
-define void @test_memcpy_big_const_len_overlap(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memcpy_big_const_len_overlap(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -143,7 +146,8 @@
; ARM32-LABEL: test_memcpy_big_const_len_overlap
; ARM32: bl {{.*}} memcpy
-define void @test_memcpy_large_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memcpy_large_const_len(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -164,7 +168,7 @@
; ARM32-LABEL: test_memcpy_large_const_len
; ARM32: bl {{.*}} memcpy
-define void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
+define internal void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -179,7 +183,8 @@
; ARM32-LABEL: test_memmove
; ARM32: bl {{.*}} memmove
-define void @test_memmove_long_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memmove_long_const_len(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -194,7 +199,8 @@
; ARM32-LABEL: test_memmove_long_const_len
; ARM32: bl {{.*}} memmove
-define void @test_memmove_very_small_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memmove_very_small_const_len(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -211,7 +217,7 @@
; ARM32-LABEL: test_memmove_very_small_const_len
; ARM32: bl {{.*}} memmove
-define void @test_memmove_const_len_3(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memmove_const_len_3(i32 %iptr_dst, i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -230,7 +236,7 @@
; ARM32-LABEL: test_memmove_const_len_3
; ARM32: bl {{.*}} memmove
-define void @test_memmove_mid_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memmove_mid_const_len(i32 %iptr_dst, i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -249,7 +255,8 @@
; ARM32-LABEL: test_memmove_mid_const_len
; ARM32: bl {{.*}} memmove
-define void @test_memmove_mid_const_len_overlap(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memmove_mid_const_len_overlap(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -268,7 +275,8 @@
; ARM32-LABEL: test_memmove_mid_const_len_overlap
; ARM32: bl {{.*}} memmove
-define void @test_memmove_big_const_len_overlap(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memmove_big_const_len_overlap(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -287,7 +295,8 @@
; ARM32-LABEL: test_memmove_big_const_len_overlap
; ARM32: bl {{.*}} memmove
-define void @test_memmove_large_const_len(i32 %iptr_dst, i32 %iptr_src) {
+define internal void @test_memmove_large_const_len(i32 %iptr_dst,
+ i32 %iptr_src) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -308,7 +317,7 @@
; ARM32-LABEL: test_memmove_large_const_len
; ARM32: bl {{.*}} memmove
-define void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
+define internal void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
entry:
%val = trunc i32 %wide_val to i8
%dst = inttoptr i32 %iptr_dst to i8*
@@ -326,7 +335,8 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_const_len_align(i32 %iptr_dst, i32 %wide_val) {
+define internal void @test_memset_const_len_align(i32 %iptr_dst,
+ i32 %wide_val) {
entry:
%val = trunc i32 %wide_val to i8
%dst = inttoptr i32 %iptr_dst to i8*
@@ -344,7 +354,8 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_long_const_len_zero_val_align(i32 %iptr_dst) {
+define internal void @test_memset_long_const_len_zero_val_align(
+ i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 0,
@@ -359,7 +370,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_const_val(i32 %iptr_dst, i32 %len) {
+define internal void @test_memset_const_val(i32 %iptr_dst, i32 %len) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 0, i32 %len, i32 1, i1 false)
@@ -374,7 +385,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_const_val_len_very_small(i32 %iptr_dst) {
+define internal void @test_memset_const_val_len_very_small(i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 10, i32 2, i32 1, i1 false)
@@ -389,7 +400,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_const_val_len_3(i32 %iptr_dst) {
+define internal void @test_memset_const_val_len_3(i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 16, i32 3, i32 1, i1 false)
@@ -405,7 +416,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_const_val_len_mid(i32 %iptr_dst) {
+define internal void @test_memset_const_val_len_mid(i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 32, i32 9, i32 1, i1 false)
@@ -422,7 +433,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_zero_const_len_small(i32 %iptr_dst) {
+define internal void @test_memset_zero_const_len_small(i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 0, i32 12, i32 1, i1 false)
@@ -439,7 +450,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_zero_const_len_small_overlap(i32 %iptr_dst) {
+define internal void @test_memset_zero_const_len_small_overlap(i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 0, i32 15, i32 1, i1 false)
@@ -456,7 +467,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_zero_const_len_big_overlap(i32 %iptr_dst) {
+define internal void @test_memset_zero_const_len_big_overlap(i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 0, i32 30, i32 1, i1 false)
@@ -473,7 +484,7 @@
; ARM32: uxtb
; ARM32: bl {{.*}} memset
-define void @test_memset_zero_const_len_large(i32 %iptr_dst) {
+define internal void @test_memset_zero_const_len_large(i32 %iptr_dst) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
call void @llvm.memset.p0i8.i32(i8* %dst, i8 0, i32 33, i32 1, i1 false)
diff --git a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
index 04be832..aea34ad 100644
--- a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
+++ b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
@@ -2,9 +2,11 @@
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
; RUN: --target x8632 -i %s --args -O2 -sandbox \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
; RUN: --target x8632 -i %s --args -Om1 -sandbox \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; Do another run w/ O2 and a different check-prefix (otherwise O2 and Om1
@@ -12,6 +14,7 @@
; some code is optimized out.
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
; RUN: --target x8632 -i %s --args -O2 -sandbox \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 \
; RUN: --command FileCheck --check-prefix=CHECKO2REM %s
@@ -20,12 +23,14 @@
; We also know that because it's O2, it'll have the O2REM optimizations.
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
; RUN: --target x8632 -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 \
; RUN: --command FileCheck --check-prefix=CHECKO2UNSANDBOXEDREM %s
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble --disassemble --target arm32 \
; RUN: -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
@@ -50,7 +55,7 @@
declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)
-define i32 @test_nacl_read_tp() {
+define internal i32 @test_nacl_read_tp() {
entry:
%ptr = call i8* @llvm.nacl.read.tp()
%__1 = ptrtoint i8* %ptr to i32
@@ -63,7 +68,7 @@
; CHECKO2UNSANDBOXEDREM-LABEL: test_nacl_read_tp
; CHECKO2UNSANDBOXEDREM: call {{.*}} R_{{.*}} __nacl_read_tp
-define i32 @test_nacl_read_tp_more_addressing() {
+define internal i32 @test_nacl_read_tp_more_addressing() {
entry:
%ptr = call i8* @llvm.nacl.read.tp()
%__1 = ptrtoint i8* %ptr to i32
@@ -90,7 +95,7 @@
; CHECKO2UNSANDBOXEDREM: call {{.*}} R_{{.*}} __nacl_read_tp
; CHECKO2UNSANDBOXEDREM: call {{.*}} R_{{.*}} __nacl_read_tp
-define i32 @test_nacl_read_tp_dead(i32 %a) {
+define internal i32 @test_nacl_read_tp_dead(i32 %a) {
entry:
%ptr = call i8* @llvm.nacl.read.tp()
; Not actually using the result of nacl read tp call.
@@ -103,7 +108,7 @@
; CHECKO2UNSANDBOXEDREM-LABEL: test_nacl_read_tp_dead
; CHECKO2UNSANDBOXEDREM-NOT: call {{.*}} R_{{.*}} __nacl_read_tp
-define i32 @test_setjmplongjmp(i32 %iptr_env) {
+define internal i32 @test_setjmplongjmp(i32 %iptr_env) {
entry:
%env = inttoptr i32 %iptr_env to i8*
%i = call i32 @llvm.nacl.setjmp(i8* %env)
@@ -127,7 +132,7 @@
; ARM32: bl {{.*}} setjmp
; ARM32: bl {{.*}} longjmp
-define i32 @test_setjmp_unused(i32 %iptr_env, i32 %i_other) {
+define internal i32 @test_setjmp_unused(i32 %iptr_env, i32 %i_other) {
entry:
%env = inttoptr i32 %iptr_env to i8*
%i = call i32 @llvm.nacl.setjmp(i8* %env)
@@ -138,7 +143,7 @@
; CHECKO2REM-LABEL: test_setjmp_unused
; CHECKO2REM: call {{.*}} R_{{.*}} setjmp
-define float @test_sqrt_float(float %x, i32 %iptr) {
+define internal float @test_sqrt_float(float %x, i32 %iptr) {
entry:
%r = call float @llvm.sqrt.f32(float %x)
%r2 = call float @llvm.sqrt.f32(float %r)
@@ -156,7 +161,7 @@
; ARM32: vsqrt.f32
; ARM32: vadd.f32
-define float @test_sqrt_float_mergeable_load(float %x, i32 %iptr) {
+define internal float @test_sqrt_float_mergeable_load(float %x, i32 %iptr) {
entry:
%__2 = inttoptr i32 %iptr to float*
%y = load float, float* %__2, align 4
@@ -173,7 +178,7 @@
; ARM32: vldr s{{.*}}
; ARM32: vsqrt.f32
-define double @test_sqrt_double(double %x, i32 %iptr) {
+define internal double @test_sqrt_double(double %x, i32 %iptr) {
entry:
%r = call double @llvm.sqrt.f64(double %x)
%r2 = call double @llvm.sqrt.f64(double %r)
@@ -191,7 +196,7 @@
; ARM32: vsqrt.f64
; ARM32: vadd.f64
-define double @test_sqrt_double_mergeable_load(double %x, i32 %iptr) {
+define internal double @test_sqrt_double_mergeable_load(double %x, i32 %iptr) {
entry:
%__2 = inttoptr i32 %iptr to double*
%y = load double, double* %__2, align 8
@@ -205,7 +210,7 @@
; ARM32: vldr d{{.*}}
; ARM32: vsqrt.f64
-define float @test_sqrt_ignored(float %x, double %y) {
+define internal float @test_sqrt_ignored(float %x, double %y) {
entry:
%ignored1 = call float @llvm.sqrt.f32(float %x)
%ignored2 = call double @llvm.sqrt.f64(double %y)
@@ -215,7 +220,7 @@
; CHECKO2REM-NOT: sqrtss
; CHECKO2REM-NOT: sqrtsd
-define float @test_fabs_float(float %x) {
+define internal float @test_fabs_float(float %x) {
entry:
%r = call float @llvm.fabs.f32(float %x)
%r2 = call float @llvm.fabs.f32(float %r)
@@ -236,7 +241,7 @@
; CHECK: psrld
; CHECK: pand {{.*}}xmm{{.*}}xmm
-define double @test_fabs_double(double %x) {
+define internal double @test_fabs_double(double %x) {
entry:
%r = call double @llvm.fabs.f64(double %x)
%r2 = call double @llvm.fabs.f64(double %r)
@@ -257,7 +262,7 @@
; CHECK: psrlq
; CHECK: pand {{.*}}xmm{{.*}}xmm
-define <4 x float> @test_fabs_v4f32(<4 x float> %x) {
+define internal <4 x float> @test_fabs_v4f32(<4 x float> %x) {
entry:
%r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
%r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r)
@@ -276,7 +281,7 @@
; CHECK: psrld
; CHECK: pand
-define i32 @test_trap(i32 %br) {
+define internal i32 @test_trap(i32 %br) {
entry:
%r1 = icmp eq i32 %br, 0
br i1 %r1, label %Zero, label %NonZero
@@ -291,7 +296,7 @@
; ARM32-LABEL: test_trap
; ARM32: .word 0xe7fedef0
-define i32 @test_bswap_16(i32 %x) {
+define internal i32 @test_bswap_16(i32 %x) {
entry:
%x_trunc = trunc i32 %x to i16
%r = call i16 @llvm.bswap.i16(i16 %x_trunc)
@@ -306,7 +311,7 @@
; ARM32: rev
; ARM32: lsr {{.*}} #16
-define i32 @test_bswap_32(i32 %x) {
+define internal i32 @test_bswap_32(i32 %x) {
entry:
%r = call i32 @llvm.bswap.i32(i32 %x)
ret i32 %r
@@ -316,7 +321,7 @@
; ARM32-LABEL: test_bswap_32
; ARM32: rev
-define i64 @test_bswap_64(i64 %x) {
+define internal i64 @test_bswap_64(i64 %x) {
entry:
%r = call i64 @llvm.bswap.i64(i64 %x)
ret i64 %r
@@ -328,7 +333,7 @@
; ARM32: rev
; ARM32: rev
-define i64 @test_bswap_64_undef() {
+define internal i64 @test_bswap_64_undef() {
entry:
%r = call i64 @llvm.bswap.i64(i64 undef)
ret i64 %r
@@ -340,7 +345,7 @@
; ARM32: rev
; ARM32: rev
-define i32 @test_ctlz_32(i32 %x) {
+define internal i32 @test_ctlz_32(i32 %x) {
entry:
%r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
ret i32 %r
@@ -356,7 +361,7 @@
; ARM32-LABEL: test_ctlz_32
; ARM32: clz
-define i32 @test_ctlz_32_const() {
+define internal i32 @test_ctlz_32_const() {
entry:
%r = call i32 @llvm.ctlz.i32(i32 123456, i1 false)
ret i32 %r
@@ -369,7 +374,7 @@
; ARM32-LABEL: test_ctlz_32_const
; ARM32: clz
-define i32 @test_ctlz_32_ignored(i32 %x) {
+define internal i32 @test_ctlz_32_ignored(i32 %x) {
entry:
%ignored = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
ret i32 1
@@ -377,7 +382,7 @@
; CHECKO2REM-LABEL: test_ctlz_32_ignored
; CHECKO2REM-NOT: bsr
-define i64 @test_ctlz_64(i64 %x) {
+define internal i64 @test_ctlz_64(i64 %x) {
entry:
%r = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
ret i64 %r
@@ -401,7 +406,7 @@
; ARM32: clzne
; ARM32: mov {{.*}}, #0
-define i32 @test_ctlz_64_const(i64 %x) {
+define internal i32 @test_ctlz_64_const(i64 %x) {
entry:
%r = call i64 @llvm.ctlz.i64(i64 123456789012, i1 false)
%r2 = trunc i64 %r to i32
@@ -414,7 +419,7 @@
; ARM32: clz
; ARM32: clzne
-define i32 @test_ctlz_64_ignored(i64 %x) {
+define internal i32 @test_ctlz_64_ignored(i64 %x) {
entry:
%ignored = call i64 @llvm.ctlz.i64(i64 1234567890, i1 false)
ret i32 2
@@ -422,7 +427,7 @@
; CHECKO2REM-LABEL: test_ctlz_64_ignored
; CHECKO2REM-NOT: bsr
-define i32 @test_cttz_32(i32 %x) {
+define internal i32 @test_cttz_32(i32 %x) {
entry:
%r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
ret i32 %r
@@ -435,7 +440,7 @@
; ARM32: rbit
; ARM32: clz
-define i64 @test_cttz_64(i64 %x) {
+define internal i64 @test_cttz_64(i64 %x) {
entry:
%r = call i64 @llvm.cttz.i64(i64 %x, i1 false)
ret i64 %r
@@ -458,7 +463,7 @@
; ARM32: clzne
; ARM32: mov {{.*}}, #0
-define i32 @test_popcount_32(i32 %x) {
+define internal i32 @test_popcount_32(i32 %x) {
entry:
%r = call i32 @llvm.ctpop.i32(i32 %x)
ret i32 %r
@@ -468,7 +473,7 @@
; ARM32-LABEL: test_popcount_32
; ARM32: bl {{.*}} __popcountsi2
-define i64 @test_popcount_64(i64 %x) {
+define internal i64 @test_popcount_64(i64 %x) {
entry:
%r = call i64 @llvm.ctpop.i64(i64 %x)
ret i64 %r
@@ -482,7 +487,7 @@
; ARM32: bl {{.*}} __popcountdi2
; ARM32: mov {{.*}}, #0
-define i32 @test_popcount_64_ret_i32(i64 %x) {
+define internal i32 @test_popcount_64_ret_i32(i64 %x) {
entry:
%r_i64 = call i64 @llvm.ctpop.i64(i64 %x)
%r = trunc i64 %r_i64 to i32
@@ -493,7 +498,7 @@
; CHECKO2REM: call {{.*}} R_{{.*}} __popcountdi2
; CHECKO2REM-NOT: mov {{.*}}, 0
-define void @test_stacksave_noalloca() {
+define internal void @test_stacksave_noalloca() {
entry:
%sp = call i8* @llvm.stacksave()
call void @llvm.stackrestore(i8* %sp)
@@ -508,7 +513,7 @@
declare i32 @foo(i32 %x)
-define void @test_stacksave_multiple(i32 %x) {
+define internal void @test_stacksave_multiple(i32 %x) {
entry:
%x_4 = mul i32 %x, 4
%sp1 = call i8* @llvm.stacksave()
diff --git a/tests_lit/llvm2ice_tests/nop-insertion.ll b/tests_lit/llvm2ice_tests/nop-insertion.ll
index 785ef48..7031ff7 100644
--- a/tests_lit/llvm2ice_tests/nop-insertion.ll
+++ b/tests_lit/llvm2ice_tests/nop-insertion.ll
@@ -19,7 +19,7 @@
; RUN: | FileCheck %s --check-prefix=SANDBOX50
-define <4 x i32> @mul_v4i32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @mul_v4i32(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = mul <4 x i32> %a, %b
ret <4 x i32> %res
diff --git a/tests_lit/llvm2ice_tests/phi_invalid.test b/tests_lit/llvm2ice_tests/phi_invalid.test
index 184f08c..d8323db 100644
--- a/tests_lit/llvm2ice_tests/phi_invalid.test
+++ b/tests_lit/llvm2ice_tests/phi_invalid.test
@@ -3,6 +3,7 @@
RUN: %p2i --expect-fail --tbc -i %p/Input/phi-invalid.tbc --insts 2>&1 \
RUN: --filetype=obj --args -o /dev/null \
+RUN: -allow-externally-defined-symbols \
RUN: | FileCheck --check-prefix=BADPHI %s
; BADPHI: Phi error:
diff --git a/tests_lit/llvm2ice_tests/prune_unreachable.ll b/tests_lit/llvm2ice_tests/prune_unreachable.ll
index 0c77acd..7ce93e9 100644
--- a/tests_lit/llvm2ice_tests/prune_unreachable.ll
+++ b/tests_lit/llvm2ice_tests/prune_unreachable.ll
@@ -1,12 +1,14 @@
; This tests that unreachable basic blocks are pruned from the CFG, so that
; liveness analysis doesn't detect inconsistencies.
-; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 | FileCheck %s
-; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 | FileCheck %s
+; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
+; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
declare void @abort()
-define i32 @unreachable_block() {
+define internal i32 @unreachable_block() {
entry:
; ret_val has no reaching uses and so its assignment may be
; dead-code eliminated.
diff --git a/tests_lit/llvm2ice_tests/randomize-pool-immediate-basic.ll b/tests_lit/llvm2ice_tests/randomize-pool-immediate-basic.ll
index ef7de4f..a5d2eb4 100644
--- a/tests_lit/llvm2ice_tests/randomize-pool-immediate-basic.ll
+++ b/tests_lit/llvm2ice_tests/randomize-pool-immediate-basic.ll
@@ -19,7 +19,7 @@
; RUN: | FileCheck %s --check-prefix=POOLING
-define i32 @add_arg_plus_200000(i32 %arg) {
+define internal i32 @add_arg_plus_200000(i32 %arg) {
entry:
%res = add i32 200000, %arg
ret i32 %res
@@ -36,7 +36,7 @@
; POOLING: mov e{{[a-z]*}},DWORD PTR ds:0x0 {{[0-9a-f]*}}: R_386_32 .L$i32$00030d40
}
-define float @load_arg_plus_200000(float* %arg) {
+define internal float @load_arg_plus_200000(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr.int = add i32 %arg.int, 200000
@@ -53,7 +53,7 @@
; POOLING: mov e{{[a-z]*}},DWORD PTR ds:0x0 {{[0-9a-f]*}}: R_386_32 .L$i32$00030d40
}
-define i64 @add_arg_plus_64bits(i32 %arg) {
+define internal i64 @add_arg_plus_64bits(i32 %arg) {
entry:
%0 = sext i32 %arg to i64
%res = add i64 90000000000, %0
@@ -73,7 +73,7 @@
; POOLING: mov e{{[a-z]*}},DWORD PTR ds:0x0 {{[0-9a-f]*}}: R_386_32 .L$i32$f46b0400
}
-define i64 @load_arg_plus_64bits(i64* %arg) {
+define internal i64 @load_arg_plus_64bits(i64* %arg) {
entry:
%arg.int = ptrtoint i64* %arg to i32
%arg.new = add i32 %arg.int, 90000
diff --git a/tests_lit/llvm2ice_tests/randomize-regalloc.ll b/tests_lit/llvm2ice_tests/randomize-regalloc.ll
index 5bdd70d..51e4b72 100644
--- a/tests_lit/llvm2ice_tests/randomize-regalloc.ll
+++ b/tests_lit/llvm2ice_tests/randomize-regalloc.ll
@@ -17,7 +17,7 @@
; RUN: -randomize-regalloc \
; RUN: | FileCheck %s --check-prefix=OPTM1_123
-define <4 x i32> @mul_v4i32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @mul_v4i32(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = mul <4 x i32> %a, %b
ret <4 x i32> %res
diff --git a/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll b/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll
index f3d4b5d..0833a5c 100644
--- a/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll
+++ b/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll
@@ -5,7 +5,7 @@
; TODO(kschimpf) Find out why lc2i is needed.
; RUN: %lc2i -i %s --args -O2 --verbose regalloc
-define void @foo() {
+define internal void @foo() {
bb:
br i1 undef, label %bb13, label %bb14
diff --git a/tests_lit/llvm2ice_tests/reorder-basic-blocks.ll b/tests_lit/llvm2ice_tests/reorder-basic-blocks.ll
index bd49021..440b08d 100644
--- a/tests_lit/llvm2ice_tests/reorder-basic-blocks.ll
+++ b/tests_lit/llvm2ice_tests/reorder-basic-blocks.ll
@@ -9,7 +9,7 @@
; RUN: -reorder-basic-blocks -threads=0 \
; RUN: | FileCheck %s --check-prefix=SEED2
-define void @basic_block_reordering(i32 %foo, i32 %bar) {
+define internal void @basic_block_reordering(i32 %foo, i32 %bar) {
entry:
%r1 = icmp eq i32 %foo, %bar
br i1 %r1, label %BB1, label %BB2
diff --git a/tests_lit/llvm2ice_tests/reorder-functions.ll b/tests_lit/llvm2ice_tests/reorder-functions.ll
index 3cbce9c..5351e43 100644
--- a/tests_lit/llvm2ice_tests/reorder-functions.ll
+++ b/tests_lit/llvm2ice_tests/reorder-functions.ll
@@ -37,27 +37,27 @@
; RUN: -reorder-functions-window-size=0xffffffff \
; RUN: | FileCheck %s --check-prefix=WINDOWSIZEMAX
-define void @func1() {
+define internal void @func1() {
ret void
}
-define void @func2() {
+define internal void @func2() {
ret void
}
-define void @func3() {
+define internal void @func3() {
ret void
}
-define void @func4() {
+define internal void @func4() {
ret void
}
-define void @func5() {
+define internal void @func5() {
ret void
}
-define void @func6() {
+define internal void @func6() {
ret void
}
diff --git a/tests_lit/llvm2ice_tests/reorder-pooled-constants.ll b/tests_lit/llvm2ice_tests/reorder-pooled-constants.ll
index ce53ea1..4bc3c20 100644
--- a/tests_lit/llvm2ice_tests/reorder-pooled-constants.ll
+++ b/tests_lit/llvm2ice_tests/reorder-pooled-constants.ll
@@ -3,11 +3,11 @@
; RUN: %p2i --assemble --disassemble --filetype=obj --dis-flags=-s \
; RUN: --target x8632 -i %s --args -sz-seed=1 -O2 -reorder-pooled-constants \
-; RUN: | FileCheck %s --check-prefix=X86
+; RUN: -allow-externally-defined-symbols | FileCheck %s --check-prefix=X86
; RUN: %p2i --assemble --disassemble --filetype=obj --dis-flags=-s \
; RUN: --target x8632 -i %s --args -sz-seed=1 -Om1 -reorder-pooled-constants \
-; RUN: | FileCheck %s --check-prefix=X86
+; RUN: -allow-externally-defined-symbols | FileCheck %s --check-prefix=X86
@__init_array_start = internal constant [0 x i8] zeroinitializer, align 4
@__fini_array_start = internal constant [0 x i8] zeroinitializer, align 4
diff --git a/tests_lit/llvm2ice_tests/return_immediates.ll b/tests_lit/llvm2ice_tests/return_immediates.ll
index 2d94fb5..e8ddb58 100644
--- a/tests_lit/llvm2ice_tests/return_immediates.ll
+++ b/tests_lit/llvm2ice_tests/return_immediates.ll
@@ -2,7 +2,8 @@
; sets, some immediates are more complicated than others.
; For x86-32, it shouldn't be a problem.
-; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 | FileCheck %s
+; RUN: %p2i --filetype=obj --disassemble -i %s --args -O2 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; TODO(jvoung): Stop skipping unimplemented parts (via --skip-unimplemented)
; once enough infrastructure is in. Also, switch to --filetype=obj
@@ -17,7 +18,7 @@
; ARM has a shifter that allows encoding 8-bits rotated right by even amounts.
; The first few "rotate right" test cases are expressed as shift-left.
-define i32 @ret_8bits_shift_left0() {
+define internal i32 @ret_8bits_shift_left0() {
ret i32 255
}
; CHECK-LABEL: ret_8bits_shift_left0
@@ -25,7 +26,7 @@
; ARM32-LABEL: ret_8bits_shift_left0
; ARM32-NEXT: mov r0, #255
-define i32 @ret_8bits_shift_left1() {
+define internal i32 @ret_8bits_shift_left1() {
ret i32 510
}
; CHECK-LABEL: ret_8bits_shift_left1
@@ -33,7 +34,7 @@
; ARM32-LABEL: ret_8bits_shift_left1
; ARM32-NEXT: movw r0, #510
-define i32 @ret_8bits_shift_left2() {
+define internal i32 @ret_8bits_shift_left2() {
ret i32 1020
}
; CHECK-LABEL: ret_8bits_shift_left2
@@ -41,7 +42,7 @@
; ARM32-LABEL: ret_8bits_shift_left2
; ARM32-NEXT: mov r0, #1020
-define i32 @ret_8bits_shift_left4() {
+define internal i32 @ret_8bits_shift_left4() {
ret i32 4080
}
; CHECK-LABEL: ret_8bits_shift_left4
@@ -49,7 +50,7 @@
; ARM32-LABEL: ret_8bits_shift_left4
; ARM32-NEXT: mov r0, #4080
-define i32 @ret_8bits_shift_left14() {
+define internal i32 @ret_8bits_shift_left14() {
ret i32 4177920
}
; CHECK-LABEL: ret_8bits_shift_left14
@@ -57,7 +58,7 @@
; ARM32-LABEL: ret_8bits_shift_left14
; ARM32-NEXT: mov r0, #4177920
-define i32 @ret_8bits_shift_left15() {
+define internal i32 @ret_8bits_shift_left15() {
ret i32 8355840
}
; CHECK-LABEL: ret_8bits_shift_left15
@@ -68,7 +69,7 @@
; Shift 8 bits left by 24 to the i32 limit. This is also ror by 8 bits.
-define i32 @ret_8bits_shift_left24() {
+define internal i32 @ret_8bits_shift_left24() {
ret i32 4278190080
}
; CHECK-LABEL: ret_8bits_shift_left24
@@ -79,7 +80,7 @@
; The next few cases wrap around and actually demonstrate the rotation.
-define i32 @ret_8bits_ror7() {
+define internal i32 @ret_8bits_ror7() {
ret i32 4261412865
}
; CHECK-LABEL: ret_8bits_ror7
@@ -88,7 +89,7 @@
; ARM32-NEXT: movw r0, #1
; ARM32-NEXT: movt r0, #65024
-define i32 @ret_8bits_ror6() {
+define internal i32 @ret_8bits_ror6() {
ret i32 4227858435
}
; CHECK-LABEL: ret_8bits_ror6
@@ -97,7 +98,7 @@
; ARM32-NEXT: mov r0, #-67108861
; ARM32-NEXT: bx lr
-define i32 @ret_8bits_ror5() {
+define internal i32 @ret_8bits_ror5() {
ret i32 4160749575
}
; CHECK-LABEL: ret_8bits_ror5
@@ -106,7 +107,7 @@
; ARM32-NEXT: movw r0, #7
; ARM32-NEXT: movt r0, #63488
-define i32 @ret_8bits_ror4() {
+define internal i32 @ret_8bits_ror4() {
ret i32 4026531855
}
; CHECK-LABEL: ret_8bits_ror4
@@ -115,7 +116,7 @@
; ARM32-NEXT: mov r0, #-268435441
; ARM32-NEXT: bx lr
-define i32 @ret_8bits_ror3() {
+define internal i32 @ret_8bits_ror3() {
ret i32 3758096415
}
; CHECK-LABEL: ret_8bits_ror3
@@ -124,7 +125,7 @@
; ARM32-NEXT: movw r0, #31
; ARM32-NEXT: movt r0, #57344
-define i32 @ret_8bits_ror2() {
+define internal i32 @ret_8bits_ror2() {
ret i32 3221225535
}
; CHECK-LABEL: ret_8bits_ror2
@@ -133,7 +134,7 @@
; ARM32-NEXT: mov r0, #-1073741761
; ARM32-NEXT: bx lr
-define i32 @ret_8bits_ror1() {
+define internal i32 @ret_8bits_ror1() {
ret i32 2147483775
}
; CHECK-LABEL: ret_8bits_ror1
@@ -145,7 +146,7 @@
; Some architectures can handle 16-bits at a time efficiently,
; so also test those.
-define i32 @ret_16bits_lower() {
+define internal i32 @ret_16bits_lower() {
ret i32 65535
}
; CHECK-LABEL: ret_16bits_lower
@@ -154,7 +155,7 @@
; ARM32-NEXT: movw r0, #65535
; ARM32-NEXT: bx lr
-define i32 @ret_17bits_lower() {
+define internal i32 @ret_17bits_lower() {
ret i32 131071
}
; CHECK-LABEL: ret_17bits_lower
@@ -163,7 +164,7 @@
; ARM32-NEXT: movw r0, #65535
; ARM32-NEXT: movt r0, #1
-define i32 @ret_16bits_upper() {
+define internal i32 @ret_16bits_upper() {
ret i32 4294901760
}
; CHECK-LABEL: ret_16bits_upper
@@ -174,7 +175,7 @@
; Some 32-bit immediates can be inverted, and moved in a single instruction.
-define i32 @ret_8bits_inverted_shift_left0() {
+define internal i32 @ret_8bits_inverted_shift_left0() {
ret i32 4294967040
}
; CHECK-LABEL: ret_8bits_inverted_shift_left0
@@ -183,7 +184,7 @@
; ARM32-NEXT: mvn r0, #255
; ARM32-NEXT: bx lr
-define i32 @ret_8bits_inverted_shift_left24() {
+define internal i32 @ret_8bits_inverted_shift_left24() {
ret i32 16777215
}
; CHECK-LABEL: ret_8bits_inverted_shift_left24
@@ -192,7 +193,7 @@
; ARM32-NEXT: mvn r0, #-16777216
; ARM32-NEXT: bx lr
-define i32 @ret_8bits_inverted_ror2() {
+define internal i32 @ret_8bits_inverted_ror2() {
ret i32 1073741760
}
; CHECK-LABEL: ret_8bits_inverted_ror2
@@ -201,7 +202,7 @@
; ARM32-NEXT: mvn r0, #-1073741761
; ARM32-NEXT: bx lr
-define i32 @ret_8bits_inverted_ror6() {
+define internal i32 @ret_8bits_inverted_ror6() {
ret i32 67108860
}
; CHECK-LABEL: ret_8bits_inverted_ror6
@@ -210,7 +211,7 @@
; ARM32-NEXT: mvn r0, #-67108861
; ARM32-NEXT: bx lr
-define i32 @ret_8bits_inverted_ror7() {
+define internal i32 @ret_8bits_inverted_ror7() {
ret i32 33554430
}
; CHECK-LABEL: ret_8bits_inverted_ror7
@@ -221,7 +222,7 @@
; 64-bit immediates.
-define i64 @ret_64bits_shift_left0() {
+define internal i64 @ret_64bits_shift_left0() {
ret i64 1095216660735
}
; CHECK-LABEL: ret_64bits_shift_left0
@@ -236,7 +237,7 @@
declare void @_start()
-define i32 @ret_addr() {
+define internal i32 @ret_addr() {
%ptr = ptrtoint void ()* @_start to i32
ret i32 %ptr
}
diff --git a/tests_lit/llvm2ice_tests/returns_twice_no_coalesce.ll b/tests_lit/llvm2ice_tests/returns_twice_no_coalesce.ll
index 8419e0f..c9da9cb 100644
--- a/tests_lit/llvm2ice_tests/returns_twice_no_coalesce.ll
+++ b/tests_lit/llvm2ice_tests/returns_twice_no_coalesce.ll
@@ -1,7 +1,8 @@
; This file checks that SimpleCoalescing of local stack slots is not done
; when calling a function with the "returns twice" attribute.
-; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 | FileCheck %s
+; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; Setjmp is a function with the "returns twice" attribute.
declare i32 @llvm.nacl.setjmp(i8*)
@@ -9,7 +10,7 @@
declare i32 @other(i32)
declare void @user(i32)
-define i32 @call_returns_twice(i32 %iptr_jmpbuf, i32 %x) {
+define internal i32 @call_returns_twice(i32 %iptr_jmpbuf, i32 %x) {
entry:
%local = add i32 %x, 12345
%jmpbuf = inttoptr i32 %iptr_jmpbuf to i8*
@@ -32,7 +33,7 @@
; There should not be sharing of the stack slot.
; CHECK-NOT: mov DWORD PTR [esp + [[OFF]]], [[REG2]]
-define i32 @no_call_returns_twice(i32 %iptr_jmpbuf, i32 %x) {
+define internal i32 @no_call_returns_twice(i32 %iptr_jmpbuf, i32 %x) {
entry:
%local = add i32 %x, 12345
%y = call i32 @other(i32 %x)
diff --git a/tests_lit/llvm2ice_tests/rng.ll b/tests_lit/llvm2ice_tests/rng.ll
index e4fa813..aa61530 100644
--- a/tests_lit/llvm2ice_tests/rng.ll
+++ b/tests_lit/llvm2ice_tests/rng.ll
@@ -104,7 +104,7 @@
@ArrayUninitConstInt = internal constant [20 x i8] zeroinitializer, align 4
-define <4 x i32> @func1(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @func1(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = mul <4 x i32> %a, %b
ret <4 x i32> %res
@@ -136,7 +136,7 @@
-define float @func2(float* %arg) {
+define internal float @func2(float* %arg) {
entry:
%arg.int = ptrtoint float* %arg to i32
%addr.int = add i32 %arg.int, 200000
@@ -148,7 +148,7 @@
; BLINDINGO2: lea [[REG:e[a-z]*]],{{[[]}}{{e[a-z]*}}+0x69ed4ee7{{[]]}}
}
-define float @func3(i32 %arg, float %input) {
+define internal float @func3(i32 %arg, float %input) {
entry:
switch i32 %arg, label %return [
i32 0, label %sw.bb
@@ -183,7 +183,7 @@
ret float %retval.0
}
-define <4 x i32> @func4(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @func4(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = mul <4 x i32> %a, %b
ret <4 x i32> %res
@@ -200,7 +200,7 @@
; REGALLOC-NEXT: ret
}
-define void @func5(i32 %foo, i32 %bar) {
+define internal void @func5(i32 %foo, i32 %bar) {
entry:
%r1 = icmp eq i32 %foo, %bar
br i1 %r1, label %BB1, label %BB2
@@ -223,7 +223,7 @@
; BBREORDERING: .Lfunc5$BB3
}
-define i32 @func6(i32 %arg) {
+define internal i32 @func6(i32 %arg) {
entry:
%res = add i32 200000, %arg
ret i32 %res
diff --git a/tests_lit/llvm2ice_tests/sdiv.ll b/tests_lit/llvm2ice_tests/sdiv.ll
index 43ed073..cac138e 100644
--- a/tests_lit/llvm2ice_tests/sdiv.ll
+++ b/tests_lit/llvm2ice_tests/sdiv.ll
@@ -4,7 +4,7 @@
; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 | FileCheck %s
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 | FileCheck %s
-define i32 @sdiv_i8(i32 %a.i32, i32 %b.i32) {
+define internal i32 @sdiv_i8(i32 %a.i32, i32 %b.i32) {
entry:
%a = trunc i32 %a.i32 to i8
%b = trunc i32 %b.i32 to i8
@@ -16,7 +16,7 @@
; CHECK: idiv
}
-define i32 @sdiv_i16(i32 %a.i32, i32 %b.i32) {
+define internal i32 @sdiv_i16(i32 %a.i32, i32 %b.i32) {
entry:
%a = trunc i32 %a.i32 to i16
%b = trunc i32 %b.i32 to i16
@@ -28,7 +28,7 @@
; CHECK: idiv
}
-define i32 @sdiv_i32(i32 %a, i32 %b) {
+define internal i32 @sdiv_i32(i32 %a, i32 %b) {
entry:
%res = sdiv i32 %a, %b
ret i32 %res
@@ -37,7 +37,7 @@
; CHECK: idiv
}
-define i32 @srem_i8(i32 %a.i32, i32 %b.i32) {
+define internal i32 @srem_i8(i32 %a.i32, i32 %b.i32) {
entry:
%a = trunc i32 %a.i32 to i8
%b = trunc i32 %b.i32 to i8
@@ -49,7 +49,7 @@
; CHECK: idiv
}
-define i32 @srem_i16(i32 %a.i32, i32 %b.i32) {
+define internal i32 @srem_i16(i32 %a.i32, i32 %b.i32) {
entry:
%a = trunc i32 %a.i32 to i16
%b = trunc i32 %b.i32 to i16
@@ -61,7 +61,7 @@
; CHECK: idiv
}
-define i32 @srem_i32(i32 %a, i32 %b) {
+define internal i32 @srem_i32(i32 %a, i32 %b) {
entry:
%res = srem i32 %a, %b
ret i32 %res
diff --git a/tests_lit/llvm2ice_tests/select-opt.ll b/tests_lit/llvm2ice_tests/select-opt.ll
index a34cec6..0c97edf 100644
--- a/tests_lit/llvm2ice_tests/select-opt.ll
+++ b/tests_lit/llvm2ice_tests/select-opt.ll
@@ -4,24 +4,26 @@
; match lines.
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -O2 \
+; RUN: --target x8632 -i %s --args -O2 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; RUN: %if --need=target_X8632 --command %p2i --filetype=obj --disassemble \
-; RUN: --target x8632 -i %s --args -Om1 \
+; RUN: --target x8632 -i %s --args -Om1 -allow-externally-defined-symbols \
; RUN: | %if --need=target_X8632 --command FileCheck %s
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -O2 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
; RUN: %if --need=target_ARM32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target arm32 -i %s --args -Om1 --skip-unimplemented \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
-define void @testSelect(i32 %a, i32 %b) {
+define internal void @testSelect(i32 %a, i32 %b) {
entry:
%cmp = icmp slt i32 %a, %b
%cond = select i1 %cmp, i32 %a, i32 %b
@@ -60,7 +62,7 @@
; Check for valid addressing mode in the cmp instruction when the
; operand is an immediate.
-define i32 @testSelectImm32(i32 %a, i32 %b) {
+define internal i32 @testSelectImm32(i32 %a, i32 %b) {
entry:
%cond = select i1 false, i32 %a, i32 %b
ret i32 %cond
@@ -73,7 +75,7 @@
; Check for valid addressing mode in the cmp instruction when the
; operand is an immediate. There is a different x86-32 lowering
; sequence for 64-bit operands.
-define i64 @testSelectImm64(i64 %a, i64 %b) {
+define internal i64 @testSelectImm64(i64 %a, i64 %b) {
entry:
%cond = select i1 true, i64 %a, i64 %b
ret i64 %cond
diff --git a/tests_lit/llvm2ice_tests/shift.ll b/tests_lit/llvm2ice_tests/shift.ll
index 5f8fdc9..020b1c9 100644
--- a/tests_lit/llvm2ice_tests/shift.ll
+++ b/tests_lit/llvm2ice_tests/shift.ll
@@ -29,7 +29,7 @@
@i2 = internal global [4 x i8] zeroinitializer, align 4
@u1 = internal global [4 x i8] zeroinitializer, align 4
-define void @conv1() {
+define internal void @conv1() {
entry:
%__0 = bitcast [4 x i8]* @u1 to i32*
%v0 = load i32, i32* %__0, align 1
@@ -47,7 +47,7 @@
; ARM32: lsl {{.*}}, #24
; ARM32: asr {{.*}}, #24
-define void @conv2() {
+define internal void @conv2() {
entry:
%__0 = bitcast [4 x i8]* @u1 to i32*
%v0 = load i32, i32* %__0, align 1
@@ -65,7 +65,7 @@
; ARM32: lsl {{.*}}, #16
; ARM32: lsr {{.*}}, #16
-define i32 @shlImmLarge(i32 %val) {
+define internal i32 @shlImmLarge(i32 %val) {
entry:
%result = shl i32 %val, 257
ret i32 %result
@@ -73,7 +73,7 @@
; CHECK-LABEL: shlImmLarge
; CHECK: shl {{.*}},0x1
-define i32 @shlImmNeg(i32 %val) {
+define internal i32 @shlImmNeg(i32 %val) {
entry:
%result = shl i32 %val, -1
ret i32 %result
@@ -81,7 +81,7 @@
; CHECK-LABEL: shlImmNeg
; CHECK: shl {{.*}},0xff
-define i32 @lshrImmLarge(i32 %val) {
+define internal i32 @lshrImmLarge(i32 %val) {
entry:
%result = lshr i32 %val, 257
ret i32 %result
@@ -89,7 +89,7 @@
; CHECK-LABEL: lshrImmLarge
; CHECK: shr {{.*}},0x1
-define i32 @lshrImmNeg(i32 %val) {
+define internal i32 @lshrImmNeg(i32 %val) {
entry:
%result = lshr i32 %val, -1
ret i32 %result
@@ -97,7 +97,7 @@
; CHECK-LABEL: lshrImmNeg
; CHECK: shr {{.*}},0xff
-define i32 @ashrImmLarge(i32 %val) {
+define internal i32 @ashrImmLarge(i32 %val) {
entry:
%result = ashr i32 %val, 257
ret i32 %result
@@ -105,7 +105,7 @@
; CHECK-LABEL: ashrImmLarge
; CHECK: sar {{.*}},0x1
-define i32 @ashrImmNeg(i32 %val) {
+define internal i32 @ashrImmNeg(i32 %val) {
entry:
%result = ashr i32 %val, -1
ret i32 %result
@@ -113,7 +113,7 @@
; CHECK-LABEL: ashrImmNeg
; CHECK: sar {{.*}},0xff
-define i64 @shlImm64One(i64 %val) {
+define internal i64 @shlImm64One(i64 %val) {
entry:
%result = shl i64 %val, 1
ret i64 %result
@@ -121,7 +121,7 @@
; CHECK-LABEL: shlImm64One
; CHECK: shl {{.*}},1
-define i64 @shlImm64LessThan32(i64 %val) {
+define internal i64 @shlImm64LessThan32(i64 %val) {
entry:
%result = shl i64 %val, 4
ret i64 %result
@@ -129,7 +129,7 @@
; CHECK-LABEL: shlImm64LessThan32
; CHECK: shl {{.*}},0x4
-define i64 @shlImm64Equal32(i64 %val) {
+define internal i64 @shlImm64Equal32(i64 %val) {
entry:
%result = shl i64 %val, 32
ret i64 %result
@@ -137,7 +137,7 @@
; CHECK-LABEL: shlImm64Equal32
; CHECK-NOT: shl
-define i64 @shlImm64GreaterThan32(i64 %val) {
+define internal i64 @shlImm64GreaterThan32(i64 %val) {
entry:
%result = shl i64 %val, 40
ret i64 %result
@@ -145,7 +145,7 @@
; CHECK-LABEL: shlImm64GreaterThan32
; CHECK: shl {{.*}},0x8
-define i64 @lshrImm64One(i64 %val) {
+define internal i64 @lshrImm64One(i64 %val) {
entry:
%result = lshr i64 %val, 1
ret i64 %result
@@ -153,7 +153,7 @@
; CHECK-LABEL: lshrImm64One
; CHECK: shr {{.*}},1
-define i64 @lshrImm64LessThan32(i64 %val) {
+define internal i64 @lshrImm64LessThan32(i64 %val) {
entry:
%result = lshr i64 %val, 4
ret i64 %result
@@ -162,7 +162,7 @@
; CHECK: shrd {{.*}},0x4
; CHECK: shr {{.*}},0x4
-define i64 @lshrImm64Equal32(i64 %val) {
+define internal i64 @lshrImm64Equal32(i64 %val) {
entry:
%result = lshr i64 %val, 32
ret i64 %result
@@ -170,7 +170,7 @@
; CHECK-LABEL: lshrImm64Equal32
; CHECK-NOT: shr
-define i64 @lshrImm64GreaterThan32(i64 %val) {
+define internal i64 @lshrImm64GreaterThan32(i64 %val) {
entry:
%result = lshr i64 %val, 40
ret i64 %result
@@ -179,7 +179,7 @@
; CHECK-NOT: shrd
; CHECK: shr {{.*}},0x8
-define i64 @ashrImm64One(i64 %val) {
+define internal i64 @ashrImm64One(i64 %val) {
entry:
%result = ashr i64 %val, 1
ret i64 %result
@@ -188,7 +188,7 @@
; CHECK: shrd {{.*}},0x1
; CHECK: sar {{.*}},1
-define i64 @ashrImm64LessThan32(i64 %val) {
+define internal i64 @ashrImm64LessThan32(i64 %val) {
entry:
%result = ashr i64 %val, 4
ret i64 %result
@@ -197,7 +197,7 @@
; CHECK: shrd {{.*}},0x4
; CHECK: sar {{.*}},0x4
-define i64 @ashrImm64Equal32(i64 %val) {
+define internal i64 @ashrImm64Equal32(i64 %val) {
entry:
%result = ashr i64 %val, 32
ret i64 %result
@@ -206,7 +206,7 @@
; CHECK: sar {{.*}},0x1f
; CHECK-NOT: shrd
-define i64 @ashrImm64GreaterThan32(i64 %val) {
+define internal i64 @ashrImm64GreaterThan32(i64 %val) {
entry:
%result = ashr i64 %val, 40
ret i64 %result
diff --git a/tests_lit/llvm2ice_tests/simple-loop.ll b/tests_lit/llvm2ice_tests/simple-loop.ll
index 6d114fa..95cb9b8 100644
--- a/tests_lit/llvm2ice_tests/simple-loop.ll
+++ b/tests_lit/llvm2ice_tests/simple-loop.ll
@@ -6,7 +6,7 @@
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
; RUN: | FileCheck --check-prefix=OPTM1 %s
-define i32 @simple_loop(i32 %a, i32 %n) {
+define internal i32 @simple_loop(i32 %a, i32 %n) {
entry:
%cmp4 = icmp sgt i32 %n, 0
br i1 %cmp4, label %for.body, label %for.end
diff --git a/tests_lit/llvm2ice_tests/square.ll b/tests_lit/llvm2ice_tests/square.ll
index 18c5d58..50fa9e2 100644
--- a/tests_lit/llvm2ice_tests/square.ll
+++ b/tests_lit/llvm2ice_tests/square.ll
@@ -9,7 +9,7 @@
; RUN: --target x8632 -i %s --args -Om1 -mattr=sse4.1 \
; RUN: | %if --need=target_X8632 --command FileCheck %s
-define float @Square_float(float %a) {
+define internal float @Square_float(float %a) {
entry:
%result = fmul float %a, %a
ret float %result
@@ -17,7 +17,7 @@
; CHECK-LABEL: Square_float
; CHECK: mulss [[REG:xmm.]],[[REG]]
-define double @Square_double(double %a) {
+define internal double @Square_double(double %a) {
entry:
%result = fmul double %a, %a
ret double %result
@@ -25,7 +25,7 @@
; CHECK-LABEL: Square_double
; CHECK: mulsd [[REG:xmm.]],[[REG]]
-define i32 @Square_i32(i32 %a) {
+define internal i32 @Square_i32(i32 %a) {
entry:
%result = mul i32 %a, %a
ret i32 %result
@@ -33,7 +33,7 @@
; CHECK-LABEL: Square_i32
; CHECK: imul [[REG:e..]],[[REG]]
-define i16 @Square_i16(i16 %a) {
+define internal i16 @Square_i16(i16 %a) {
entry:
%result = mul i16 %a, %a
ret i16 %result
@@ -41,7 +41,7 @@
; CHECK-LABEL: Square_i16
; CHECK: imul [[REG:..]],[[REG]]
-define i8 @Square_i8(i8 %a) {
+define internal i8 @Square_i8(i8 %a) {
entry:
%result = mul i8 %a, %a
ret i8 %result
@@ -49,7 +49,7 @@
; CHECK-LABEL: Square_i8
; CHECK: imul al
-define <4 x float> @Square_v4f32(<4 x float> %a) {
+define internal <4 x float> @Square_v4f32(<4 x float> %a) {
entry:
%result = fmul <4 x float> %a, %a
ret <4 x float> %result
@@ -57,7 +57,7 @@
; CHECK-LABEL: Square_v4f32
; CHECK: mulps [[REG:xmm.]],[[REG]]
-define <4 x i32> @Square_v4i32(<4 x i32> %a) {
+define internal <4 x i32> @Square_v4i32(<4 x i32> %a) {
entry:
%result = mul <4 x i32> %a, %a
ret <4 x i32> %result
@@ -65,7 +65,7 @@
; CHECK-LABEL: Square_v4i32
; CHECK: pmulld [[REG:xmm.]],[[REG]]
-define <8 x i16> @Square_v8i16(<8 x i16> %a) {
+define internal <8 x i16> @Square_v8i16(<8 x i16> %a) {
entry:
%result = mul <8 x i16> %a, %a
ret <8 x i16> %result
@@ -73,7 +73,7 @@
; CHECK-LABEL: Square_v8i16
; CHECK: pmullw [[REG:xmm.]],[[REG]]
-define <16 x i8> @Square_v16i8(<16 x i8> %a) {
+define internal <16 x i8> @Square_v16i8(<16 x i8> %a) {
entry:
%result = mul <16 x i8> %a, %a
ret <16 x i8> %result
diff --git a/tests_lit/llvm2ice_tests/store.ll b/tests_lit/llvm2ice_tests/store.ll
index ec85020..94931fb 100644
--- a/tests_lit/llvm2ice_tests/store.ll
+++ b/tests_lit/llvm2ice_tests/store.ll
@@ -4,7 +4,7 @@
; RUN: %p2i -i %s --args --verbose inst -threads=0 | FileCheck %s
-define void @store_i64(i32 %addr_arg) {
+define internal void @store_i64(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i64*
store i64 1, i64* %__1, align 1
@@ -16,7 +16,7 @@
; CHECK-NEXT: ret void
}
-define void @store_i32(i32 %addr_arg) {
+define internal void @store_i32(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i32*
store i32 1, i32* %__1, align 1
@@ -28,7 +28,7 @@
; CHECK-NEXT: ret void
}
-define void @store_i16(i32 %addr_arg) {
+define internal void @store_i16(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i16*
store i16 1, i16* %__1, align 1
@@ -40,7 +40,7 @@
; CHECK-NEXT: ret void
}
-define void @store_i8(i32 %addr_arg) {
+define internal void @store_i8(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i8*
store i8 1, i8* %__1, align 1
diff --git a/tests_lit/llvm2ice_tests/switch-opt.ll b/tests_lit/llvm2ice_tests/switch-opt.ll
index ea552b8..38d0962 100644
--- a/tests_lit/llvm2ice_tests/switch-opt.ll
+++ b/tests_lit/llvm2ice_tests/switch-opt.ll
@@ -11,7 +11,7 @@
; RUN: | %if --need=target_ARM32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix ARM32 %s
-define i32 @testSwitch(i32 %a) {
+define internal i32 @testSwitch(i32 %a) {
entry:
switch i32 %a, label %sw.default [
i32 1, label %sw.epilog
@@ -44,7 +44,7 @@
; immediate. It's important that there is exactly one case, because
; for two or more cases the source operand is legalized into a
; register.
-define i32 @testSwitchImm() {
+define internal i32 @testSwitchImm() {
entry:
switch i32 10, label %sw.default [
i32 1, label %sw.default
@@ -105,7 +105,7 @@
; Similar to testSwitchImm, make sure proper addressing modes are
; used. In reality, this is tested by running the output through the
; assembler.
-define i32 @testSwitchImm64() {
+define internal i32 @testSwitchImm64() {
entry:
switch i64 10, label %sw.default [
i64 1, label %sw.default
@@ -120,7 +120,7 @@
; ARM32-NEXT: beq [[ADDR:[0-9a-f]+]]
; ARM32-NEXT: b [[ADDR]]
-define i32 @testSwitchUndef64() {
+define internal i32 @testSwitchUndef64() {
entry:
switch i64 undef, label %sw.default [
i64 1, label %sw.default
diff --git a/tests_lit/llvm2ice_tests/undef.ll b/tests_lit/llvm2ice_tests/undef.ll
index d5823a9..0e1ff05 100644
--- a/tests_lit/llvm2ice_tests/undef.ll
+++ b/tests_lit/llvm2ice_tests/undef.ll
@@ -9,14 +9,14 @@
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 -mattr=sse4.1 \
; RUN: | FileCheck %s
-define i32 @undef_i32() {
+define internal i32 @undef_i32() {
entry:
ret i32 undef
; CHECK-LABEL: undef_i32
; CHECK: mov eax,0x0
}
-define i64 @undef_i64() {
+define internal i64 @undef_i64() {
entry:
ret i64 undef
; CHECK-LABEL: undef_i64
@@ -25,7 +25,7 @@
; CHECK: ret
}
-define i32 @trunc_undef_i64() {
+define internal i32 @trunc_undef_i64() {
entry:
%ret = trunc i64 undef to i32
ret i32 %ret
@@ -34,63 +34,63 @@
; CHECK: ret
}
-define float @undef_float() {
+define internal float @undef_float() {
entry:
ret float undef
; CHECK-LABEL: undef_float
; CHECK: fld DWORD PTR {{.*}} .L$float$00000000
}
-define <4 x i1> @undef_v4i1() {
+define internal <4 x i1> @undef_v4i1() {
entry:
ret <4 x i1> undef
; CHECK-LABEL: undef_v4i1
; CHECK: pxor
}
-define <8 x i1> @undef_v8i1() {
+define internal <8 x i1> @undef_v8i1() {
entry:
ret <8 x i1> undef
; CHECK-LABEL: undef_v8i1
; CHECK: pxor
}
-define <16 x i1> @undef_v16i1() {
+define internal <16 x i1> @undef_v16i1() {
entry:
ret <16 x i1> undef
; CHECK-LABEL: undef_v16i1
; CHECK: pxor
}
-define <16 x i8> @undef_v16i8() {
+define internal <16 x i8> @undef_v16i8() {
entry:
ret <16 x i8> undef
; CHECK-LABEL: undef_v16i8
; CHECK: pxor
}
-define <8 x i16> @undef_v8i16() {
+define internal <8 x i16> @undef_v8i16() {
entry:
ret <8 x i16> undef
; CHECK-LABEL: undef_v8i16
; CHECK: pxor
}
-define <4 x i32> @undef_v4i32() {
+define internal <4 x i32> @undef_v4i32() {
entry:
ret <4 x i32> undef
; CHECK-LABEL: undef_v4i32
; CHECK: pxor
}
-define <4 x float> @undef_v4f32() {
+define internal <4 x float> @undef_v4f32() {
entry:
ret <4 x float> undef
; CHECK-LABEL: undef_v4f32
; CHECK: pxor
}
-define <4 x i32> @vector_arith(<4 x i32> %arg) {
+define internal <4 x i32> @vector_arith(<4 x i32> %arg) {
entry:
%val = add <4 x i32> undef, %arg
ret <4 x i32> %val
@@ -98,7 +98,7 @@
; CHECK: pxor
}
-define <4 x float> @vector_bitcast() {
+define internal <4 x float> @vector_bitcast() {
entry:
%val = bitcast <4 x i32> undef to <4 x float>
ret <4 x float> %val
@@ -106,7 +106,7 @@
; CHECK: pxor
}
-define <4 x i32> @vector_sext() {
+define internal <4 x i32> @vector_sext() {
entry:
%val = sext <4 x i1> undef to <4 x i32>
ret <4 x i32> %val
@@ -114,7 +114,7 @@
; CHECK: pxor
}
-define <4 x i32> @vector_zext() {
+define internal <4 x i32> @vector_zext() {
entry:
%val = zext <4 x i1> undef to <4 x i32>
ret <4 x i32> %val
@@ -122,7 +122,7 @@
; CHECK: pxor
}
-define <4 x i1> @vector_trunc() {
+define internal <4 x i1> @vector_trunc() {
entry:
%val = trunc <4 x i32> undef to <4 x i1>
ret <4 x i1> %val
@@ -130,7 +130,7 @@
; CHECK: pxor
}
-define <4 x i1> @vector_icmp(<4 x i32> %arg) {
+define internal <4 x i1> @vector_icmp(<4 x i32> %arg) {
entry:
%val = icmp eq <4 x i32> undef, %arg
ret <4 x i1> %val
@@ -138,7 +138,7 @@
; CHECK: pxor
}
-define <4 x i1> @vector_fcmp(<4 x float> %arg) {
+define internal <4 x i1> @vector_fcmp(<4 x float> %arg) {
entry:
%val = fcmp ueq <4 x float> undef, %arg
ret <4 x i1> %val
@@ -146,7 +146,7 @@
; CHECK: pxor
}
-define <4 x i32> @vector_fptosi() {
+define internal <4 x i32> @vector_fptosi() {
entry:
%val = fptosi <4 x float> undef to <4 x i32>
ret <4 x i32> %val
@@ -154,7 +154,7 @@
; CHECK: pxor
}
-define <4 x i32> @vector_fptoui() {
+define internal <4 x i32> @vector_fptoui() {
entry:
%val = fptoui <4 x float> undef to <4 x i32>
ret <4 x i32> %val
@@ -162,7 +162,7 @@
; CHECK: pxor
}
-define <4 x float> @vector_sitofp() {
+define internal <4 x float> @vector_sitofp() {
entry:
%val = sitofp <4 x i32> undef to <4 x float>
ret <4 x float> %val
@@ -170,7 +170,7 @@
; CHECK: pxor
}
-define <4 x float> @vector_uitofp() {
+define internal <4 x float> @vector_uitofp() {
entry:
%val = uitofp <4 x i32> undef to <4 x float>
ret <4 x float> %val
@@ -178,7 +178,7 @@
; CHECK: pxor
}
-define <4 x float> @vector_insertelement_arg1() {
+define internal <4 x float> @vector_insertelement_arg1() {
entry:
%val = insertelement <4 x float> undef, float 1.0, i32 0
ret <4 x float> %val
@@ -186,7 +186,7 @@
; CHECK: pxor
}
-define <4 x float> @vector_insertelement_arg2(<4 x float> %arg) {
+define internal <4 x float> @vector_insertelement_arg2(<4 x float> %arg) {
entry:
%val = insertelement <4 x float> %arg, float undef, i32 0
ret <4 x float> %val
@@ -194,7 +194,7 @@
; CHECK: {{movss|insertps}} {{.*}},DWORD PTR {{.*}} .L$float$00000000
}
-define float @vector_extractelement_v4f32_index_0() {
+define internal float @vector_extractelement_v4f32_index_0() {
entry:
%val = extractelement <4 x float> undef, i32 0
ret float %val
@@ -202,7 +202,7 @@
; CHECK: pxor
}
-define float @vector_extractelement_v4f32_index_1() {
+define internal float @vector_extractelement_v4f32_index_1() {
entry:
%val = extractelement <4 x float> undef, i32 1
ret float %val
@@ -210,7 +210,7 @@
; CHECK: pxor
}
-define i32 @vector_extractelement_v16i1_index_7() {
+define internal i32 @vector_extractelement_v16i1_index_7() {
entry:
%val.trunc = extractelement <16 x i1> undef, i32 7
%val = sext i1 %val.trunc to i32
@@ -219,7 +219,8 @@
; CHECK: pxor
}
-define <4 x i32> @vector_select_v4i32_cond(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @vector_select_v4i32_cond(<4 x i32> %a,
+ <4 x i32> %b) {
entry:
%val = select <4 x i1> undef, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %val
@@ -227,7 +228,8 @@
; CHECK: pxor
}
-define <4 x i32> @vector_select_v4i32_arg1(<4 x i1> %cond, <4 x i32> %b) {
+define internal <4 x i32> @vector_select_v4i32_arg1(<4 x i1> %cond,
+ <4 x i32> %b) {
entry:
%val = select <4 x i1> %cond, <4 x i32> undef, <4 x i32> %b
ret <4 x i32> %val
@@ -235,7 +237,8 @@
; CHECK: pxor
}
-define <4 x i32> @vector_select_v4i32_arg2(<4 x i1> %cond, <4 x i32> %a) {
+define internal <4 x i32> @vector_select_v4i32_arg2(<4 x i1> %cond,
+ <4 x i32> %a) {
entry:
%val = select <4 x i1> %cond, <4 x i32> %a, <4 x i32> undef
ret <4 x i32> %val
@@ -243,7 +246,8 @@
; CHECK: pxor
}
-define <4 x i1> @vector_select_v4i1_cond(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @vector_select_v4i1_cond(<4 x i1> %a,
+ <4 x i1> %b) {
entry:
%val = select <4 x i1> undef, <4 x i1> %a, <4 x i1> %b
ret <4 x i1> %val
@@ -251,7 +255,8 @@
; CHECK: pxor
}
-define <4 x i1> @vector_select_v4i1_arg1(<4 x i1> %cond, <4 x i1> %b) {
+define internal <4 x i1> @vector_select_v4i1_arg1(<4 x i1> %cond,
+ <4 x i1> %b) {
entry:
%val = select <4 x i1> %cond, <4 x i1> undef, <4 x i1> %b
ret <4 x i1> %val
@@ -259,7 +264,8 @@
; CHECK: pxor
}
-define <4 x i1> @vector_select_v4i1_arg2(<4 x i1> %cond, <4 x i1> %a) {
+define internal <4 x i1> @vector_select_v4i1_arg2(<4 x i1> %cond,
+ <4 x i1> %a) {
entry:
%val = select <4 x i1> %cond, <4 x i1> %a, <4 x i1> undef
ret <4 x i1> %val
@@ -267,7 +273,8 @@
; CHECK: pxor
}
-define <4 x float> @vector_select_v4f32_cond(<4 x float> %a, <4 x float> %b) {
+define internal <4 x float> @vector_select_v4f32_cond(<4 x float> %a,
+ <4 x float> %b) {
entry:
%val = select <4 x i1> undef, <4 x float> %a, <4 x float> %b
ret <4 x float> %val
@@ -275,7 +282,8 @@
; CHECK: pxor
}
-define <4 x float> @vector_select_v4f32_arg1(<4 x i1> %cond, <4 x float> %b) {
+define internal <4 x float> @vector_select_v4f32_arg1(<4 x i1> %cond,
+ <4 x float> %b) {
entry:
%val = select <4 x i1> %cond, <4 x float> undef, <4 x float> %b
ret <4 x float> %val
@@ -283,7 +291,8 @@
; CHECK: pxor
}
-define <4 x float> @vector_select_v4f32_arg2(<4 x i1> %cond, <4 x float> %a) {
+define internal <4 x float> @vector_select_v4f32_arg2(<4 x i1> %cond,
+ <4 x float> %a) {
entry:
%val = select <4 x i1> %cond, <4 x float> %a, <4 x float> undef
ret <4 x float> %val
diff --git a/tests_lit/llvm2ice_tests/vector-align.ll b/tests_lit/llvm2ice_tests/vector-align.ll
index 85f09fd..7173140 100644
--- a/tests_lit/llvm2ice_tests/vector-align.ll
+++ b/tests_lit/llvm2ice_tests/vector-align.ll
@@ -7,7 +7,7 @@
; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 | FileCheck %s
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 | FileCheck %s
-define <4 x i32> @test_add(i32 %addr_i, <4 x i32> %addend) {
+define internal <4 x i32> @test_add(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
%loaded = load <4 x i32>, <4 x i32>* %addr, align 4
@@ -18,7 +18,7 @@
; CHECK-NOT: paddd xmm{{.}},XMMWORD PTR [e{{ax|cx|dx|di|si|bx|bp}}
; CHECK: paddd xmm{{.}},
-define <4 x i32> @test_and(i32 %addr_i, <4 x i32> %addend) {
+define internal <4 x i32> @test_and(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
%loaded = load <4 x i32>, <4 x i32>* %addr, align 4
@@ -29,7 +29,7 @@
; CHECK-NOT: pand xmm{{.}},XMMWORD PTR [e{{ax|cx|dx|di|si|bx|bp}}
; CHECK: pand xmm{{.}},
-define <4 x i32> @test_or(i32 %addr_i, <4 x i32> %addend) {
+define internal <4 x i32> @test_or(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
%loaded = load <4 x i32>, <4 x i32>* %addr, align 4
@@ -40,7 +40,7 @@
; CHECK-NOT: por xmm{{.}},XMMWORD PTR [e{{ax|cx|dx|di|si|bx|bp}}
; CHECK: por xmm{{.}},
-define <4 x i32> @test_xor(i32 %addr_i, <4 x i32> %addend) {
+define internal <4 x i32> @test_xor(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
%loaded = load <4 x i32>, <4 x i32>* %addr, align 4
@@ -51,7 +51,7 @@
; CHECK-NOT: pxor xmm{{.}},XMMWORD PTR [e{{ax|cx|dx|di|si|bx|bp}}
; CHECK: pxor xmm{{.}},
-define <4 x i32> @test_sub(i32 %addr_i, <4 x i32> %addend) {
+define internal <4 x i32> @test_sub(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
%loaded = load <4 x i32>, <4 x i32>* %addr, align 4
@@ -62,7 +62,7 @@
; CHECK-NOT: psubd xmm{{.}},XMMWORD PTR [e{{ax|cx|dx|di|si|bx|bp}}
; CHECK: psubd xmm{{.}},
-define <4 x float> @test_fadd(i32 %addr_i, <4 x float> %addend) {
+define internal <4 x float> @test_fadd(i32 %addr_i, <4 x float> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x float>*
%loaded = load <4 x float>, <4 x float>* %addr, align 4
@@ -73,7 +73,7 @@
; CHECK-NOT: addps xmm{{.}},XMMWORD PTR [e{{ax|cx|dx|di|si|bx|bp}}
; CHECK: addps xmm{{.}},
-define <4 x float> @test_fsub(i32 %addr_i, <4 x float> %addend) {
+define internal <4 x float> @test_fsub(i32 %addr_i, <4 x float> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x float>*
%loaded = load <4 x float>, <4 x float>* %addr, align 4
diff --git a/tests_lit/llvm2ice_tests/vector-arg.ll b/tests_lit/llvm2ice_tests/vector-arg.ll
index 97b59e2..3d0e2c7 100644
--- a/tests_lit/llvm2ice_tests/vector-arg.ll
+++ b/tests_lit/llvm2ice_tests/vector-arg.ll
@@ -2,14 +2,16 @@
; calling convention for vectors.
; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
-; RUN: | FileCheck %s
+; RUN: -allow-externally-defined-symbols | FileCheck %s
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
-; RUN: | FileCheck --check-prefix=OPTM1 %s
+; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=OPTM1 %s
; The first five functions test that vectors are moved from their
; correct argument location to xmm0.
-define <4 x float> @test_returning_arg0(<4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3, <4 x float> %arg4, <4 x float> %arg5) {
+define internal <4 x float> @test_returning_arg0(
+ <4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3,
+ <4 x float> %arg4, <4 x float> %arg5) {
entry:
ret <4 x float> %arg0
; CHECK-LABEL: test_returning_arg0
@@ -22,7 +24,9 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_arg1(<4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3, <4 x float> %arg4, <4 x float> %arg5) {
+define internal <4 x float> @test_returning_arg1(
+ <4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3,
+ <4 x float> %arg4, <4 x float> %arg5) {
entry:
ret <4 x float> %arg1
; CHECK-LABEL: test_returning_arg1
@@ -35,7 +39,9 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_arg2(<4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3, <4 x float> %arg4, <4 x float> %arg5) {
+define internal <4 x float> @test_returning_arg2(
+ <4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3,
+ <4 x float> %arg4, <4 x float> %arg5) {
entry:
ret <4 x float> %arg2
; CHECK-LABEL: test_returning_arg2
@@ -48,7 +54,9 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_arg3(<4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3, <4 x float> %arg4, <4 x float> %arg5) {
+define internal <4 x float> @test_returning_arg3(
+ <4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3,
+ <4 x float> %arg4, <4 x float> %arg5) {
entry:
ret <4 x float> %arg3
; CHECK-LABEL: test_returning_arg3
@@ -61,7 +69,9 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_arg4(<4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3, <4 x float> %arg4, <4 x float> %arg5) {
+define internal <4 x float> @test_returning_arg4(
+ <4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3,
+ <4 x float> %arg4, <4 x float> %arg5) {
entry:
ret <4 x float> %arg4
; CHECK-LABEL: test_returning_arg4
@@ -77,7 +87,11 @@
; correctly when interspersed with stack arguments in the argument
; list.
-define <4 x float> @test_returning_interspersed_arg0(i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1, i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3, i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4, <4 x float> %arg5, float %floatarg1) {
+define internal <4 x float> @test_returning_interspersed_arg0(
+ i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1,
+ i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3,
+ i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4,
+ <4 x float> %arg5, float %floatarg1) {
entry:
ret <4 x float> %arg0
; CHECK-LABEL: test_returning_interspersed_arg0
@@ -90,7 +104,11 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_interspersed_arg1(i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1, i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3, i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4, <4 x float> %arg5, float %floatarg1) {
+define internal <4 x float> @test_returning_interspersed_arg1(
+ i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1,
+ i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3,
+ i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4,
+ <4 x float> %arg5, float %floatarg1) {
entry:
ret <4 x float> %arg1
; CHECK-LABEL: test_returning_interspersed_arg1
@@ -103,7 +121,11 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_interspersed_arg2(i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1, i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3, i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4, <4 x float> %arg5, float %floatarg1) {
+define internal <4 x float> @test_returning_interspersed_arg2(
+ i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1,
+ i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3,
+ i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4,
+ <4 x float> %arg5, float %floatarg1) {
entry:
ret <4 x float> %arg2
; CHECK-LABEL: test_returning_interspersed_arg2
@@ -116,7 +138,11 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_interspersed_arg3(i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1, i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3, i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4, <4 x float> %arg5, float %floatarg1) {
+define internal <4 x float> @test_returning_interspersed_arg3(
+ i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1,
+ i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3,
+ i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4,
+ <4 x float> %arg5, float %floatarg1) {
entry:
ret <4 x float> %arg3
; CHECK-LABEL: test_returning_interspersed_arg3
@@ -129,7 +155,11 @@
; OPTM1: ret
}
-define <4 x float> @test_returning_interspersed_arg4(i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1, i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3, i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4, <4 x float> %arg5, float %floatarg1) {
+define internal <4 x float> @test_returning_interspersed_arg4(
+ i32 %i32arg0, double %doublearg0, <4 x float> %arg0, <4 x float> %arg1,
+ i32 %i32arg1, <4 x float> %arg2, double %doublearg1, <4 x float> %arg3,
+ i32 %i32arg2, double %doublearg2, float %floatarg0, <4 x float> %arg4,
+ <4 x float> %arg5, float %floatarg1) {
entry:
ret <4 x float> %arg4
; CHECK-LABEL: test_returning_interspersed_arg4
@@ -143,16 +173,21 @@
; Test that vectors are passed correctly as arguments to a function.
-declare void @VectorArgs(<4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>)
+declare void @VectorArgs(<4 x float>, <4 x float>, <4 x float>, <4 x float>,
+ <4 x float>, <4 x float>)
declare void @killXmmRegisters()
-define void @test_passing_vectors(<4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3, <4 x float> %arg4, <4 x float> %arg5, <4 x float> %arg6, <4 x float> %arg7, <4 x float> %arg8, <4 x float> %arg9) {
+define internal void @test_passing_vectors(
+ <4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3,
+ <4 x float> %arg4, <4 x float> %arg5, <4 x float> %arg6, <4 x float> %arg7,
+ <4 x float> %arg8, <4 x float> %arg9) {
entry:
; Kills XMM registers so that no in-arg lowering code interferes
; with the test.
call void @killXmmRegisters()
- call void @VectorArgs(<4 x float> %arg9, <4 x float> %arg8, <4 x float> %arg7, <4 x float> %arg6, <4 x float> %arg5, <4 x float> %arg4)
+ call void @VectorArgs(<4 x float> %arg9, <4 x float> %arg8, <4 x float> %arg7,
+ <4 x float> %arg6, <4 x float> %arg5, <4 x float> %arg4)
ret void
; CHECK-LABEL: test_passing_vectors
; CHECK: sub esp,0x20
@@ -181,14 +216,22 @@
; OPTM1-NEXT: add esp,0x20
}
-declare void @InterspersedVectorArgs(<4 x float>, i64, <4 x float>, i64, <4 x float>, float, <4 x float>, double, <4 x float>, i32, <4 x float>)
+declare void @InterspersedVectorArgs(
+ <4 x float>, i64, <4 x float>, i64, <4 x float>, float, <4 x float>,
+ double, <4 x float>, i32, <4 x float>)
-define void @test_passing_vectors_interspersed(<4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3, <4 x float> %arg4, <4 x float> %arg5, <4 x float> %arg6, <4 x float> %arg7, <4 x float> %arg8, <4 x float> %arg9) {
+define internal void @test_passing_vectors_interspersed(
+ <4 x float> %arg0, <4 x float> %arg1, <4 x float> %arg2, <4 x float> %arg3,
+ <4 x float> %arg4, <4 x float> %arg5, <4 x float> %arg6, <4 x float> %arg7,
+ <4 x float> %arg8, <4 x float> %arg9) {
entry:
; Kills XMM registers so that no in-arg lowering code interferes
; with the test.
call void @killXmmRegisters()
- call void @InterspersedVectorArgs(<4 x float> %arg9, i64 0, <4 x float> %arg8, i64 1, <4 x float> %arg7, float 2.000000e+00, <4 x float> %arg6, double 3.000000e+00, <4 x float> %arg5, i32 4, <4 x float> %arg4)
+ call void @InterspersedVectorArgs(<4 x float> %arg9, i64 0, <4 x float> %arg8,
+ i64 1, <4 x float> %arg7, float 2.000000e+00,
+ <4 x float> %arg6, double 3.000000e+00,
+ <4 x float> %arg5, i32 4, <4 x float> %arg4)
ret void
; CHECK-LABEL: test_passing_vectors_interspersed
; CHECK: sub esp,0x50
@@ -224,7 +267,7 @@
declare <4 x float> @VectorReturn(<4 x float> %arg0)
-define void @test_receiving_vectors(<4 x float> %arg0) {
+define internal void @test_receiving_vectors(<4 x float> %arg0) {
entry:
%result = call <4 x float> @VectorReturn(<4 x float> %arg0)
%result2 = call <4 x float> @VectorReturn(<4 x float> %result)
diff --git a/tests_lit/llvm2ice_tests/vector-arith.ll b/tests_lit/llvm2ice_tests/vector-arith.ll
index 69131df..abf09e2 100644
--- a/tests_lit/llvm2ice_tests/vector-arith.ll
+++ b/tests_lit/llvm2ice_tests/vector-arith.ll
@@ -9,7 +9,7 @@
; RUN: %p2i -i %s --filetype=obj --disassemble -a -Om1 -mattr=sse4.1 \
; RUN: | FileCheck --check-prefix=SSE41 %s
-define <4 x float> @test_fadd(<4 x float> %arg0, <4 x float> %arg1) {
+define internal <4 x float> @test_fadd(<4 x float> %arg0, <4 x float> %arg1) {
entry:
%res = fadd <4 x float> %arg0, %arg1
ret <4 x float> %res
@@ -17,7 +17,7 @@
; CHECK: addps
}
-define <4 x float> @test_fsub(<4 x float> %arg0, <4 x float> %arg1) {
+define internal <4 x float> @test_fsub(<4 x float> %arg0, <4 x float> %arg1) {
entry:
%res = fsub <4 x float> %arg0, %arg1
ret <4 x float> %res
@@ -25,7 +25,7 @@
; CHECK: subps
}
-define <4 x float> @test_fmul(<4 x float> %arg0, <4 x float> %arg1) {
+define internal <4 x float> @test_fmul(<4 x float> %arg0, <4 x float> %arg1) {
entry:
%res = fmul <4 x float> %arg0, %arg1
ret <4 x float> %res
@@ -33,7 +33,7 @@
; CHECK: mulps
}
-define <4 x float> @test_fdiv(<4 x float> %arg0, <4 x float> %arg1) {
+define internal <4 x float> @test_fdiv(<4 x float> %arg0, <4 x float> %arg1) {
entry:
%res = fdiv <4 x float> %arg0, %arg1
ret <4 x float> %res
@@ -41,7 +41,7 @@
; CHECK: divps
}
-define <4 x float> @test_frem(<4 x float> %arg0, <4 x float> %arg1) {
+define internal <4 x float> @test_frem(<4 x float> %arg0, <4 x float> %arg1) {
entry:
%res = frem <4 x float> %arg0, %arg1
ret <4 x float> %res
@@ -52,7 +52,7 @@
; CHECK: fmodf
}
-define <16 x i8> @test_add_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_add_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = add <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -60,7 +60,7 @@
; CHECK: paddb
}
-define <16 x i8> @test_and_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_and_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = and <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -68,7 +68,7 @@
; CHECK: pand
}
-define <16 x i8> @test_or_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_or_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = or <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -76,7 +76,7 @@
; CHECK: por
}
-define <16 x i8> @test_xor_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_xor_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = xor <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -84,7 +84,7 @@
; CHECK: pxor
}
-define <16 x i8> @test_sub_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_sub_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = sub <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -92,7 +92,7 @@
; CHECK: psubb
}
-define <16 x i8> @test_mul_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_mul_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = mul <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -115,7 +115,7 @@
; CHECK: imul
}
-define <16 x i8> @test_shl_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_shl_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = shl <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -138,7 +138,7 @@
; CHECK: shl
}
-define <16 x i8> @test_lshr_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_lshr_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = lshr <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -161,7 +161,7 @@
; CHECK: shr
}
-define <16 x i8> @test_ashr_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_ashr_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = ashr <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -184,7 +184,7 @@
; CHECK: sar
}
-define <16 x i8> @test_udiv_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_udiv_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = udiv <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -207,7 +207,7 @@
; CHECK: div
}
-define <16 x i8> @test_sdiv_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_sdiv_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = sdiv <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -230,7 +230,7 @@
; CHECK: idiv
}
-define <16 x i8> @test_urem_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_urem_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = urem <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -253,7 +253,7 @@
; CHECK: div
}
-define <16 x i8> @test_srem_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
+define internal <16 x i8> @test_srem_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
entry:
%res = srem <16 x i8> %arg0, %arg1
ret <16 x i8> %res
@@ -276,7 +276,7 @@
; CHECK: idiv
}
-define <8 x i16> @test_add_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_add_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = add <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -284,7 +284,7 @@
; CHECK: paddw
}
-define <8 x i16> @test_and_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_and_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = and <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -292,7 +292,7 @@
; CHECK: pand
}
-define <8 x i16> @test_or_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_or_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = or <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -300,7 +300,7 @@
; CHECK: por
}
-define <8 x i16> @test_xor_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_xor_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = xor <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -308,7 +308,7 @@
; CHECK: pxor
}
-define <8 x i16> @test_sub_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_sub_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = sub <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -316,7 +316,7 @@
; CHECK: psubw
}
-define <8 x i16> @test_mul_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_mul_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = mul <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -324,7 +324,7 @@
; CHECK: pmullw
}
-define <8 x i16> @test_shl_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_shl_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = shl <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -339,7 +339,7 @@
; CHECK: shl
}
-define <8 x i16> @test_lshr_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_lshr_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = lshr <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -354,7 +354,7 @@
; CHECK: shr
}
-define <8 x i16> @test_ashr_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_ashr_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = ashr <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -369,7 +369,7 @@
; CHECK: sar
}
-define <8 x i16> @test_udiv_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_udiv_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = udiv <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -384,7 +384,7 @@
; CHECK: div
}
-define <8 x i16> @test_sdiv_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_sdiv_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = sdiv <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -399,7 +399,7 @@
; CHECK: idiv
}
-define <8 x i16> @test_urem_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_urem_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = urem <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -414,7 +414,7 @@
; CHECK: div
}
-define <8 x i16> @test_srem_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
+define internal <8 x i16> @test_srem_v8i16(<8 x i16> %arg0, <8 x i16> %arg1) {
entry:
%res = srem <8 x i16> %arg0, %arg1
ret <8 x i16> %res
@@ -429,7 +429,7 @@
; CHECK: idiv
}
-define <4 x i32> @test_add_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_add_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = add <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -437,7 +437,7 @@
; CHECK: paddd
}
-define <4 x i32> @test_and_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_and_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = and <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -445,7 +445,7 @@
; CHECK: pand
}
-define <4 x i32> @test_or_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_or_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = or <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -453,7 +453,7 @@
; CHECK: por
}
-define <4 x i32> @test_xor_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_xor_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = xor <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -461,7 +461,7 @@
; CHECK: pxor
}
-define <4 x i32> @test_sub_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_sub_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = sub <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -469,7 +469,7 @@
; CHECK: psubd
}
-define <4 x i32> @test_mul_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_mul_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = mul <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -481,7 +481,7 @@
; SSE41: pmulld
}
-define <4 x i32> @test_shl_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_shl_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = shl <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -495,7 +495,7 @@
; SSE41-LABEL: test_shl_v4i32
}
-define <4 x i32> @test_lshr_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_lshr_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = lshr <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -506,7 +506,7 @@
; CHECK: shr
}
-define <4 x i32> @test_ashr_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_ashr_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = ashr <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -517,7 +517,7 @@
; CHECK: sar
}
-define <4 x i32> @test_udiv_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_udiv_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = udiv <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -528,7 +528,7 @@
; CHECK: div
}
-define <4 x i32> @test_sdiv_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_sdiv_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = sdiv <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -539,7 +539,7 @@
; CHECK: idiv
}
-define <4 x i32> @test_urem_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_urem_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = urem <4 x i32> %arg0, %arg1
ret <4 x i32> %res
@@ -550,7 +550,7 @@
; CHECK: div
}
-define <4 x i32> @test_srem_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
+define internal <4 x i32> @test_srem_v4i32(<4 x i32> %arg0, <4 x i32> %arg1) {
entry:
%res = srem <4 x i32> %arg0, %arg1
ret <4 x i32> %res
diff --git a/tests_lit/llvm2ice_tests/vector-bitcast.ll b/tests_lit/llvm2ice_tests/vector-bitcast.ll
index 9901c3c..3283402 100644
--- a/tests_lit/llvm2ice_tests/vector-bitcast.ll
+++ b/tests_lit/llvm2ice_tests/vector-bitcast.ll
@@ -6,7 +6,7 @@
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
; RUN: | FileCheck --check-prefix=OPTM1 %s
-define <16 x i8> @test_bitcast_v16i8_to_v16i8(<16 x i8> %arg) {
+define internal <16 x i8> @test_bitcast_v16i8_to_v16i8(<16 x i8> %arg) {
entry:
%res = bitcast <16 x i8> %arg to <16 x i8>
ret <16 x i8> %res
@@ -15,7 +15,7 @@
; CHECK-NEXT: ret
}
-define <8 x i16> @test_bitcast_v16i8_to_v8i16(<16 x i8> %arg) {
+define internal <8 x i16> @test_bitcast_v16i8_to_v8i16(<16 x i8> %arg) {
entry:
%res = bitcast <16 x i8> %arg to <8 x i16>
ret <8 x i16> %res
@@ -24,7 +24,7 @@
; CHECK-NEXT: ret
}
-define <4 x i32> @test_bitcast_v16i8_to_v4i32(<16 x i8> %arg) {
+define internal <4 x i32> @test_bitcast_v16i8_to_v4i32(<16 x i8> %arg) {
entry:
%res = bitcast <16 x i8> %arg to <4 x i32>
ret <4 x i32> %res
@@ -33,7 +33,7 @@
; CHECK-NEXT: ret
}
-define <4 x float> @test_bitcast_v16i8_to_v4f32(<16 x i8> %arg) {
+define internal <4 x float> @test_bitcast_v16i8_to_v4f32(<16 x i8> %arg) {
entry:
%res = bitcast <16 x i8> %arg to <4 x float>
ret <4 x float> %res
@@ -42,7 +42,7 @@
; CHECK-NEXT: ret
}
-define <16 x i8> @test_bitcast_v8i16_to_v16i8(<8 x i16> %arg) {
+define internal <16 x i8> @test_bitcast_v8i16_to_v16i8(<8 x i16> %arg) {
entry:
%res = bitcast <8 x i16> %arg to <16 x i8>
ret <16 x i8> %res
@@ -51,7 +51,7 @@
; CHECK-NEXT: ret
}
-define <8 x i16> @test_bitcast_v8i16_to_v8i16(<8 x i16> %arg) {
+define internal <8 x i16> @test_bitcast_v8i16_to_v8i16(<8 x i16> %arg) {
entry:
%res = bitcast <8 x i16> %arg to <8 x i16>
ret <8 x i16> %res
@@ -60,7 +60,7 @@
; CHECK-NEXT: ret
}
-define <4 x i32> @test_bitcast_v8i16_to_v4i32(<8 x i16> %arg) {
+define internal <4 x i32> @test_bitcast_v8i16_to_v4i32(<8 x i16> %arg) {
entry:
%res = bitcast <8 x i16> %arg to <4 x i32>
ret <4 x i32> %res
@@ -69,7 +69,7 @@
; CHECK-NEXT: ret
}
-define <4 x float> @test_bitcast_v8i16_to_v4f32(<8 x i16> %arg) {
+define internal <4 x float> @test_bitcast_v8i16_to_v4f32(<8 x i16> %arg) {
entry:
%res = bitcast <8 x i16> %arg to <4 x float>
ret <4 x float> %res
@@ -78,7 +78,7 @@
; CHECK-NEXT: ret
}
-define <16 x i8> @test_bitcast_v4i32_to_v16i8(<4 x i32> %arg) {
+define internal <16 x i8> @test_bitcast_v4i32_to_v16i8(<4 x i32> %arg) {
entry:
%res = bitcast <4 x i32> %arg to <16 x i8>
ret <16 x i8> %res
@@ -87,7 +87,7 @@
; CHECK-NEXT: ret
}
-define <8 x i16> @test_bitcast_v4i32_to_v8i16(<4 x i32> %arg) {
+define internal <8 x i16> @test_bitcast_v4i32_to_v8i16(<4 x i32> %arg) {
entry:
%res = bitcast <4 x i32> %arg to <8 x i16>
ret <8 x i16> %res
@@ -96,7 +96,7 @@
; CHECK-NEXT: ret
}
-define <4 x i32> @test_bitcast_v4i32_to_v4i32(<4 x i32> %arg) {
+define internal <4 x i32> @test_bitcast_v4i32_to_v4i32(<4 x i32> %arg) {
entry:
%res = bitcast <4 x i32> %arg to <4 x i32>
ret <4 x i32> %res
@@ -105,7 +105,7 @@
; CHECK-NEXT: ret
}
-define <4 x float> @test_bitcast_v4i32_to_v4f32(<4 x i32> %arg) {
+define internal <4 x float> @test_bitcast_v4i32_to_v4f32(<4 x i32> %arg) {
entry:
%res = bitcast <4 x i32> %arg to <4 x float>
ret <4 x float> %res
@@ -114,7 +114,7 @@
; CHECK-NEXT: ret
}
-define <16 x i8> @test_bitcast_v4f32_to_v16i8(<4 x float> %arg) {
+define internal <16 x i8> @test_bitcast_v4f32_to_v16i8(<4 x float> %arg) {
entry:
%res = bitcast <4 x float> %arg to <16 x i8>
ret <16 x i8> %res
@@ -123,7 +123,7 @@
; CHECK-NEXT: ret
}
-define <8 x i16> @test_bitcast_v4f32_to_v8i16(<4 x float> %arg) {
+define internal <8 x i16> @test_bitcast_v4f32_to_v8i16(<4 x float> %arg) {
entry:
%res = bitcast <4 x float> %arg to <8 x i16>
ret <8 x i16> %res
@@ -132,7 +132,7 @@
; CHECK-NEXT: ret
}
-define <4 x i32> @test_bitcast_v4f32_to_v4i32(<4 x float> %arg) {
+define internal <4 x i32> @test_bitcast_v4f32_to_v4i32(<4 x float> %arg) {
entry:
%res = bitcast <4 x float> %arg to <4 x i32>
ret <4 x i32> %res
@@ -141,7 +141,7 @@
; CHECK-NEXT: ret
}
-define <4 x float> @test_bitcast_v4f32_to_v4f32(<4 x float> %arg) {
+define internal <4 x float> @test_bitcast_v4f32_to_v4f32(<4 x float> %arg) {
entry:
%res = bitcast <4 x float> %arg to <4 x float>
ret <4 x float> %res
@@ -150,7 +150,7 @@
; CHECK-NEXT: ret
}
-define i8 @test_bitcast_v8i1_to_i8(<8 x i1> %arg) {
+define internal i8 @test_bitcast_v8i1_to_i8(<8 x i1> %arg) {
entry:
%res = bitcast <8 x i1> %arg to i8
ret i8 %res
@@ -162,7 +162,7 @@
; OPMT1: call -4
}
-define i16 @test_bitcast_v16i1_to_i16(<16 x i1> %arg) {
+define internal i16 @test_bitcast_v16i1_to_i16(<16 x i1> %arg) {
entry:
%res = bitcast <16 x i1> %arg to i16
ret i16 %res
@@ -174,7 +174,7 @@
; OPMT1: call -4
}
-define <8 x i1> @test_bitcast_i8_to_v8i1(i32 %arg) {
+define internal <8 x i1> @test_bitcast_i8_to_v8i1(i32 %arg) {
entry:
%arg.trunc = trunc i32 %arg to i8
%res = bitcast i8 %arg.trunc to <8 x i1>
@@ -187,7 +187,7 @@
; OPTM1: call {{.*}} R_{{.*}} __Sz_bitcast_i8_8xi1
}
-define <16 x i1> @test_bitcast_i16_to_v16i1(i32 %arg) {
+define internal <16 x i1> @test_bitcast_i16_to_v16i1(i32 %arg) {
entry:
%arg.trunc = trunc i32 %arg to i16
%res = bitcast i16 %arg.trunc to <16 x i1>
diff --git a/tests_lit/llvm2ice_tests/vector-cast.ll b/tests_lit/llvm2ice_tests/vector-cast.ll
index 3dae1fc..d36b7b9 100644
--- a/tests_lit/llvm2ice_tests/vector-cast.ll
+++ b/tests_lit/llvm2ice_tests/vector-cast.ll
@@ -6,7 +6,7 @@
; sext operations
-define <16 x i8> @test_sext_v16i1_to_v16i8(<16 x i1> %arg) {
+define internal <16 x i8> @test_sext_v16i1_to_v16i8(<16 x i1> %arg) {
entry:
%res = sext <16 x i1> %arg to <16 x i8>
ret <16 x i8> %res
@@ -20,7 +20,7 @@
; CHECK: pcmpgtb
}
-define <8 x i16> @test_sext_v8i1_to_v8i16(<8 x i1> %arg) {
+define internal <8 x i16> @test_sext_v8i1_to_v8i16(<8 x i1> %arg) {
entry:
%res = sext <8 x i1> %arg to <8 x i16>
ret <8 x i16> %res
@@ -30,7 +30,7 @@
; CHECK: psraw {{.*}},0xf
}
-define <4 x i32> @test_sext_v4i1_to_v4i32(<4 x i1> %arg) {
+define internal <4 x i32> @test_sext_v4i1_to_v4i32(<4 x i1> %arg) {
entry:
%res = sext <4 x i1> %arg to <4 x i32>
ret <4 x i32> %res
@@ -42,7 +42,7 @@
; zext operations
-define <16 x i8> @test_zext_v16i1_to_v16i8(<16 x i1> %arg) {
+define internal <16 x i8> @test_zext_v16i1_to_v16i8(<16 x i1> %arg) {
entry:
%res = zext <16 x i1> %arg to <16 x i8>
ret <16 x i8> %res
@@ -54,7 +54,7 @@
; CHECK: pand
}
-define <8 x i16> @test_zext_v8i1_to_v8i16(<8 x i1> %arg) {
+define internal <8 x i16> @test_zext_v8i1_to_v8i16(<8 x i1> %arg) {
entry:
%res = zext <8 x i1> %arg to <8 x i16>
ret <8 x i16> %res
@@ -66,7 +66,7 @@
; CHECK: pand
}
-define <4 x i32> @test_zext_v4i1_to_v4i32(<4 x i1> %arg) {
+define internal <4 x i32> @test_zext_v4i1_to_v4i32(<4 x i1> %arg) {
entry:
%res = zext <4 x i1> %arg to <4 x i32>
ret <4 x i32> %res
@@ -80,7 +80,7 @@
; trunc operations
-define <16 x i1> @test_trunc_v16i8_to_v16i1(<16 x i8> %arg) {
+define internal <16 x i1> @test_trunc_v16i8_to_v16i1(<16 x i8> %arg) {
entry:
%res = trunc <16 x i8> %arg to <16 x i1>
ret <16 x i1> %res
@@ -92,7 +92,7 @@
; CHECK: pand
}
-define <8 x i1> @test_trunc_v8i16_to_v8i1(<8 x i16> %arg) {
+define internal <8 x i1> @test_trunc_v8i16_to_v8i1(<8 x i16> %arg) {
entry:
%res = trunc <8 x i16> %arg to <8 x i1>
ret <8 x i1> %res
@@ -104,7 +104,7 @@
; CHECK: pand
}
-define <4 x i1> @test_trunc_v4i32_to_v4i1(<4 x i32> %arg) {
+define internal <4 x i1> @test_trunc_v4i32_to_v4i1(<4 x i32> %arg) {
entry:
%res = trunc <4 x i32> %arg to <4 x i1>
ret <4 x i1> %res
@@ -118,7 +118,7 @@
; fpto[us]i operations
-define <4 x i32> @test_fptosi_v4f32_to_v4i32(<4 x float> %arg) {
+define internal <4 x i32> @test_fptosi_v4f32_to_v4i32(<4 x float> %arg) {
entry:
%res = fptosi <4 x float> %arg to <4 x i32>
ret <4 x i32> %res
@@ -127,7 +127,7 @@
; CHECK: cvttps2dq
}
-define <4 x i32> @test_fptoui_v4f32_to_v4i32(<4 x float> %arg) {
+define internal <4 x i32> @test_fptoui_v4f32_to_v4i32(<4 x float> %arg) {
entry:
%res = fptoui <4 x float> %arg to <4 x i32>
ret <4 x i32> %res
@@ -138,7 +138,7 @@
; [su]itofp operations
-define <4 x float> @test_sitofp_v4i32_to_v4f32(<4 x i32> %arg) {
+define internal <4 x float> @test_sitofp_v4i32_to_v4f32(<4 x i32> %arg) {
entry:
%res = sitofp <4 x i32> %arg to <4 x float>
ret <4 x float> %res
@@ -147,7 +147,7 @@
; CHECK: cvtdq2ps
}
-define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
+define internal <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
entry:
%res = uitofp <4 x i32> %arg to <4 x float>
ret <4 x float> %res
diff --git a/tests_lit/llvm2ice_tests/vector-fcmp.ll b/tests_lit/llvm2ice_tests/vector-fcmp.ll
index 641d715..97e438b 100644
--- a/tests_lit/llvm2ice_tests/vector-fcmp.ll
+++ b/tests_lit/llvm2ice_tests/vector-fcmp.ll
@@ -7,7 +7,7 @@
; Check that sext elimination occurs when the result of the comparison
; instruction is alrady sign extended. Sign extension to 4 x i32 uses
; the pslld instruction.
-define <4 x i32> @sextElimination(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @sextElimination(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp oeq <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -17,7 +17,7 @@
; CHECK-NOT: pslld
}
-define <4 x i32> @fcmpFalseVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpFalseVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp false <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -26,7 +26,7 @@
; CHECK: pxor
}
-define <4 x i32> @fcmpOeqVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpOeqVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp oeq <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -35,7 +35,7 @@
; CHECK: cmpeqps
}
-define <4 x i32> @fcmpOgeVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpOgeVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp oge <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -44,7 +44,7 @@
; CHECK: cmpleps
}
-define <4 x i32> @fcmpOgtVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpOgtVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp ogt <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -53,7 +53,7 @@
; CHECK: cmpltps
}
-define <4 x i32> @fcmpOleVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpOleVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp ole <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -62,7 +62,7 @@
; CHECK: cmpleps
}
-define <4 x i32> @fcmpOltVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpOltVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp olt <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -71,7 +71,7 @@
; CHECK: cmpltps
}
-define <4 x i32> @fcmpOneVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpOneVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp one <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -82,7 +82,7 @@
; CHECK: pand
}
-define <4 x i32> @fcmpOrdVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpOrdVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp ord <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -91,7 +91,7 @@
; CHECK: cmpordps
}
-define <4 x i32> @fcmpTrueVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpTrueVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp true <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -100,7 +100,7 @@
; CHECK: pcmpeqd
}
-define <4 x i32> @fcmpUeqVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpUeqVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp ueq <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -111,7 +111,7 @@
; CHECK: por
}
-define <4 x i32> @fcmpUgeVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpUgeVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp uge <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -120,7 +120,7 @@
; CHECK: cmpnltps
}
-define <4 x i32> @fcmpUgtVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpUgtVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp ugt <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -129,7 +129,7 @@
; CHECK: cmpnleps
}
-define <4 x i32> @fcmpUleVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpUleVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp ule <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -138,7 +138,7 @@
; CHECK: cmpnltps
}
-define <4 x i32> @fcmpUltVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpUltVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp ult <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -147,7 +147,7 @@
; CHECK: cmpnleps
}
-define <4 x i32> @fcmpUneVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpUneVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp une <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -156,7 +156,7 @@
; CHECK: cmpneqps
}
-define <4 x i32> @fcmpUnoVector(<4 x float> %a, <4 x float> %b) {
+define internal <4 x i32> @fcmpUnoVector(<4 x float> %a, <4 x float> %b) {
entry:
%res.trunc = fcmp uno <4 x float> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
diff --git a/tests_lit/llvm2ice_tests/vector-icmp.ll b/tests_lit/llvm2ice_tests/vector-icmp.ll
index 0830b38..57adc94 100644
--- a/tests_lit/llvm2ice_tests/vector-icmp.ll
+++ b/tests_lit/llvm2ice_tests/vector-icmp.ll
@@ -7,7 +7,7 @@
; Check that sext elimination occurs when the result of the comparison
; instruction is alrady sign extended. Sign extension to 4 x i32 uses
; the pslld instruction.
-define <4 x i32> @test_sext_elimination(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @test_sext_elimination(<4 x i32> %a, <4 x i32> %b) {
entry:
%res.trunc = icmp eq <4 x i32> %a, %b
%res = sext <4 x i1> %res.trunc to <4 x i32>
@@ -17,7 +17,7 @@
; CHECK-NOT: pslld
}
-define <4 x i1> @test_icmp_v4i32_eq(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_eq(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp eq <4 x i32> %a, %b
ret <4 x i1> %res
@@ -25,7 +25,7 @@
; CHECK: pcmpeqd
}
-define <4 x i1> @test_icmp_v4i32_ne(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_ne(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp ne <4 x i32> %a, %b
ret <4 x i1> %res
@@ -34,14 +34,14 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i32_sgt(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_sgt(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp sgt <4 x i32> %a, %b
ret <4 x i1> %res
; CHECK: pcmpgtd
}
-define <4 x i1> @test_icmp_v4i32_sle(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_sle(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp sle <4 x i32> %a, %b
ret <4 x i1> %res
@@ -50,7 +50,7 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i32_slt(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_slt(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp slt <4 x i32> %a, %b
ret <4 x i1> %res
@@ -58,7 +58,7 @@
; CHECK: pcmpgtd
}
-define <4 x i1> @test_icmp_v4i32_uge(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_uge(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp uge <4 x i32> %a, %b
ret <4 x i1> %res
@@ -68,7 +68,7 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i32_ugt(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_ugt(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp ugt <4 x i32> %a, %b
ret <4 x i1> %res
@@ -77,7 +77,7 @@
; CHECK: pcmpgtd
}
-define <4 x i1> @test_icmp_v4i32_ule(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_ule(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp ule <4 x i32> %a, %b
ret <4 x i1> %res
@@ -87,7 +87,7 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i32_ult(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i1> @test_icmp_v4i32_ult(<4 x i32> %a, <4 x i32> %b) {
entry:
%res = icmp ult <4 x i32> %a, %b
ret <4 x i1> %res
@@ -96,7 +96,7 @@
; CHECK: pcmpgtd
}
-define <4 x i1> @test_icmp_v4i1_eq(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_eq(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp eq <4 x i1> %a, %b
ret <4 x i1> %res
@@ -104,7 +104,7 @@
; CHECK: pcmpeqd
}
-define <4 x i1> @test_icmp_v4i1_ne(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_ne(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp ne <4 x i1> %a, %b
ret <4 x i1> %res
@@ -113,7 +113,7 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i1_sgt(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_sgt(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp sgt <4 x i1> %a, %b
ret <4 x i1> %res
@@ -121,7 +121,7 @@
; CHECK: pcmpgtd
}
-define <4 x i1> @test_icmp_v4i1_sle(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_sle(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp sle <4 x i1> %a, %b
ret <4 x i1> %res
@@ -130,7 +130,7 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i1_slt(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_slt(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp slt <4 x i1> %a, %b
ret <4 x i1> %res
@@ -138,7 +138,7 @@
; CHECK: pcmpgtd
}
-define <4 x i1> @test_icmp_v4i1_uge(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_uge(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp uge <4 x i1> %a, %b
ret <4 x i1> %res
@@ -148,7 +148,7 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i1_ugt(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_ugt(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp ugt <4 x i1> %a, %b
ret <4 x i1> %res
@@ -157,7 +157,7 @@
; CHECK: pcmpgtd
}
-define <4 x i1> @test_icmp_v4i1_ule(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_ule(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp ule <4 x i1> %a, %b
ret <4 x i1> %res
@@ -167,7 +167,7 @@
; CHECK: pxor
}
-define <4 x i1> @test_icmp_v4i1_ult(<4 x i1> %a, <4 x i1> %b) {
+define internal <4 x i1> @test_icmp_v4i1_ult(<4 x i1> %a, <4 x i1> %b) {
entry:
%res = icmp ult <4 x i1> %a, %b
ret <4 x i1> %res
@@ -176,7 +176,7 @@
; CHECK: pcmpgtd
}
-define <8 x i1> @test_icmp_v8i16_eq(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_eq(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp eq <8 x i16> %a, %b
ret <8 x i1> %res
@@ -184,7 +184,7 @@
; CHECK: pcmpeqw
}
-define <8 x i1> @test_icmp_v8i16_ne(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_ne(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp ne <8 x i16> %a, %b
ret <8 x i1> %res
@@ -193,7 +193,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i16_sgt(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_sgt(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp sgt <8 x i16> %a, %b
ret <8 x i1> %res
@@ -201,7 +201,7 @@
; CHECK: pcmpgtw
}
-define <8 x i1> @test_icmp_v8i16_sle(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_sle(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp sle <8 x i16> %a, %b
ret <8 x i1> %res
@@ -210,7 +210,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i16_slt(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_slt(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp slt <8 x i16> %a, %b
ret <8 x i1> %res
@@ -218,7 +218,7 @@
; CHECK: pcmpgtw
}
-define <8 x i1> @test_icmp_v8i16_uge(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_uge(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp uge <8 x i16> %a, %b
ret <8 x i1> %res
@@ -228,7 +228,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i16_ugt(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_ugt(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp ugt <8 x i16> %a, %b
ret <8 x i1> %res
@@ -237,7 +237,7 @@
; CHECK: pcmpgtw
}
-define <8 x i1> @test_icmp_v8i16_ule(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_ule(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp ule <8 x i16> %a, %b
ret <8 x i1> %res
@@ -247,7 +247,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i16_ult(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i1> @test_icmp_v8i16_ult(<8 x i16> %a, <8 x i16> %b) {
entry:
%res = icmp ult <8 x i16> %a, %b
ret <8 x i1> %res
@@ -256,7 +256,7 @@
; CHECK: pcmpgtw
}
-define <8 x i1> @test_icmp_v8i1_eq(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_eq(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp eq <8 x i1> %a, %b
ret <8 x i1> %res
@@ -264,7 +264,7 @@
; CHECK: pcmpeqw
}
-define <8 x i1> @test_icmp_v8i1_ne(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_ne(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp ne <8 x i1> %a, %b
ret <8 x i1> %res
@@ -273,7 +273,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i1_sgt(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_sgt(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp sgt <8 x i1> %a, %b
ret <8 x i1> %res
@@ -281,7 +281,7 @@
; CHECK: pcmpgtw
}
-define <8 x i1> @test_icmp_v8i1_sle(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_sle(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp sle <8 x i1> %a, %b
ret <8 x i1> %res
@@ -290,7 +290,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i1_slt(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_slt(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp slt <8 x i1> %a, %b
ret <8 x i1> %res
@@ -298,7 +298,7 @@
; CHECK: pcmpgtw
}
-define <8 x i1> @test_icmp_v8i1_uge(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_uge(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp uge <8 x i1> %a, %b
ret <8 x i1> %res
@@ -308,7 +308,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i1_ugt(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_ugt(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp ugt <8 x i1> %a, %b
ret <8 x i1> %res
@@ -317,7 +317,7 @@
; CHECK: pcmpgtw
}
-define <8 x i1> @test_icmp_v8i1_ule(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_ule(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp ule <8 x i1> %a, %b
ret <8 x i1> %res
@@ -327,7 +327,7 @@
; CHECK: pxor
}
-define <8 x i1> @test_icmp_v8i1_ult(<8 x i1> %a, <8 x i1> %b) {
+define internal <8 x i1> @test_icmp_v8i1_ult(<8 x i1> %a, <8 x i1> %b) {
entry:
%res = icmp ult <8 x i1> %a, %b
ret <8 x i1> %res
@@ -336,7 +336,7 @@
; CHECK: pcmpgtw
}
-define <16 x i1> @test_icmp_v16i8_eq(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_eq(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp eq <16 x i8> %a, %b
ret <16 x i1> %res
@@ -344,7 +344,7 @@
; CHECK: pcmpeqb
}
-define <16 x i1> @test_icmp_v16i8_ne(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_ne(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp ne <16 x i8> %a, %b
ret <16 x i1> %res
@@ -353,7 +353,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i8_sgt(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_sgt(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp sgt <16 x i8> %a, %b
ret <16 x i1> %res
@@ -361,7 +361,7 @@
; CHECK: pcmpgtb
}
-define <16 x i1> @test_icmp_v16i8_sle(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_sle(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp sle <16 x i8> %a, %b
ret <16 x i1> %res
@@ -370,7 +370,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i8_slt(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_slt(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp slt <16 x i8> %a, %b
ret <16 x i1> %res
@@ -378,7 +378,7 @@
; CHECK: pcmpgtb
}
-define <16 x i1> @test_icmp_v16i8_uge(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_uge(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp uge <16 x i8> %a, %b
ret <16 x i1> %res
@@ -388,7 +388,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i8_ugt(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_ugt(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp ugt <16 x i8> %a, %b
ret <16 x i1> %res
@@ -397,7 +397,7 @@
; CHECK: pcmpgtb
}
-define <16 x i1> @test_icmp_v16i8_ule(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_ule(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp ule <16 x i8> %a, %b
ret <16 x i1> %res
@@ -407,7 +407,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i8_ult(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i1> @test_icmp_v16i8_ult(<16 x i8> %a, <16 x i8> %b) {
entry:
%res = icmp ult <16 x i8> %a, %b
ret <16 x i1> %res
@@ -416,7 +416,7 @@
; CHECK: pcmpgtb
}
-define <16 x i1> @test_icmp_v16i1_eq(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_eq(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp eq <16 x i1> %a, %b
ret <16 x i1> %res
@@ -424,7 +424,7 @@
; CHECK: pcmpeqb
}
-define <16 x i1> @test_icmp_v16i1_ne(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_ne(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp ne <16 x i1> %a, %b
ret <16 x i1> %res
@@ -433,7 +433,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i1_sgt(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_sgt(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp sgt <16 x i1> %a, %b
ret <16 x i1> %res
@@ -441,7 +441,7 @@
; CHECK: pcmpgtb
}
-define <16 x i1> @test_icmp_v16i1_sle(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_sle(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp sle <16 x i1> %a, %b
ret <16 x i1> %res
@@ -450,7 +450,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i1_slt(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_slt(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp slt <16 x i1> %a, %b
ret <16 x i1> %res
@@ -458,7 +458,7 @@
; CHECK: pcmpgtb
}
-define <16 x i1> @test_icmp_v16i1_uge(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_uge(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp uge <16 x i1> %a, %b
ret <16 x i1> %res
@@ -468,7 +468,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i1_ugt(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_ugt(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp ugt <16 x i1> %a, %b
ret <16 x i1> %res
@@ -477,7 +477,7 @@
; CHECK: pcmpgtb
}
-define <16 x i1> @test_icmp_v16i1_ule(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_ule(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp ule <16 x i1> %a, %b
ret <16 x i1> %res
@@ -487,7 +487,7 @@
; CHECK: pxor
}
-define <16 x i1> @test_icmp_v16i1_ult(<16 x i1> %a, <16 x i1> %b) {
+define internal <16 x i1> @test_icmp_v16i1_ult(<16 x i1> %a, <16 x i1> %b) {
entry:
%res = icmp ult <16 x i1> %a, %b
ret <16 x i1> %res
diff --git a/tests_lit/llvm2ice_tests/vector-ops.ll b/tests_lit/llvm2ice_tests/vector-ops.ll
index 48bf1a9..7114187 100644
--- a/tests_lit/llvm2ice_tests/vector-ops.ll
+++ b/tests_lit/llvm2ice_tests/vector-ops.ll
@@ -11,7 +11,8 @@
; insertelement operations
-define <4 x float> @insertelement_v4f32_0(<4 x float> %vec, float %elt) {
+define internal <4 x float> @insertelement_v4f32_0(<4 x float> %vec,
+ float %elt) {
entry:
%res = insertelement <4 x float> %vec, float %elt, i32 0
ret <4 x float> %res
@@ -22,7 +23,7 @@
; SSE41: insertps {{.*}},{{.*}},0x0
}
-define <4 x i32> @insertelement_v4i32_0(<4 x i32> %vec, i32 %elt) {
+define internal <4 x i32> @insertelement_v4i32_0(<4 x i32> %vec, i32 %elt) {
entry:
%res = insertelement <4 x i32> %vec, i32 %elt, i32 0
ret <4 x i32> %res
@@ -35,7 +36,8 @@
}
-define <4 x float> @insertelement_v4f32_1(<4 x float> %vec, float %elt) {
+define internal <4 x float> @insertelement_v4f32_1(<4 x float> %vec,
+ float %elt) {
entry:
%res = insertelement <4 x float> %vec, float %elt, i32 1
ret <4 x float> %res
@@ -47,7 +49,7 @@
; SSE41: insertps {{.*}},{{.*}},0x10
}
-define <4 x i32> @insertelement_v4i32_1(<4 x i32> %vec, i32 %elt) {
+define internal <4 x i32> @insertelement_v4i32_1(<4 x i32> %vec, i32 %elt) {
entry:
%res = insertelement <4 x i32> %vec, i32 %elt, i32 1
ret <4 x i32> %res
@@ -59,7 +61,7 @@
; SSE41: pinsrd {{.*}},{{.*}},0x1
}
-define <8 x i16> @insertelement_v8i16(<8 x i16> %vec, i32 %elt.arg) {
+define internal <8 x i16> @insertelement_v8i16(<8 x i16> %vec, i32 %elt.arg) {
entry:
%elt = trunc i32 %elt.arg to i16
%res = insertelement <8 x i16> %vec, i16 %elt, i32 1
@@ -71,7 +73,7 @@
; SSE41: pinsrw
}
-define <16 x i8> @insertelement_v16i8(<16 x i8> %vec, i32 %elt.arg) {
+define internal <16 x i8> @insertelement_v16i8(<16 x i8> %vec, i32 %elt.arg) {
entry:
%elt = trunc i32 %elt.arg to i8
%res = insertelement <16 x i8> %vec, i8 %elt, i32 1
@@ -85,7 +87,7 @@
; SSE41: pinsrb
}
-define <4 x i1> @insertelement_v4i1_0(<4 x i1> %vec, i32 %elt.arg) {
+define internal <4 x i1> @insertelement_v4i1_0(<4 x i1> %vec, i32 %elt.arg) {
entry:
%elt = trunc i32 %elt.arg to i1
%res = insertelement <4 x i1> %vec, i1 %elt, i32 0
@@ -97,7 +99,7 @@
; SSE41: pinsrd {{.*}},{{.*}},0x0
}
-define <4 x i1> @insertelement_v4i1_1(<4 x i1> %vec, i32 %elt.arg) {
+define internal <4 x i1> @insertelement_v4i1_1(<4 x i1> %vec, i32 %elt.arg) {
entry:
%elt = trunc i32 %elt.arg to i1
%res = insertelement <4 x i1> %vec, i1 %elt, i32 1
@@ -110,7 +112,7 @@
; SSE41: pinsrd {{.*}},{{.*}},0x1
}
-define <8 x i1> @insertelement_v8i1(<8 x i1> %vec, i32 %elt.arg) {
+define internal <8 x i1> @insertelement_v8i1(<8 x i1> %vec, i32 %elt.arg) {
entry:
%elt = trunc i32 %elt.arg to i1
%res = insertelement <8 x i1> %vec, i1 %elt, i32 1
@@ -122,7 +124,7 @@
; SSE41: pinsrw
}
-define <16 x i1> @insertelement_v16i1(<16 x i1> %vec, i32 %elt.arg) {
+define internal <16 x i1> @insertelement_v16i1(<16 x i1> %vec, i32 %elt.arg) {
entry:
%elt = trunc i32 %elt.arg to i1
%res = insertelement <16 x i1> %vec, i1 %elt, i32 1
@@ -138,7 +140,7 @@
; extractelement operations
-define float @extractelement_v4f32(<4 x float> %vec) {
+define internal float @extractelement_v4f32(<4 x float> %vec) {
entry:
%res = extractelement <4 x float> %vec, i32 1
ret float %res
@@ -149,7 +151,7 @@
; SSE41: pshufd
}
-define i32 @extractelement_v4i32(<4 x i32> %vec) {
+define internal i32 @extractelement_v4i32(<4 x i32> %vec) {
entry:
%res = extractelement <4 x i32> %vec, i32 1
ret i32 %res
@@ -161,7 +163,7 @@
; SSE41: pextrd
}
-define i32 @extractelement_v8i16(<8 x i16> %vec) {
+define internal i32 @extractelement_v8i16(<8 x i16> %vec) {
entry:
%res = extractelement <8 x i16> %vec, i32 1
%res.ext = zext i16 %res to i32
@@ -173,7 +175,7 @@
; SSE41: pextrw
}
-define i32 @extractelement_v16i8(<16 x i8> %vec) {
+define internal i32 @extractelement_v16i8(<16 x i8> %vec) {
entry:
%res = extractelement <16 x i8> %vec, i32 1
%res.ext = zext i8 %res to i32
@@ -187,7 +189,7 @@
; SSE41: pextrb
}
-define i32 @extractelement_v4i1(<4 x i1> %vec) {
+define internal i32 @extractelement_v4i1(<4 x i1> %vec) {
entry:
%res = extractelement <4 x i1> %vec, i32 1
%res.ext = zext i1 %res to i32
@@ -199,7 +201,7 @@
; SSE41: pextrd
}
-define i32 @extractelement_v8i1(<8 x i1> %vec) {
+define internal i32 @extractelement_v8i1(<8 x i1> %vec) {
entry:
%res = extractelement <8 x i1> %vec, i32 1
%res.ext = zext i1 %res to i32
@@ -211,7 +213,7 @@
; SSE41: pextrw
}
-define i32 @extractelement_v16i1(<16 x i1> %vec) {
+define internal i32 @extractelement_v16i1(<16 x i1> %vec) {
entry:
%res = extractelement <16 x i1> %vec, i32 1
%res.ext = zext i1 %res to i32
diff --git a/tests_lit/llvm2ice_tests/vector-select.ll b/tests_lit/llvm2ice_tests/vector-select.ll
index 4ce8f38..6e08d3f 100644
--- a/tests_lit/llvm2ice_tests/vector-select.ll
+++ b/tests_lit/llvm2ice_tests/vector-select.ll
@@ -9,7 +9,8 @@
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 -mattr=sse4.1 \
; RUN: | FileCheck --check-prefix=SSE41 %s
-define <16 x i8> @test_select_v16i8(<16 x i1> %cond, <16 x i8> %arg1, <16 x i8> %arg2) {
+define internal <16 x i8> @test_select_v16i8(<16 x i1> %cond, <16 x i8> %arg1,
+ <16 x i8> %arg2) {
entry:
%res = select <16 x i1> %cond, <16 x i8> %arg1, <16 x i8> %arg2
ret <16 x i8> %res
@@ -22,7 +23,8 @@
; SSE41: pblendvb xmm{{[0-7]}},{{xmm[0-7]|XMMWORD}}
}
-define <16 x i1> @test_select_v16i1(<16 x i1> %cond, <16 x i1> %arg1, <16 x i1> %arg2) {
+define internal <16 x i1> @test_select_v16i1(<16 x i1> %cond, <16 x i1> %arg1,
+ <16 x i1> %arg2) {
entry:
%res = select <16 x i1> %cond, <16 x i1> %arg1, <16 x i1> %arg2
ret <16 x i1> %res
@@ -35,7 +37,8 @@
; SSE41: pblendvb xmm{{[0-7]}},{{xmm[0-7]|XMMWORD}}
}
-define <8 x i16> @test_select_v8i16(<8 x i1> %cond, <8 x i16> %arg1, <8 x i16> %arg2) {
+define internal <8 x i16> @test_select_v8i16(<8 x i1> %cond, <8 x i16> %arg1,
+ <8 x i16> %arg2) {
entry:
%res = select <8 x i1> %cond, <8 x i16> %arg1, <8 x i16> %arg2
ret <8 x i16> %res
@@ -48,7 +51,8 @@
; SSE41: pblendvb xmm{{[0-7]}},{{xmm[0-7]|XMMWORD}}
}
-define <8 x i1> @test_select_v8i1(<8 x i1> %cond, <8 x i1> %arg1, <8 x i1> %arg2) {
+define internal <8 x i1> @test_select_v8i1(<8 x i1> %cond, <8 x i1> %arg1,
+ <8 x i1> %arg2) {
entry:
%res = select <8 x i1> %cond, <8 x i1> %arg1, <8 x i1> %arg2
ret <8 x i1> %res
@@ -61,7 +65,8 @@
; SSE41: pblendvb xmm{{[0-7]}},{{xmm[0-7]|XMMWORD}}
}
-define <4 x i32> @test_select_v4i32(<4 x i1> %cond, <4 x i32> %arg1, <4 x i32> %arg2) {
+define internal <4 x i32> @test_select_v4i32(<4 x i1> %cond, <4 x i32> %arg1,
+ <4 x i32> %arg2) {
entry:
%res = select <4 x i1> %cond, <4 x i32> %arg1, <4 x i32> %arg2
ret <4 x i32> %res
@@ -75,7 +80,8 @@
; SSE41: blendvps xmm{{[0-7]}},{{xmm[0-7]|XMMWORD}}
}
-define <4 x float> @test_select_v4f32(<4 x i1> %cond, <4 x float> %arg1, <4 x float> %arg2) {
+define internal <4 x float> @test_select_v4f32(
+ <4 x i1> %cond, <4 x float> %arg1, <4 x float> %arg2) {
entry:
%res = select <4 x i1> %cond, <4 x float> %arg1, <4 x float> %arg2
ret <4 x float> %res
@@ -89,7 +95,8 @@
; SSE41: blendvps xmm{{[0-7]}},{{xmm[0-7]|XMMWORD}}
}
-define <4 x i1> @test_select_v4i1(<4 x i1> %cond, <4 x i1> %arg1, <4 x i1> %arg2) {
+define internal <4 x i1> @test_select_v4i1(<4 x i1> %cond, <4 x i1> %arg1,
+ <4 x i1> %arg2) {
entry:
%res = select <4 x i1> %cond, <4 x i1> %arg1, <4 x i1> %arg2
ret <4 x i1> %res
diff --git a/tests_lit/parse_errs/bad-bb-size.test b/tests_lit/parse_errs/bad-bb-size.test
index 012e09d..1414c91 100644
--- a/tests_lit/parse_errs/bad-bb-size.test
+++ b/tests_lit/parse_errs/bad-bb-size.test
@@ -3,7 +3,8 @@
; REQUIRES: no_minimal_build
; RUN: not %pnacl_sz -bitcode-as-text %p/Inputs/bad-bb-size.tbc \
-; RUN: -bitcode-format=pnacl -notranslate -no-ir-gen -build-on-read 2>&1 \
+; RUN: -bitcode-format=pnacl -notranslate -no-ir-gen -build-on-read \
+; RUN: -allow-externally-defined-symbols 2>&1 \
; RUN: | FileCheck %s
; CHECK: Function defines 3105555534 basic blocks, which is too big for a function containing 36 bytes
diff --git a/tests_lit/parse_errs/bad-intrinsic-arg.test b/tests_lit/parse_errs/bad-intrinsic-arg.test
index 6063841..5e3c6e9 100644
--- a/tests_lit/parse_errs/bad-intrinsic-arg.test
+++ b/tests_lit/parse_errs/bad-intrinsic-arg.test
@@ -4,7 +4,8 @@
; RUN: not %pnacl_sz -bitcode-as-text \
; RUN: %p/Inputs/bad-intrinsic-arg.tbc \
-; RUN: -bitcode-format=pnacl -notranslate -build-on-read 2>&1 \
+; RUN: -bitcode-format=pnacl -notranslate -build-on-read \
+; RUN: -allow-externally-defined-symbols 2>&1 \
; RUN: | FileCheck %s
; CHECK: Argument 1 of llvm.nacl.setjmp expects i32. Found: double
diff --git a/tests_lit/parse_errs/bad-switch-case.test b/tests_lit/parse_errs/bad-switch-case.test
index b8d090a..41e9e09 100644
--- a/tests_lit/parse_errs/bad-switch-case.test
+++ b/tests_lit/parse_errs/bad-switch-case.test
@@ -1,6 +1,7 @@
; REQUIRES: no_minimal_build
; RUN: not %pnacl_sz -bitcode-as-text %p/Inputs/bad-switch-case.tbc \
-; RUN: -bitcode-format=pnacl -notranslate -build-on-read 2>&1 \
+; RUN: -bitcode-format=pnacl -notranslate -build-on-read \
+; RUN: -allow-externally-defined-symbols 2>&1 \
; RUN: | FileCheck -check-prefix=BAD-SWITCH-CASE %s
; BAD-SWITCH-CASE: Reference to basic block 3105555534 not found. Must be less than 6
diff --git a/tests_lit/parse_errs/bad-var-fwdref.test b/tests_lit/parse_errs/bad-var-fwdref.test
index a23dc07..ebf20da 100644
--- a/tests_lit/parse_errs/bad-var-fwdref.test
+++ b/tests_lit/parse_errs/bad-var-fwdref.test
@@ -3,7 +3,8 @@
; REQUIRES: no_minimal_build
; RUN: not %pnacl_sz -bitcode-as-text %p/Inputs/bad-var-fwdref.tbc \
-; RUN: -bitcode-format=pnacl -notranslate -no-ir-gen -build-on-read 2>&1 \
+; RUN: -bitcode-format=pnacl -notranslate -no-ir-gen -build-on-read \
+; RUN: -allow-externally-defined-symbols 2>&1 \
; RUN: | FileCheck %s
; CHECK: Forward reference @3105555534 too big. Have 1 globals and function contains 16 bytes
diff --git a/tests_lit/parse_errs/call-fcn-bad-param-type.ll b/tests_lit/parse_errs/call-fcn-bad-param-type.ll
index 46c3ab7..b7246e0 100644
--- a/tests_lit/parse_errs/call-fcn-bad-param-type.ll
+++ b/tests_lit/parse_errs/call-fcn-bad-param-type.ll
@@ -3,7 +3,8 @@
; REQUIRES: no_minimal_build
-; RUN: %p2i --expect-fail -i %s --insts | FileCheck %s
+; RUN: %p2i --expect-fail -i %s --insts --args \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
declare void @f(i8);
diff --git a/tests_lit/parse_errs/call-fcn-bad-return-type.ll b/tests_lit/parse_errs/call-fcn-bad-return-type.ll
index 4154c7d..873e05c 100644
--- a/tests_lit/parse_errs/call-fcn-bad-return-type.ll
+++ b/tests_lit/parse_errs/call-fcn-bad-return-type.ll
@@ -3,7 +3,8 @@
; REQUIRES: no_minimal_build
-; RUN: %p2i --expect-fail -i %s --insts | FileCheck %s
+; RUN: %p2i --expect-fail -i %s --insts --args \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
declare i1 @f();
diff --git a/tests_lit/parse_errs/call-indirect-i8.ll b/tests_lit/parse_errs/call-indirect-i8.ll
index 2c46ba1..c1a8947 100644
--- a/tests_lit/parse_errs/call-indirect-i8.ll
+++ b/tests_lit/parse_errs/call-indirect-i8.ll
@@ -4,7 +4,7 @@
; RUN: %p2i --expect-fail -i %s --insts | FileCheck %s
-define void @CallIndirectI32(i32 %f_addr) {
+define internal void @CallIndirectI32(i32 %f_addr) {
entry:
%f = inttoptr i32 %f_addr to i32(i8)*
%r = call i32 %f(i8 1)
diff --git a/tests_lit/parse_errs/fcn-value-index-isnt-defined.test b/tests_lit/parse_errs/fcn-value-index-isnt-defined.test
index 9359873..32a96dd 100644
--- a/tests_lit/parse_errs/fcn-value-index-isnt-defined.test
+++ b/tests_lit/parse_errs/fcn-value-index-isnt-defined.test
@@ -2,7 +2,7 @@
; REQUIRES: no_minimal_build
-; RUN: not %pnacl_sz -bitcode-as-text \
+; RUN: not %pnacl_sz -bitcode-as-text -allow-externally-defined-symbols \
; RUN: %p/Inputs/fcn-value-index-isnt-defined.tbc \
; RUN: -bitcode-format=pnacl -notranslate -build-on-read 2>&1 \
; RUN: | FileCheck %s
diff --git a/tests_lit/parse_errs/indirect-call-on-float.test b/tests_lit/parse_errs/indirect-call-on-float.test
index ef30de6..fc68512 100644
--- a/tests_lit/parse_errs/indirect-call-on-float.test
+++ b/tests_lit/parse_errs/indirect-call-on-float.test
@@ -2,7 +2,7 @@
; REQUIRES: no_minimal_build
-; RUN: not %pnacl_sz -bitcode-as-text \
+; RUN: not %pnacl_sz -bitcode-as-text -allow-externally-defined-symbols \
; RUN: %p/Inputs/indirect-call-on-float.tbc \
; RUN: -bitcode-format=pnacl -notranslate -build-on-read 2>&1 \
; RUN: | FileCheck %s
diff --git a/tests_lit/parse_errs/insertelt-wrong-type.test b/tests_lit/parse_errs/insertelt-wrong-type.test
index 326ec34..3e38a97 100644
--- a/tests_lit/parse_errs/insertelt-wrong-type.test
+++ b/tests_lit/parse_errs/insertelt-wrong-type.test
@@ -3,7 +3,7 @@
; REQUIRES: no_minimal_build
-; RUN: not %pnacl_sz -bitcode-as-text \
+; RUN: not %pnacl_sz -bitcode-as-text -allow-externally-defined-symbols \
; RUN: %p/Inputs/insertelt-wrong-type.tbc \
; RUN: -bitcode-format=pnacl -notranslate -build-on-read 2>&1 \
; RUN: | FileCheck %s
diff --git a/tests_lit/parse_errs/symtab-after-fcn.test b/tests_lit/parse_errs/symtab-after-fcn.test
index 4e519f1..a74bbd2 100644
--- a/tests_lit/parse_errs/symtab-after-fcn.test
+++ b/tests_lit/parse_errs/symtab-after-fcn.test
@@ -3,7 +3,8 @@
; REQUIRES: no_minimal_build
; RUN: not %pnacl_sz -bitcode-as-text %p/Inputs/symtab-after-fcn.tbc \
-; RUN: -bitcode-format=pnacl -notranslate -build-on-read 2>&1 \
+; RUN: -bitcode-format=pnacl -notranslate -build-on-read \
+; RUN: -allow-externally-defined-symbols 2>&1 \
; RUN: | FileCheck %s
; CHECK: Module valuesymtab not allowed after function blocks
diff --git a/tests_lit/reader_tests/alloca.ll b/tests_lit/reader_tests/alloca.ll
index 0a314e0..034cc4f 100644
--- a/tests_lit/reader_tests/alloca.ll
+++ b/tests_lit/reader_tests/alloca.ll
@@ -8,7 +8,7 @@
; Show examples where size is defined by a constant.
-define i32 @AllocaA0Size1() {
+define internal i32 @AllocaA0Size1() {
entry:
%array = alloca i8, i32 1
%addr = ptrtoint i8* %array to i32
@@ -19,7 +19,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaA0Size2() {
+define internal i32 @AllocaA0Size2() {
entry:
%array = alloca i8, i32 2
%addr = ptrtoint i8* %array to i32
@@ -30,7 +30,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaA0Size3() {
+define internal i32 @AllocaA0Size3() {
entry:
%array = alloca i8, i32 3
%addr = ptrtoint i8* %array to i32
@@ -41,7 +41,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaA0Size4() {
+define internal i32 @AllocaA0Size4() {
entry:
%array = alloca i8, i32 4
%addr = ptrtoint i8* %array to i32
@@ -52,7 +52,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaA1Size4(i32 %n) {
+define internal i32 @AllocaA1Size4(i32 %n) {
entry:
%array = alloca i8, i32 4, align 1
%addr = ptrtoint i8* %array to i32
@@ -63,7 +63,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaA2Size4(i32 %n) {
+define internal i32 @AllocaA2Size4(i32 %n) {
entry:
%array = alloca i8, i32 4, align 2
%addr = ptrtoint i8* %array to i32
@@ -74,7 +74,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaA8Size4(i32 %n) {
+define internal i32 @AllocaA8Size4(i32 %n) {
entry:
%array = alloca i8, i32 4, align 8
%addr = ptrtoint i8* %array to i32
@@ -85,7 +85,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @Alloca16Size4(i32 %n) {
+define internal i32 @Alloca16Size4(i32 %n) {
entry:
%array = alloca i8, i32 4, align 16
%addr = ptrtoint i8* %array to i32
@@ -98,7 +98,7 @@
; Show examples where size is not known at compile time.
-define i32 @AllocaVarsizeA0(i32 %n) {
+define internal i32 @AllocaVarsizeA0(i32 %n) {
entry:
%array = alloca i8, i32 %n
%addr = ptrtoint i8* %array to i32
@@ -109,7 +109,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaVarsizeA1(i32 %n) {
+define internal i32 @AllocaVarsizeA1(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 1
%addr = ptrtoint i8* %array to i32
@@ -120,7 +120,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaVarsizeA2(i32 %n) {
+define internal i32 @AllocaVarsizeA2(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 2
%addr = ptrtoint i8* %array to i32
@@ -131,7 +131,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaVarsizeA4(i32 %n) {
+define internal i32 @AllocaVarsizeA4(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 4
%addr = ptrtoint i8* %array to i32
@@ -142,7 +142,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaVarsizeA8(i32 %n) {
+define internal i32 @AllocaVarsizeA8(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 8
%addr = ptrtoint i8* %array to i32
@@ -153,7 +153,7 @@
; CHECK-NEXT: ret i32 %array
}
-define i32 @AllocaVarsizeA16(i32 %n) {
+define internal i32 @AllocaVarsizeA16(i32 %n) {
entry:
%array = alloca i8, i32 %n, align 16
%addr = ptrtoint i8* %array to i32
diff --git a/tests_lit/reader_tests/binops.ll b/tests_lit/reader_tests/binops.ll
index 65ed55e..6436343 100644
--- a/tests_lit/reader_tests/binops.ll
+++ b/tests_lit/reader_tests/binops.ll
@@ -10,97 +10,97 @@
; TODO(kschimpf): add i8/i16. Needs bitcasts.
-define i32 @AddI32(i32 %a, i32 %b) {
+define internal i32 @AddI32(i32 %a, i32 %b) {
entry:
%add = add i32 %b, %a
ret i32 %add
}
-; CHECK: define i32 @AddI32(i32 %a, i32 %b) {
+; CHECK: define internal i32 @AddI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = add i32 %b, %a
; CHECK-NEXT: ret i32 %add
; CHECK-NEXT: }
-define i64 @AddI64(i64 %a, i64 %b) {
+define internal i64 @AddI64(i64 %a, i64 %b) {
entry:
%add = add i64 %b, %a
ret i64 %add
}
-; CHECK-NEXT: define i64 @AddI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @AddI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = add i64 %b, %a
; CHECK-NEXT: ret i64 %add
; CHECK-NEXT: }
-define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%add = add <16 x i8> %b, %a
ret <16 x i8> %add
}
-; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = add <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %add
; CHECK-NEXT: }
-define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%add = add <8 x i16> %b, %a
ret <8 x i16> %add
}
-; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = add <8 x i16> %b, %a
; CHECK-NEXT: ret <8 x i16> %add
; CHECK-NEXT: }
-define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%add = add <4 x i32> %b, %a
ret <4 x i32> %add
}
-; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = add <4 x i32> %b, %a
; CHECK-NEXT: ret <4 x i32> %add
; CHECK-NEXT: }
-define float @AddFloat(float %a, float %b) {
+define internal float @AddFloat(float %a, float %b) {
entry:
%add = fadd float %b, %a
ret float %add
}
-; CHECK-NEXT: define float @AddFloat(float %a, float %b) {
+; CHECK-NEXT: define internal float @AddFloat(float %a, float %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = fadd float %b, %a
; CHECK-NEXT: ret float %add
; CHECK-NEXT: }
-define double @AddDouble(double %a, double %b) {
+define internal double @AddDouble(double %a, double %b) {
entry:
%add = fadd double %b, %a
ret double %add
}
-; CHECK-NEXT: define double @AddDouble(double %a, double %b) {
+; CHECK-NEXT: define internal double @AddDouble(double %a, double %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = fadd double %b, %a
; CHECK-NEXT: ret double %add
; CHECK-NEXT: }
-define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
+define internal <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
entry:
%add = fadd <4 x float> %b, %a
ret <4 x float> %add
}
-; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: define internal <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %add = fadd <4 x float> %b, %a
; CHECK-NEXT: ret <4 x float> %add
@@ -108,97 +108,97 @@
; TODO(kschimpf): sub i8/i16. Needs bitcasts.
-define i32 @SubI32(i32 %a, i32 %b) {
+define internal i32 @SubI32(i32 %a, i32 %b) {
entry:
%sub = sub i32 %a, %b
ret i32 %sub
}
-; CHECK-NEXT: define i32 @SubI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @SubI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = sub i32 %a, %b
; CHECK-NEXT: ret i32 %sub
; CHECK-NEXT: }
-define i64 @SubI64(i64 %a, i64 %b) {
+define internal i64 @SubI64(i64 %a, i64 %b) {
entry:
%sub = sub i64 %a, %b
ret i64 %sub
}
-; CHECK-NEXT: define i64 @SubI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @SubI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = sub i64 %a, %b
; CHECK-NEXT: ret i64 %sub
; CHECK-NEXT: }
-define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%sub = sub <16 x i8> %a, %b
ret <16 x i8> %sub
}
-; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = sub <16 x i8> %a, %b
; CHECK-NEXT: ret <16 x i8> %sub
; CHECK-NEXT: }
-define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%sub = sub <8 x i16> %a, %b
ret <8 x i16> %sub
}
-; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = sub <8 x i16> %a, %b
; CHECK-NEXT: ret <8 x i16> %sub
; CHECK-NEXT: }
-define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%sub = sub <4 x i32> %a, %b
ret <4 x i32> %sub
}
-; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = sub <4 x i32> %a, %b
; CHECK-NEXT: ret <4 x i32> %sub
; CHECK-NEXT: }
-define float @SubFloat(float %a, float %b) {
+define internal float @SubFloat(float %a, float %b) {
entry:
%sub = fsub float %a, %b
ret float %sub
}
-; CHECK-NEXT: define float @SubFloat(float %a, float %b) {
+; CHECK-NEXT: define internal float @SubFloat(float %a, float %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = fsub float %a, %b
; CHECK-NEXT: ret float %sub
; CHECK-NEXT: }
-define double @SubDouble(double %a, double %b) {
+define internal double @SubDouble(double %a, double %b) {
entry:
%sub = fsub double %a, %b
ret double %sub
}
-; CHECK-NEXT: define double @SubDouble(double %a, double %b) {
+; CHECK-NEXT: define internal double @SubDouble(double %a, double %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = fsub double %a, %b
; CHECK-NEXT: ret double %sub
; CHECK-NEXT: }
-define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
+define internal <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
entry:
%sub = fsub <4 x float> %a, %b
ret <4 x float> %sub
}
-; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: define internal <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sub = fsub <4 x float> %a, %b
; CHECK-NEXT: ret <4 x float> %sub
@@ -206,73 +206,73 @@
; TODO(kschimpf): mul i8/i16. Needs bitcasts.
-define i32 @MulI32(i32 %a, i32 %b) {
+define internal i32 @MulI32(i32 %a, i32 %b) {
entry:
%mul = mul i32 %b, %a
ret i32 %mul
}
-; CHECK-NEXT: define i32 @MulI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @MulI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %mul = mul i32 %b, %a
; CHECK-NEXT: ret i32 %mul
; CHECK-NEXT: }
-define i64 @MulI64(i64 %a, i64 %b) {
+define internal i64 @MulI64(i64 %a, i64 %b) {
entry:
%mul = mul i64 %b, %a
ret i64 %mul
}
-; CHECK-NEXT: define i64 @MulI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @MulI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %mul = mul i64 %b, %a
; CHECK-NEXT: ret i64 %mul
; CHECK-NEXT: }
-define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%mul = mul <16 x i8> %b, %a
ret <16 x i8> %mul
}
-; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %mul = mul <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %mul
; CHECK-NEXT: }
-define float @MulFloat(float %a, float %b) {
+define internal float @MulFloat(float %a, float %b) {
entry:
%mul = fmul float %b, %a
ret float %mul
}
-; CHECK-NEXT: define float @MulFloat(float %a, float %b) {
+; CHECK-NEXT: define internal float @MulFloat(float %a, float %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %mul = fmul float %b, %a
; CHECK-NEXT: ret float %mul
; CHECK-NEXT: }
-define double @MulDouble(double %a, double %b) {
+define internal double @MulDouble(double %a, double %b) {
entry:
%mul = fmul double %b, %a
ret double %mul
}
-; CHECK-NEXT: define double @MulDouble(double %a, double %b) {
+; CHECK-NEXT: define internal double @MulDouble(double %a, double %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %mul = fmul double %b, %a
; CHECK-NEXT: ret double %mul
; CHECK-NEXT: }
-define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
+define internal <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
entry:
%mul = fmul <4 x float> %b, %a
ret <4 x float> %mul
}
-; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: define internal <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %mul = fmul <4 x float> %b, %a
; CHECK-NEXT: ret <4 x float> %mul
@@ -280,61 +280,61 @@
; TODO(kschimpf): sdiv i8/i16. Needs bitcasts.
-define i32 @SdivI32(i32 %a, i32 %b) {
+define internal i32 @SdivI32(i32 %a, i32 %b) {
entry:
%div = sdiv i32 %a, %b
ret i32 %div
}
-; CHECK-NEXT: define i32 @SdivI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @SdivI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = sdiv i32 %a, %b
; CHECK-NEXT: ret i32 %div
; CHECK-NEXT: }
-define i64 @SdivI64(i64 %a, i64 %b) {
+define internal i64 @SdivI64(i64 %a, i64 %b) {
entry:
%div = sdiv i64 %a, %b
ret i64 %div
}
-; CHECK-NEXT: define i64 @SdivI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @SdivI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = sdiv i64 %a, %b
; CHECK-NEXT: ret i64 %div
; CHECK-NEXT: }
-define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%div = sdiv <16 x i8> %a, %b
ret <16 x i8> %div
}
-; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = sdiv <16 x i8> %a, %b
; CHECK-NEXT: ret <16 x i8> %div
; CHECK-NEXT: }
-define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%div = sdiv <8 x i16> %a, %b
ret <8 x i16> %div
}
-; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = sdiv <8 x i16> %a, %b
; CHECK-NEXT: ret <8 x i16> %div
; CHECK-NEXT: }
-define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%div = sdiv <4 x i32> %a, %b
ret <4 x i32> %div
}
-; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = sdiv <4 x i32> %a, %b
; CHECK-NEXT: ret <4 x i32> %div
@@ -342,61 +342,61 @@
; TODO(kschimpf): srem i8/i16. Needs bitcasts.
-define i32 @SremI32(i32 %a, i32 %b) {
+define internal i32 @SremI32(i32 %a, i32 %b) {
entry:
%rem = srem i32 %a, %b
ret i32 %rem
}
-; CHECK-NEXT: define i32 @SremI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @SremI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = srem i32 %a, %b
; CHECK-NEXT: ret i32 %rem
; CHECK-NEXT: }
-define i64 @SremI64(i64 %a, i64 %b) {
+define internal i64 @SremI64(i64 %a, i64 %b) {
entry:
%rem = srem i64 %a, %b
ret i64 %rem
}
-; CHECK-NEXT: define i64 @SremI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @SremI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = srem i64 %a, %b
; CHECK-NEXT: ret i64 %rem
; CHECK-NEXT: }
-define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%rem = srem <16 x i8> %a, %b
ret <16 x i8> %rem
}
-; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = srem <16 x i8> %a, %b
; CHECK-NEXT: ret <16 x i8> %rem
; CHECK-NEXT: }
-define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%rem = srem <8 x i16> %a, %b
ret <8 x i16> %rem
}
-; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = srem <8 x i16> %a, %b
; CHECK-NEXT: ret <8 x i16> %rem
; CHECK-NEXT: }
-define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%rem = srem <4 x i32> %a, %b
ret <4 x i32> %rem
}
-; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = srem <4 x i32> %a, %b
; CHECK-NEXT: ret <4 x i32> %rem
@@ -404,61 +404,61 @@
; TODO(kschimpf): udiv i8/i16. Needs bitcasts.
-define i32 @UdivI32(i32 %a, i32 %b) {
+define internal i32 @UdivI32(i32 %a, i32 %b) {
entry:
%div = udiv i32 %a, %b
ret i32 %div
}
-; CHECK-NEXT: define i32 @UdivI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @UdivI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = udiv i32 %a, %b
; CHECK-NEXT: ret i32 %div
; CHECK-NEXT: }
-define i64 @UdivI64(i64 %a, i64 %b) {
+define internal i64 @UdivI64(i64 %a, i64 %b) {
entry:
%div = udiv i64 %a, %b
ret i64 %div
}
-; CHECK-NEXT: define i64 @UdivI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @UdivI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = udiv i64 %a, %b
; CHECK-NEXT: ret i64 %div
; CHECK-NEXT: }
-define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%div = udiv <16 x i8> %a, %b
ret <16 x i8> %div
}
-; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = udiv <16 x i8> %a, %b
; CHECK-NEXT: ret <16 x i8> %div
; CHECK-NEXT: }
-define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%div = udiv <8 x i16> %a, %b
ret <8 x i16> %div
}
-; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = udiv <8 x i16> %a, %b
; CHECK-NEXT: ret <8 x i16> %div
; CHECK-NEXT: }
-define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%div = udiv <4 x i32> %a, %b
ret <4 x i32> %div
}
-; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = udiv <4 x i32> %a, %b
; CHECK-NEXT: ret <4 x i32> %div
@@ -466,133 +466,133 @@
; TODO(kschimpf): urem i8/i16. Needs bitcasts.
-define i32 @UremI32(i32 %a, i32 %b) {
+define internal i32 @UremI32(i32 %a, i32 %b) {
entry:
%rem = urem i32 %a, %b
ret i32 %rem
}
-; CHECK-NEXT: define i32 @UremI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @UremI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = urem i32 %a, %b
; CHECK-NEXT: ret i32 %rem
; CHECK-NEXT: }
-define i64 @UremI64(i64 %a, i64 %b) {
+define internal i64 @UremI64(i64 %a, i64 %b) {
entry:
%rem = urem i64 %a, %b
ret i64 %rem
}
-; CHECK-NEXT: define i64 @UremI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @UremI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = urem i64 %a, %b
; CHECK-NEXT: ret i64 %rem
; CHECK-NEXT: }
-define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%rem = urem <16 x i8> %a, %b
ret <16 x i8> %rem
}
-; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = urem <16 x i8> %a, %b
; CHECK-NEXT: ret <16 x i8> %rem
; CHECK-NEXT: }
-define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%rem = urem <8 x i16> %a, %b
ret <8 x i16> %rem
}
-; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = urem <8 x i16> %a, %b
; CHECK-NEXT: ret <8 x i16> %rem
; CHECK-NEXT: }
-define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%rem = urem <4 x i32> %a, %b
ret <4 x i32> %rem
}
-; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = urem <4 x i32> %a, %b
; CHECK-NEXT: ret <4 x i32> %rem
; CHECK-NEXT: }
-define float @fdivFloat(float %a, float %b) {
+define internal float @fdivFloat(float %a, float %b) {
entry:
%div = fdiv float %a, %b
ret float %div
}
-; CHECK-NEXT: define float @fdivFloat(float %a, float %b) {
+; CHECK-NEXT: define internal float @fdivFloat(float %a, float %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = fdiv float %a, %b
; CHECK-NEXT: ret float %div
; CHECK-NEXT: }
-define double @fdivDouble(double %a, double %b) {
+define internal double @fdivDouble(double %a, double %b) {
entry:
%div = fdiv double %a, %b
ret double %div
}
-; CHECK-NEXT: define double @fdivDouble(double %a, double %b) {
+; CHECK-NEXT: define internal double @fdivDouble(double %a, double %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = fdiv double %a, %b
; CHECK-NEXT: ret double %div
; CHECK-NEXT: }
-define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
+define internal <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
entry:
%div = fdiv <4 x float> %a, %b
ret <4 x float> %div
}
-; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: define internal <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %div = fdiv <4 x float> %a, %b
; CHECK-NEXT: ret <4 x float> %div
; CHECK-NEXT: }
-define float @fremFloat(float %a, float %b) {
+define internal float @fremFloat(float %a, float %b) {
entry:
%rem = frem float %a, %b
ret float %rem
}
-; CHECK-NEXT: define float @fremFloat(float %a, float %b) {
+; CHECK-NEXT: define internal float @fremFloat(float %a, float %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = frem float %a, %b
; CHECK-NEXT: ret float %rem
; CHECK-NEXT: }
-define double @fremDouble(double %a, double %b) {
+define internal double @fremDouble(double %a, double %b) {
entry:
%rem = frem double %a, %b
ret double %rem
}
-; CHECK-NEXT: define double @fremDouble(double %a, double %b) {
+; CHECK-NEXT: define internal double @fremDouble(double %a, double %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = frem double %a, %b
; CHECK-NEXT: ret double %rem
; CHECK-NEXT: }
-define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
+define internal <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
entry:
%rem = frem <4 x float> %a, %b
ret <4 x float> %rem
}
-; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: define internal <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %rem = frem <4 x float> %a, %b
; CHECK-NEXT: ret <4 x float> %rem
@@ -600,61 +600,61 @@
; TODO(kschimpf): and i1/i8/i16. Needs bitcasts.
-define i32 @AndI32(i32 %a, i32 %b) {
+define internal i32 @AndI32(i32 %a, i32 %b) {
entry:
%and = and i32 %b, %a
ret i32 %and
}
-; CHECK-NEXT: define i32 @AndI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @AndI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %and = and i32 %b, %a
; CHECK-NEXT: ret i32 %and
; CHECK-NEXT: }
-define i64 @AndI64(i64 %a, i64 %b) {
+define internal i64 @AndI64(i64 %a, i64 %b) {
entry:
%and = and i64 %b, %a
ret i64 %and
}
-; CHECK-NEXT: define i64 @AndI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @AndI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %and = and i64 %b, %a
; CHECK-NEXT: ret i64 %and
; CHECK-NEXT: }
-define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%and = and <16 x i8> %b, %a
ret <16 x i8> %and
}
-; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %and = and <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %and
; CHECK-NEXT: }
-define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%and = and <8 x i16> %b, %a
ret <8 x i16> %and
}
-; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %and = and <8 x i16> %b, %a
; CHECK-NEXT: ret <8 x i16> %and
; CHECK-NEXT: }
-define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%and = and <4 x i32> %b, %a
ret <4 x i32> %and
}
-; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %and = and <4 x i32> %b, %a
; CHECK-NEXT: ret <4 x i32> %and
@@ -662,61 +662,61 @@
; TODO(kschimpf): or i1/i8/i16. Needs bitcasts.
-define i32 @OrI32(i32 %a, i32 %b) {
+define internal i32 @OrI32(i32 %a, i32 %b) {
entry:
%or = or i32 %b, %a
ret i32 %or
}
-; CHECK-NEXT: define i32 @OrI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @OrI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %or = or i32 %b, %a
; CHECK-NEXT: ret i32 %or
; CHECK-NEXT: }
-define i64 @OrI64(i64 %a, i64 %b) {
+define internal i64 @OrI64(i64 %a, i64 %b) {
entry:
%or = or i64 %b, %a
ret i64 %or
}
-; CHECK-NEXT: define i64 @OrI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @OrI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %or = or i64 %b, %a
; CHECK-NEXT: ret i64 %or
; CHECK-NEXT: }
-define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%or = or <16 x i8> %b, %a
ret <16 x i8> %or
}
-; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %or = or <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %or
; CHECK-NEXT: }
-define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%or = or <8 x i16> %b, %a
ret <8 x i16> %or
}
-; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %or = or <8 x i16> %b, %a
; CHECK-NEXT: ret <8 x i16> %or
; CHECK-NEXT: }
-define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%or = or <4 x i32> %b, %a
ret <4 x i32> %or
}
-; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %or = or <4 x i32> %b, %a
; CHECK-NEXT: ret <4 x i32> %or
@@ -724,61 +724,61 @@
; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts.
-define i32 @XorI32(i32 %a, i32 %b) {
+define internal i32 @XorI32(i32 %a, i32 %b) {
entry:
%xor = xor i32 %b, %a
ret i32 %xor
}
-; CHECK-NEXT: define i32 @XorI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @XorI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %xor = xor i32 %b, %a
; CHECK-NEXT: ret i32 %xor
; CHECK-NEXT: }
-define i64 @XorI64(i64 %a, i64 %b) {
+define internal i64 @XorI64(i64 %a, i64 %b) {
entry:
%xor = xor i64 %b, %a
ret i64 %xor
}
-; CHECK-NEXT: define i64 @XorI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @XorI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %xor = xor i64 %b, %a
; CHECK-NEXT: ret i64 %xor
; CHECK-NEXT: }
-define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%xor = xor <16 x i8> %b, %a
ret <16 x i8> %xor
}
-; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %xor = xor <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %xor
; CHECK-NEXT: }
-define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%xor = xor <8 x i16> %b, %a
ret <8 x i16> %xor
}
-; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %xor = xor <8 x i16> %b, %a
; CHECK-NEXT: ret <8 x i16> %xor
; CHECK-NEXT: }
-define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%xor = xor <4 x i32> %b, %a
ret <4 x i32> %xor
}
-; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %xor = xor <4 x i32> %b, %a
; CHECK-NEXT: ret <4 x i32> %xor
@@ -786,61 +786,61 @@
; TODO(kschimpf): shl i8/i16. Needs bitcasts.
-define i32 @ShlI32(i32 %a, i32 %b) {
+define internal i32 @ShlI32(i32 %a, i32 %b) {
entry:
%shl = shl i32 %b, %a
ret i32 %shl
}
-; CHECK-NEXT: define i32 @ShlI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @ShlI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %shl = shl i32 %b, %a
; CHECK-NEXT: ret i32 %shl
; CHECK-NEXT: }
-define i64 @ShlI64(i64 %a, i64 %b) {
+define internal i64 @ShlI64(i64 %a, i64 %b) {
entry:
%shl = shl i64 %b, %a
ret i64 %shl
}
-; CHECK-NEXT: define i64 @ShlI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @ShlI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %shl = shl i64 %b, %a
; CHECK-NEXT: ret i64 %shl
; CHECK-NEXT: }
-define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%shl = shl <16 x i8> %b, %a
ret <16 x i8> %shl
}
-; CHECK-NEXT: define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %shl = shl <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %shl
; CHECK-NEXT: }
-define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%shl = shl <8 x i16> %b, %a
ret <8 x i16> %shl
}
-; CHECK-NEXT: define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %shl = shl <8 x i16> %b, %a
; CHECK-NEXT: ret <8 x i16> %shl
; CHECK-NEXT: }
-define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%shl = shl <4 x i32> %b, %a
ret <4 x i32> %shl
}
-; CHECK-NEXT: define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %shl = shl <4 x i32> %b, %a
; CHECK-NEXT: ret <4 x i32> %shl
@@ -848,61 +848,61 @@
; TODO(kschimpf): ashr i8/i16. Needs bitcasts.
-define i32 @ashrI32(i32 %a, i32 %b) {
+define internal i32 @ashrI32(i32 %a, i32 %b) {
entry:
%ashr = ashr i32 %b, %a
ret i32 %ashr
}
-; CHECK-NEXT: define i32 @ashrI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @ashrI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %ashr = ashr i32 %b, %a
; CHECK-NEXT: ret i32 %ashr
; CHECK-NEXT: }
-define i64 @AshrI64(i64 %a, i64 %b) {
+define internal i64 @AshrI64(i64 %a, i64 %b) {
entry:
%ashr = ashr i64 %b, %a
ret i64 %ashr
}
-; CHECK-NEXT: define i64 @AshrI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @AshrI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %ashr = ashr i64 %b, %a
; CHECK-NEXT: ret i64 %ashr
; CHECK-NEXT: }
-define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%ashr = ashr <16 x i8> %b, %a
ret <16 x i8> %ashr
}
-; CHECK-NEXT: define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %ashr = ashr <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %ashr
; CHECK-NEXT: }
-define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%ashr = ashr <8 x i16> %b, %a
ret <8 x i16> %ashr
}
-; CHECK-NEXT: define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %ashr = ashr <8 x i16> %b, %a
; CHECK-NEXT: ret <8 x i16> %ashr
; CHECK-NEXT: }
-define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%ashr = ashr <4 x i32> %b, %a
ret <4 x i32> %ashr
}
-; CHECK-NEXT: define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %ashr = ashr <4 x i32> %b, %a
; CHECK-NEXT: ret <4 x i32> %ashr
@@ -910,61 +910,61 @@
; TODO(kschimpf): lshr i8/i16. Needs bitcasts.
-define i32 @lshrI32(i32 %a, i32 %b) {
+define internal i32 @lshrI32(i32 %a, i32 %b) {
entry:
%lshr = lshr i32 %b, %a
ret i32 %lshr
}
-; CHECK-NEXT: define i32 @lshrI32(i32 %a, i32 %b) {
+; CHECK-NEXT: define internal i32 @lshrI32(i32 %a, i32 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %lshr = lshr i32 %b, %a
; CHECK-NEXT: ret i32 %lshr
; CHECK-NEXT: }
-define i64 @LshrI64(i64 %a, i64 %b) {
+define internal i64 @LshrI64(i64 %a, i64 %b) {
entry:
%lshr = lshr i64 %b, %a
ret i64 %lshr
}
-; CHECK-NEXT: define i64 @LshrI64(i64 %a, i64 %b) {
+; CHECK-NEXT: define internal i64 @LshrI64(i64 %a, i64 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %lshr = lshr i64 %b, %a
; CHECK-NEXT: ret i64 %lshr
; CHECK-NEXT: }
-define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+define internal <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) {
entry:
%lshr = lshr <16 x i8> %b, %a
ret <16 x i8> %lshr
}
-; CHECK-NEXT: define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: define internal <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %lshr = lshr <16 x i8> %b, %a
; CHECK-NEXT: ret <16 x i8> %lshr
; CHECK-NEXT: }
-define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+define internal <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) {
entry:
%lshr = lshr <8 x i16> %b, %a
ret <8 x i16> %lshr
}
-; CHECK-NEXT: define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: define internal <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %lshr = lshr <8 x i16> %b, %a
; CHECK-NEXT: ret <8 x i16> %lshr
; CHECK-NEXT: }
-define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+define internal <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) {
entry:
%lshr = lshr <4 x i32> %b, %a
ret <4 x i32> %lshr
}
-; CHECK-NEXT: define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: define internal <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %lshr = lshr <4 x i32> %b, %a
; CHECK-NEXT: ret <4 x i32> %lshr
diff --git a/tests_lit/reader_tests/branch.ll b/tests_lit/reader_tests/branch.ll
index b68a016..0b70223 100644
--- a/tests_lit/reader_tests/branch.ll
+++ b/tests_lit/reader_tests/branch.ll
@@ -6,7 +6,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define void @SimpleBranch() {
+define internal void @SimpleBranch() {
entry:
br label %b3
b1:
@@ -17,7 +17,7 @@
br label %b1
}
-; CHECK: define void @SimpleBranch() {
+; CHECK: define internal void @SimpleBranch() {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %b3
; CHECK-NEXT: b1:
@@ -28,7 +28,7 @@
; CHECK-NEXT: br label %b1
; CHECK-NEXT: }
-define void @CondBranch(i32 %p) {
+define internal void @CondBranch(i32 %p) {
entry:
%test = trunc i32 %p to i1
br i1 %test, label %b1, label %b2
@@ -38,7 +38,7 @@
br i1 %test, label %b2, label %b1
}
-; CHECK-NEXT: define void @CondBranch(i32 %p) {
+; CHECK-NEXT: define internal void @CondBranch(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %test = trunc i32 %p to i1
; CHECK-NEXT: br i1 %test, label %b1, label %b2
diff --git a/tests_lit/reader_tests/call.ll b/tests_lit/reader_tests/call.ll
index 95227a6..ab1cfec 100644
--- a/tests_lit/reader_tests/call.ll
+++ b/tests_lit/reader_tests/call.ll
@@ -1,12 +1,14 @@
; Test handling of call instructions.
-; RUN: %p2i -i %s --insts | FileCheck %s
+; RUN: %p2i -i %s --insts --args -allow-externally-defined-symbols \
+; RUN: | FileCheck %s
; RUN: %if --need=allow_disable_ir_gen --command \
; RUN: %p2i -i %s --args -notranslate -timing -no-ir-gen \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define i32 @fib(i32 %n) {
+define internal i32 @fib(i32 %n) {
entry:
%cmp = icmp slt i32 %n, 2
br i1 %cmp, label %return, label %if.end
@@ -23,7 +25,7 @@
ret i32 %n
}
-; CHECK: define i32 @fib(i32 %n) {
+; CHECK: define internal i32 @fib(i32 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %cmp = icmp slt i32 %n, 2
; CHECK-NEXT: br i1 %cmp, label %return, label %if.end
@@ -38,7 +40,7 @@
; CHECK-NEXT: ret i32 %n
; CHECK-NEXT: }
-define i32 @fact(i32 %n) {
+define internal i32 @fact(i32 %n) {
entry:
%cmp = icmp slt i32 %n, 2
br i1 %cmp, label %return, label %if.end
@@ -53,7 +55,7 @@
ret i32 %n
}
-; CHECK-NEXT: define i32 @fact(i32 %n) {
+; CHECK-NEXT: define internal i32 @fact(i32 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %cmp = icmp slt i32 %n, 2
; CHECK-NEXT: br i1 %cmp, label %return, label %if.end
@@ -66,13 +68,13 @@
; CHECK-NEXT: ret i32 %n
; CHECK-NEXT: }
-define i32 @redirect(i32 %n) {
+define internal i32 @redirect(i32 %n) {
entry:
%call = tail call i32 @redirect_target(i32 %n)
ret i32 %call
}
-; CHECK-NEXT: define i32 @redirect(i32 %n) {
+; CHECK-NEXT: define internal i32 @redirect(i32 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %call = call i32 @redirect_target(i32 %n)
; CHECK-NEXT: ret i32 %call
@@ -80,7 +82,7 @@
declare i32 @redirect_target(i32)
-define void @call_void(i32 %n) {
+define internal void @call_void(i32 %n) {
entry:
%cmp2 = icmp sgt i32 %n, 0
br i1 %cmp2, label %if.then, label %if.end
@@ -96,7 +98,7 @@
ret void
}
-; CHECK-NEXT: define void @call_void(i32 %n) {
+; CHECK-NEXT: define internal void @call_void(i32 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %cmp2 = icmp sgt i32 %n, 0
; CHECK-NEXT: br i1 %cmp2, label %if.then, label %if.end
diff --git a/tests_lit/reader_tests/casts.ll b/tests_lit/reader_tests/casts.ll
index 2bf76eb..2475379 100644
--- a/tests_lit/reader_tests/casts.ll
+++ b/tests_lit/reader_tests/casts.ll
@@ -9,7 +9,7 @@
; TODO(kschimpf) Find way to test pointer conversions (since they in general
; get removed by pnacl-freeze).
-define i32 @TruncI64(i64 %v) {
+define internal i32 @TruncI64(i64 %v) {
%v1 = trunc i64 %v to i1
%v8 = trunc i64 %v to i8
%v16 = trunc i64 %v to i16
@@ -17,7 +17,7 @@
ret i32 %v32
}
-; CHECK: define i32 @TruncI64(i64 %__0) {
+; CHECK: define internal i32 @TruncI64(i64 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i64 %__0 to i1
; CHECK-NEXT: %__2 = trunc i64 %__0 to i8
@@ -26,14 +26,14 @@
; CHECK-NEXT: ret i32 %__4
; CHECK-NEXT: }
-define void @TruncI32(i32 %v) {
+define internal void @TruncI32(i32 %v) {
%v1 = trunc i32 %v to i1
%v8 = trunc i32 %v to i8
%v16 = trunc i32 %v to i16
ret void
}
-; CHECK-NEXT: define void @TruncI32(i32 %__0) {
+; CHECK-NEXT: define internal void @TruncI32(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
; CHECK-NEXT: %__2 = trunc i32 %__0 to i8
@@ -41,14 +41,14 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @TruncI16(i32 %p) {
+define internal void @TruncI16(i32 %p) {
%v = trunc i32 %p to i16
%v1 = trunc i16 %v to i1
%v8 = trunc i16 %v to i8
ret void
}
-; CHECK-NEXT: define void @TruncI16(i32 %__0) {
+; CHECK-NEXT: define internal void @TruncI16(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i16
; CHECK-NEXT: %__2 = trunc i16 %__1 to i1
@@ -56,20 +56,20 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @TruncI8(i32 %p) {
+define internal void @TruncI8(i32 %p) {
%v = trunc i32 %p to i8
%v1 = trunc i8 %v to i1
ret void
}
-; CHECK-NEXT: define void @TruncI8(i32 %__0) {
+; CHECK-NEXT: define internal void @TruncI8(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i8
; CHECK-NEXT: %__2 = trunc i8 %__1 to i1
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define i64 @ZextI1(i32 %p) {
+define internal i64 @ZextI1(i32 %p) {
%v = trunc i32 %p to i1
%v8 = zext i1 %v to i8
%v16 = zext i1 %v to i16
@@ -78,7 +78,7 @@
ret i64 %v64
}
-; CHECK-NEXT: define i64 @ZextI1(i32 %__0) {
+; CHECK-NEXT: define internal i64 @ZextI1(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
; CHECK-NEXT: %__2 = zext i1 %__1 to i8
@@ -88,7 +88,7 @@
; CHECK-NEXT: ret i64 %__5
; CHECK-NEXT: }
-define i32 @ZextI8(i32 %p) {
+define internal i32 @ZextI8(i32 %p) {
%v = trunc i32 %p to i8
%v16 = zext i8 %v to i16
%v32 = zext i8 %v to i32
@@ -96,7 +96,7 @@
ret i32 %v32
}
-; CHECK-NEXT: define i32 @ZextI8(i32 %__0) {
+; CHECK-NEXT: define internal i32 @ZextI8(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i8
; CHECK-NEXT: %__2 = zext i8 %__1 to i16
@@ -105,14 +105,14 @@
; CHECK-NEXT: ret i32 %__3
; CHECK-NEXT: }
-define i64 @ZextI16(i32 %p) {
+define internal i64 @ZextI16(i32 %p) {
%v = trunc i32 %p to i16
%v32 = zext i16 %v to i32
%v64 = zext i16 %v to i64
ret i64 %v64
}
-; CHECK-NEXT: define i64 @ZextI16(i32 %__0) {
+; CHECK-NEXT: define internal i64 @ZextI16(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i16
; CHECK-NEXT: %__2 = zext i16 %__1 to i32
@@ -120,18 +120,18 @@
; CHECK-NEXT: ret i64 %__3
; CHECK-NEXT: }
-define i64 @Zexti32(i32 %v) {
+define internal i64 @Zexti32(i32 %v) {
%v64 = zext i32 %v to i64
ret i64 %v64
}
-; CHECK-NEXT: define i64 @Zexti32(i32 %__0) {
+; CHECK-NEXT: define internal i64 @Zexti32(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = zext i32 %__0 to i64
; CHECK-NEXT: ret i64 %__1
; CHECK-NEXT: }
-define i32 @SextI1(i32 %p) {
+define internal i32 @SextI1(i32 %p) {
%v = trunc i32 %p to i1
%v8 = sext i1 %v to i8
%v16 = sext i1 %v to i16
@@ -140,7 +140,7 @@
ret i32 %v32
}
-; CHECK-NEXT: define i32 @SextI1(i32 %__0) {
+; CHECK-NEXT: define internal i32 @SextI1(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
; CHECK-NEXT: %__2 = sext i1 %__1 to i8
@@ -150,7 +150,7 @@
; CHECK-NEXT: ret i32 %__4
; CHECK-NEXT: }
-define i64 @SextI8(i32 %p) {
+define internal i64 @SextI8(i32 %p) {
%v = trunc i32 %p to i8
%v16 = sext i8 %v to i16
%v32 = sext i8 %v to i32
@@ -158,7 +158,7 @@
ret i64 %v64
}
-; CHECK-NEXT: define i64 @SextI8(i32 %__0) {
+; CHECK-NEXT: define internal i64 @SextI8(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i8
; CHECK-NEXT: %__2 = sext i8 %__1 to i16
@@ -167,14 +167,14 @@
; CHECK-NEXT: ret i64 %__4
; CHECK-NEXT: }
-define i32 @SextI16(i32 %p) {
+define internal i32 @SextI16(i32 %p) {
%v = trunc i32 %p to i16
%v32 = sext i16 %v to i32
%v64 = sext i16 %v to i64
ret i32 %v32
}
-; CHECK-NEXT: define i32 @SextI16(i32 %__0) {
+; CHECK-NEXT: define internal i32 @SextI16(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i16
; CHECK-NEXT: %__2 = sext i16 %__1 to i32
@@ -182,40 +182,40 @@
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }
-define i64 @Sexti32(i32 %v) {
+define internal i64 @Sexti32(i32 %v) {
%v64 = sext i32 %v to i64
ret i64 %v64
}
-; CHECK-NEXT: define i64 @Sexti32(i32 %__0) {
+; CHECK-NEXT: define internal i64 @Sexti32(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = sext i32 %__0 to i64
; CHECK-NEXT: ret i64 %__1
; CHECK-NEXT: }
-define float @Fptrunc(double %v) {
+define internal float @Fptrunc(double %v) {
%vfloat = fptrunc double %v to float
ret float %vfloat
}
-; CHECK-NEXT: define float @Fptrunc(double %__0) {
+; CHECK-NEXT: define internal float @Fptrunc(double %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = fptrunc double %__0 to float
; CHECK-NEXT: ret float %__1
; CHECK-NEXT: }
-define double @Fpext(float %v) {
+define internal double @Fpext(float %v) {
%vdouble = fpext float %v to double
ret double %vdouble
}
-; CHECK-NEXT: define double @Fpext(float %__0) {
+; CHECK-NEXT: define internal double @Fpext(float %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = fpext float %__0 to double
; CHECK-NEXT: ret double %__1
; CHECK-NEXT: }
-define i32 @FptouiFloat(float %v) {
+define internal i32 @FptouiFloat(float %v) {
%v1 = fptoui float %v to i1
%v8 = fptoui float %v to i8
%v16 = fptoui float %v to i16
@@ -224,7 +224,7 @@
ret i32 %v32
}
-; CHECK-NEXT: define i32 @FptouiFloat(float %__0) {
+; CHECK-NEXT: define internal i32 @FptouiFloat(float %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = fptoui float %__0 to i1
; CHECK-NEXT: %__2 = fptoui float %__0 to i8
@@ -234,7 +234,7 @@
; CHECK-NEXT: ret i32 %__4
; CHECK-NEXT: }
-define i32 @FptouiDouble(double %v) {
+define internal i32 @FptouiDouble(double %v) {
%v1 = fptoui double %v to i1
%v8 = fptoui double %v to i8
%v16 = fptoui double %v to i16
@@ -243,7 +243,7 @@
ret i32 %v32
}
-; CHECK-NEXT: define i32 @FptouiDouble(double %__0) {
+; CHECK-NEXT: define internal i32 @FptouiDouble(double %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = fptoui double %__0 to i1
; CHECK-NEXT: %__2 = fptoui double %__0 to i8
@@ -253,7 +253,7 @@
; CHECK-NEXT: ret i32 %__4
; CHECK-NEXT: }
-define i32 @FptosiFloat(float %v) {
+define internal i32 @FptosiFloat(float %v) {
%v1 = fptosi float %v to i1
%v8 = fptosi float %v to i8
%v16 = fptosi float %v to i16
@@ -262,7 +262,7 @@
ret i32 %v32
}
-; CHECK-NEXT: define i32 @FptosiFloat(float %__0) {
+; CHECK-NEXT: define internal i32 @FptosiFloat(float %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = fptosi float %__0 to i1
; CHECK-NEXT: %__2 = fptosi float %__0 to i8
@@ -272,7 +272,7 @@
; CHECK-NEXT: ret i32 %__4
; CHECK-NEXT: }
-define i32 @FptosiDouble(double %v) {
+define internal i32 @FptosiDouble(double %v) {
%v1 = fptosi double %v to i1
%v8 = fptosi double %v to i8
%v16 = fptosi double %v to i16
@@ -281,7 +281,7 @@
ret i32 %v32
}
-; CHECK-NEXT: define i32 @FptosiDouble(double %__0) {
+; CHECK-NEXT: define internal i32 @FptosiDouble(double %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = fptosi double %__0 to i1
; CHECK-NEXT: %__2 = fptosi double %__0 to i8
@@ -291,14 +291,14 @@
; CHECK-NEXT: ret i32 %__4
; CHECK-NEXT: }
-define float @UitofpI1(i32 %p) {
+define internal float @UitofpI1(i32 %p) {
%v = trunc i32 %p to i1
%vfloat = uitofp i1 %v to float
%vdouble = uitofp i1 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @UitofpI1(i32 %__0) {
+; CHECK-NEXT: define internal float @UitofpI1(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
; CHECK-NEXT: %__2 = uitofp i1 %__1 to float
@@ -306,14 +306,14 @@
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }
-define float @UitofpI8(i32 %p) {
+define internal float @UitofpI8(i32 %p) {
%v = trunc i32 %p to i8
%vfloat = uitofp i8 %v to float
%vdouble = uitofp i8 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @UitofpI8(i32 %__0) {
+; CHECK-NEXT: define internal float @UitofpI8(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i8
; CHECK-NEXT: %__2 = uitofp i8 %__1 to float
@@ -321,14 +321,14 @@
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }
-define float @UitofpI16(i32 %p) {
+define internal float @UitofpI16(i32 %p) {
%v = trunc i32 %p to i16
%vfloat = uitofp i16 %v to float
%vdouble = uitofp i16 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @UitofpI16(i32 %__0) {
+; CHECK-NEXT: define internal float @UitofpI16(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i16
; CHECK-NEXT: %__2 = uitofp i16 %__1 to float
@@ -336,40 +336,40 @@
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }
-define float @UitofpI32(i32 %v) {
+define internal float @UitofpI32(i32 %v) {
%vfloat = uitofp i32 %v to float
%vdouble = uitofp i32 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @UitofpI32(i32 %__0) {
+; CHECK-NEXT: define internal float @UitofpI32(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = uitofp i32 %__0 to float
; CHECK-NEXT: %__2 = uitofp i32 %__0 to double
; CHECK-NEXT: ret float %__1
; CHECK-NEXT: }
-define float @UitofpI64(i64 %v) {
+define internal float @UitofpI64(i64 %v) {
%vfloat = uitofp i64 %v to float
%vdouble = uitofp i64 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @UitofpI64(i64 %__0) {
+; CHECK-NEXT: define internal float @UitofpI64(i64 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = uitofp i64 %__0 to float
; CHECK-NEXT: %__2 = uitofp i64 %__0 to double
; CHECK-NEXT: ret float %__1
; CHECK-NEXT: }
-define float @SitofpI1(i32 %p) {
+define internal float @SitofpI1(i32 %p) {
%v = trunc i32 %p to i1
%vfloat = sitofp i1 %v to float
%vdouble = sitofp i1 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @SitofpI1(i32 %__0) {
+; CHECK-NEXT: define internal float @SitofpI1(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
; CHECK-NEXT: %__2 = sitofp i1 %__1 to float
@@ -377,14 +377,14 @@
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }
-define float @SitofpI8(i32 %p) {
+define internal float @SitofpI8(i32 %p) {
%v = trunc i32 %p to i8
%vfloat = sitofp i8 %v to float
%vdouble = sitofp i8 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @SitofpI8(i32 %__0) {
+; CHECK-NEXT: define internal float @SitofpI8(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i8
; CHECK-NEXT: %__2 = sitofp i8 %__1 to float
@@ -392,14 +392,14 @@
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }
-define float @SitofpI16(i32 %p) {
+define internal float @SitofpI16(i32 %p) {
%v = trunc i32 %p to i16
%vfloat = sitofp i16 %v to float
%vdouble = sitofp i16 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @SitofpI16(i32 %__0) {
+; CHECK-NEXT: define internal float @SitofpI16(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = trunc i32 %__0 to i16
; CHECK-NEXT: %__2 = sitofp i16 %__1 to float
@@ -407,84 +407,84 @@
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }
-define float @SitofpI32(i32 %v) {
+define internal float @SitofpI32(i32 %v) {
%vfloat = sitofp i32 %v to float
%vdouble = sitofp i32 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @SitofpI32(i32 %__0) {
+; CHECK-NEXT: define internal float @SitofpI32(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = sitofp i32 %__0 to float
; CHECK-NEXT: %__2 = sitofp i32 %__0 to double
; CHECK-NEXT: ret float %__1
; CHECK-NEXT: }
-define float @SitofpI64(i64 %v) {
+define internal float @SitofpI64(i64 %v) {
%vfloat = sitofp i64 %v to float
%vdouble = sitofp i64 %v to double
ret float %vfloat
}
-; CHECK-NEXT: define float @SitofpI64(i64 %__0) {
+; CHECK-NEXT: define internal float @SitofpI64(i64 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = sitofp i64 %__0 to float
; CHECK-NEXT: %__2 = sitofp i64 %__0 to double
; CHECK-NEXT: ret float %__1
; CHECK-NEXT: }
-define float @BitcastI32(i32 %v) {
+define internal float @BitcastI32(i32 %v) {
%vfloat = bitcast i32 %v to float
ret float %vfloat
}
-; CHECK-NEXT: define float @BitcastI32(i32 %__0) {
+; CHECK-NEXT: define internal float @BitcastI32(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast i32 %__0 to float
; CHECK-NEXT: ret float %__1
; CHECK-NEXT: }
-define double @BitcastI64(i64 %v) {
+define internal double @BitcastI64(i64 %v) {
%vdouble = bitcast i64 %v to double
ret double %vdouble
}
-; CHECK-NEXT: define double @BitcastI64(i64 %__0) {
+; CHECK-NEXT: define internal double @BitcastI64(i64 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast i64 %__0 to double
; CHECK-NEXT: ret double %__1
; CHECK-NEXT: }
-define i32 @BitcastFloat(float %v) {
+define internal i32 @BitcastFloat(float %v) {
%vi32 = bitcast float %v to i32
ret i32 %vi32
}
-; CHECK-NEXT: define i32 @BitcastFloat(float %__0) {
+; CHECK-NEXT: define internal i32 @BitcastFloat(float %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast float %__0 to i32
; CHECK-NEXT: ret i32 %__1
; CHECK-NEXT: }
-define i64 @BitcastDouble(double %v) {
+define internal i64 @BitcastDouble(double %v) {
%vi64 = bitcast double %v to i64
ret i64 %vi64
}
-; CHECK-NEXT: define i64 @BitcastDouble(double %__0) {
+; CHECK-NEXT: define internal i64 @BitcastDouble(double %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast double %__0 to i64
; CHECK-NEXT: ret i64 %__1
; CHECK-NEXT: }
-define void @BitcastV4xFloat(<4 x float> %v) {
+define internal void @BitcastV4xFloat(<4 x float> %v) {
%v4xi32 = bitcast <4 x float> %v to <4 x i32>
%v8xi16 = bitcast <4 x float> %v to <8 x i16>
%v16xi8 = bitcast <4 x float> %v to <16 x i8>
ret void
}
-; CHECK-NEXT: define void @BitcastV4xFloat(<4 x float> %__0) {
+; CHECK-NEXT: define internal void @BitcastV4xFloat(<4 x float> %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast <4 x float> %__0 to <4 x i32>
; CHECK-NEXT: %__2 = bitcast <4 x float> %__0 to <8 x i16>
@@ -492,14 +492,14 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @BitcastV4xi32(<4 x i32> %v) {
+define internal void @BitcastV4xi32(<4 x i32> %v) {
%v4xfloat = bitcast <4 x i32> %v to <4 x float>
%v8xi16 = bitcast <4 x i32> %v to <8 x i16>
%v16xi8 = bitcast <4 x i32> %v to <16 x i8>
ret void
}
-; CHECK-NEXT: define void @BitcastV4xi32(<4 x i32> %__0) {
+; CHECK-NEXT: define internal void @BitcastV4xi32(<4 x i32> %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast <4 x i32> %__0 to <4 x float>
; CHECK-NEXT: %__2 = bitcast <4 x i32> %__0 to <8 x i16>
@@ -507,14 +507,14 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @BitcastV8xi16(<8 x i16> %v) {
+define internal void @BitcastV8xi16(<8 x i16> %v) {
%v4xfloat = bitcast <8 x i16> %v to <4 x float>
%v4xi32 = bitcast <8 x i16> %v to <4 x i32>
%v16xi8 = bitcast <8 x i16> %v to <16 x i8>
ret void
}
-; CHECK-NEXT: define void @BitcastV8xi16(<8 x i16> %__0) {
+; CHECK-NEXT: define internal void @BitcastV8xi16(<8 x i16> %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast <8 x i16> %__0 to <4 x float>
; CHECK-NEXT: %__2 = bitcast <8 x i16> %__0 to <4 x i32>
@@ -522,14 +522,14 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @BitcastV16xi8(<16 x i8> %v) {
+define internal void @BitcastV16xi8(<16 x i8> %v) {
%v4xfloat = bitcast <16 x i8> %v to <4 x float>
%v4xi32 = bitcast <16 x i8> %v to <4 x i32>
%v8xi16 = bitcast <16 x i8> %v to <8 x i16>
ret void
}
-; CHECK-NEXT: define void @BitcastV16xi8(<16 x i8> %__0) {
+; CHECK-NEXT: define internal void @BitcastV16xi8(<16 x i8> %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__1 = bitcast <16 x i8> %__0 to <4 x float>
; CHECK-NEXT: %__2 = bitcast <16 x i8> %__0 to <4 x i32>
diff --git a/tests_lit/reader_tests/compare.ll b/tests_lit/reader_tests/compare.ll
index 7c92f51..d140bfd 100644
--- a/tests_lit/reader_tests/compare.ll
+++ b/tests_lit/reader_tests/compare.ll
@@ -6,7 +6,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define i1 @IcmpI1(i32 %p1, i32 %p2) {
+define internal i1 @IcmpI1(i32 %p1, i32 %p2) {
entry:
%a1 = trunc i32 %p1 to i1
%a2 = trunc i32 %p2 to i1
@@ -23,7 +23,7 @@
ret i1 %veq
}
-; CHECK: define i1 @IcmpI1(i32 %p1, i32 %p2) {
+; CHECK: define internal i1 @IcmpI1(i32 %p1, i32 %p2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a1 = trunc i32 %p1 to i1
; CHECK-NEXT: %a2 = trunc i32 %p2 to i1
@@ -40,7 +40,7 @@
; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
-define i1 @IcmpI8(i32 %p1, i32 %p2) {
+define internal i1 @IcmpI8(i32 %p1, i32 %p2) {
entry:
%a1 = trunc i32 %p1 to i8
%a2 = trunc i32 %p2 to i8
@@ -57,7 +57,7 @@
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI8(i32 %p1, i32 %p2) {
+; CHECK-NEXT: define internal i1 @IcmpI8(i32 %p1, i32 %p2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a1 = trunc i32 %p1 to i8
; CHECK-NEXT: %a2 = trunc i32 %p2 to i8
@@ -74,7 +74,7 @@
; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
-define i1 @IcmpI16(i32 %p1, i32 %p2) {
+define internal i1 @IcmpI16(i32 %p1, i32 %p2) {
entry:
%a1 = trunc i32 %p1 to i16
%a2 = trunc i32 %p2 to i16
@@ -91,7 +91,7 @@
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI16(i32 %p1, i32 %p2) {
+; CHECK-NEXT: define internal i1 @IcmpI16(i32 %p1, i32 %p2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a1 = trunc i32 %p1 to i16
; CHECK-NEXT: %a2 = trunc i32 %p2 to i16
@@ -108,7 +108,7 @@
; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
-define i1 @IcmpI32(i32 %a1, i32 %a2) {
+define internal i1 @IcmpI32(i32 %a1, i32 %a2) {
entry:
%veq = icmp eq i32 %a1, %a2
%vne = icmp ne i32 %a1, %a2
@@ -123,7 +123,7 @@
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI32(i32 %a1, i32 %a2) {
+; CHECK-NEXT: define internal i1 @IcmpI32(i32 %a1, i32 %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq i32 %a1, %a2
; CHECK-NEXT: %vne = icmp ne i32 %a1, %a2
@@ -138,7 +138,7 @@
; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
-define i1 @IcmpI64(i64 %a1, i64 %a2) {
+define internal i1 @IcmpI64(i64 %a1, i64 %a2) {
entry:
%veq = icmp eq i64 %a1, %a2
%vne = icmp ne i64 %a1, %a2
@@ -153,7 +153,7 @@
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI64(i64 %a1, i64 %a2) {
+; CHECK-NEXT: define internal i1 @IcmpI64(i64 %a1, i64 %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq i64 %a1, %a2
; CHECK-NEXT: %vne = icmp ne i64 %a1, %a2
@@ -168,7 +168,7 @@
; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
-define <4 x i1> @IcmpV4xI1(<4 x i1> %a1, <4 x i1> %a2) {
+define internal <4 x i1> @IcmpV4xI1(<4 x i1> %a1, <4 x i1> %a2) {
entry:
%veq = icmp eq <4 x i1> %a1, %a2
%vne = icmp ne <4 x i1> %a1, %a2
@@ -183,7 +183,7 @@
ret <4 x i1> %veq
}
-; CHECK-NEXT: define <4 x i1> @IcmpV4xI1(<4 x i1> %a1, <4 x i1> %a2) {
+; CHECK-NEXT: define internal <4 x i1> @IcmpV4xI1(<4 x i1> %a1, <4 x i1> %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq <4 x i1> %a1, %a2
; CHECK-NEXT: %vne = icmp ne <4 x i1> %a1, %a2
@@ -198,7 +198,7 @@
; CHECK-NEXT: ret <4 x i1> %veq
; CHECK-NEXT: }
-define <8 x i1> @IcmpV8xI1(<8 x i1> %a1, <8 x i1> %a2) {
+define internal <8 x i1> @IcmpV8xI1(<8 x i1> %a1, <8 x i1> %a2) {
entry:
%veq = icmp eq <8 x i1> %a1, %a2
%vne = icmp ne <8 x i1> %a1, %a2
@@ -213,7 +213,7 @@
ret <8 x i1> %veq
}
-; CHECK-NEXT: define <8 x i1> @IcmpV8xI1(<8 x i1> %a1, <8 x i1> %a2) {
+; CHECK-NEXT: define internal <8 x i1> @IcmpV8xI1(<8 x i1> %a1, <8 x i1> %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq <8 x i1> %a1, %a2
; CHECK-NEXT: %vne = icmp ne <8 x i1> %a1, %a2
@@ -228,7 +228,7 @@
; CHECK-NEXT: ret <8 x i1> %veq
; CHECK-NEXT: }
-define <16 x i1> @IcmpV16xI1(<16 x i1> %a1, <16 x i1> %a2) {
+define internal <16 x i1> @IcmpV16xI1(<16 x i1> %a1, <16 x i1> %a2) {
entry:
%veq = icmp eq <16 x i1> %a1, %a2
%vne = icmp ne <16 x i1> %a1, %a2
@@ -243,7 +243,7 @@
ret <16 x i1> %veq
}
-; CHECK-NEXT: define <16 x i1> @IcmpV16xI1(<16 x i1> %a1, <16 x i1> %a2) {
+; CHECK-NEXT: define internal <16 x i1> @IcmpV16xI1(<16 x i1> %a1, <16 x i1> %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq <16 x i1> %a1, %a2
; CHECK-NEXT: %vne = icmp ne <16 x i1> %a1, %a2
@@ -258,7 +258,7 @@
; CHECK-NEXT: ret <16 x i1> %veq
; CHECK-NEXT: }
-define <16 x i1> @IcmpV16xI8(<16 x i8> %a1, <16 x i8> %a2) {
+define internal <16 x i1> @IcmpV16xI8(<16 x i8> %a1, <16 x i8> %a2) {
entry:
%veq = icmp eq <16 x i8> %a1, %a2
%vne = icmp ne <16 x i8> %a1, %a2
@@ -273,7 +273,7 @@
ret <16 x i1> %veq
}
-; CHECK-NEXT: define <16 x i1> @IcmpV16xI8(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-NEXT: define internal <16 x i1> @IcmpV16xI8(<16 x i8> %a1, <16 x i8> %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq <16 x i8> %a1, %a2
; CHECK-NEXT: %vne = icmp ne <16 x i8> %a1, %a2
@@ -288,7 +288,7 @@
; CHECK-NEXT: ret <16 x i1> %veq
; CHECK-NEXT: }
-define <8 x i1> @IcmpV8xI16(<8 x i16> %a1, <8 x i16> %a2) {
+define internal <8 x i1> @IcmpV8xI16(<8 x i16> %a1, <8 x i16> %a2) {
entry:
%veq = icmp eq <8 x i16> %a1, %a2
%vne = icmp ne <8 x i16> %a1, %a2
@@ -303,7 +303,7 @@
ret <8 x i1> %veq
}
-; CHECK-NEXT: define <8 x i1> @IcmpV8xI16(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-NEXT: define internal <8 x i1> @IcmpV8xI16(<8 x i16> %a1, <8 x i16> %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq <8 x i16> %a1, %a2
; CHECK-NEXT: %vne = icmp ne <8 x i16> %a1, %a2
@@ -318,7 +318,7 @@
; CHECK-NEXT: ret <8 x i1> %veq
; CHECK-NEXT: }
-define <4 x i1> @IcmpV4xI32(<4 x i32> %a1, <4 x i32> %a2) {
+define internal <4 x i1> @IcmpV4xI32(<4 x i32> %a1, <4 x i32> %a2) {
entry:
%veq = icmp eq <4 x i32> %a1, %a2
%vne = icmp ne <4 x i32> %a1, %a2
@@ -333,7 +333,7 @@
ret <4 x i1> %veq
}
-; CHECK-NEXT: define <4 x i1> @IcmpV4xI32(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-NEXT: define internal <4 x i1> @IcmpV4xI32(<4 x i32> %a1, <4 x i32> %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %veq = icmp eq <4 x i32> %a1, %a2
; CHECK-NEXT: %vne = icmp ne <4 x i32> %a1, %a2
@@ -348,7 +348,7 @@
; CHECK-NEXT: ret <4 x i1> %veq
; CHECK-NEXT: }
-define i1 @FcmpFloat(float %a1, float %a2) {
+define internal i1 @FcmpFloat(float %a1, float %a2) {
entry:
%vfalse = fcmp false float %a1, %a2
%voeq = fcmp oeq float %a1, %a2
@@ -369,7 +369,7 @@
ret i1 %voeq
}
-; CHECK-NEXT: define i1 @FcmpFloat(float %a1, float %a2) {
+; CHECK-NEXT: define internal i1 @FcmpFloat(float %a1, float %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vfalse = fcmp false float %a1, %a2
; CHECK-NEXT: %voeq = fcmp oeq float %a1, %a2
@@ -390,7 +390,7 @@
; CHECK-NEXT: ret i1 %voeq
; CHECK-NEXT: }
-define i1 @FcmpDouble(double %a1, double %a2) {
+define internal i1 @FcmpDouble(double %a1, double %a2) {
entry:
%vfalse = fcmp false double %a1, %a2
%voeq = fcmp oeq double %a1, %a2
@@ -411,7 +411,7 @@
ret i1 %voeq
}
-; CHECK-NEXT: define i1 @FcmpDouble(double %a1, double %a2) {
+; CHECK-NEXT: define internal i1 @FcmpDouble(double %a1, double %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vfalse = fcmp false double %a1, %a2
; CHECK-NEXT: %voeq = fcmp oeq double %a1, %a2
@@ -432,7 +432,7 @@
; CHECK-NEXT: ret i1 %voeq
; CHECK-NEXT: }
-define <4 x i1> @FcmpV4xFloat(<4 x float> %a1, <4 x float> %a2) {
+define internal <4 x i1> @FcmpV4xFloat(<4 x float> %a1, <4 x float> %a2) {
entry:
%vfalse = fcmp false <4 x float> %a1, %a2
%voeq = fcmp oeq <4 x float> %a1, %a2
@@ -453,7 +453,7 @@
ret <4 x i1> %voeq
}
-; CHECK-NEXT: define <4 x i1> @FcmpV4xFloat(<4 x float> %a1, <4 x float> %a2) {
+; CHECK-NEXT: define internal <4 x i1> @FcmpV4xFloat(<4 x float> %a1, <4 x float> %a2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vfalse = fcmp false <4 x float> %a1, %a2
; CHECK-NEXT: %voeq = fcmp oeq <4 x float> %a1, %a2
diff --git a/tests_lit/reader_tests/constants.ll b/tests_lit/reader_tests/constants.ll
index d8127e6..f6efeaf 100644
--- a/tests_lit/reader_tests/constants.ll
+++ b/tests_lit/reader_tests/constants.ll
@@ -6,7 +6,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define void @TestIntegers() {
+define internal void @TestIntegers() {
entry:
; CHECK: entry:
@@ -62,7 +62,7 @@
}
-define void @TestFloats() {
+define internal void @TestFloats() {
entry:
; CHECK: entry:
diff --git a/tests_lit/reader_tests/extern_globals.ll b/tests_lit/reader_tests/extern_globals.ll
index 9d5eb45..ddc870e 100644
--- a/tests_lit/reader_tests/extern_globals.ll
+++ b/tests_lit/reader_tests/extern_globals.ll
@@ -8,8 +8,10 @@
; work if we read LLVM IR source, and convert to to ICE.
; REQUIRES: allow_llvm_ir_as_input
-; RUN: %lc2i -i %s --insts --args --allow-uninitialized-globals | FileCheck %s
; RUN: %lc2i -i %s --insts --args --allow-uninitialized-globals \
+; RUN: -allow-externally-defined-symbols | FileCheck %s
+; RUN: %lc2i -i %s --insts --args --allow-uninitialized-globals \
+; RUN: -allow-externally-defined-symbols \
; RUN: -prefix Subzero_ | FileCheck --check-prefix=CROSS %s
@ArrayInitPartial = internal global [40 x i8] c"<\00\00\00F\00\00\00P\00\00\00Z\00\00\00d\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 4
diff --git a/tests_lit/reader_tests/forwardref.ll b/tests_lit/reader_tests/forwardref.ll
index 86b41b7..980f633 100644
--- a/tests_lit/reader_tests/forwardref.ll
+++ b/tests_lit/reader_tests/forwardref.ll
@@ -8,7 +8,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define void @LoopCarriedDep() {
+define internal void @LoopCarriedDep() {
b0:
%v0 = add i32 1, 2
br label %b1
@@ -18,7 +18,7 @@
br label %b1
}
-; CHECK: define void @LoopCarriedDep() {
+; CHECK: define internal void @LoopCarriedDep() {
; CHECK-NEXT: b0:
; CHECK-NEXT: %v0 = add i32 1, 2
; CHECK-NEXT: br label %b1
@@ -46,7 +46,7 @@
; DUMP-NEXT: br label %b1;
; DUMP-NEXT: }
-define void @BackBranch(i32 %p0) {
+define internal void @BackBranch(i32 %p0) {
b0:
br label %b4
b1:
@@ -68,7 +68,7 @@
ret void
}
-; CHECK: define void @BackBranch(i32 %p0) {
+; CHECK: define internal void @BackBranch(i32 %p0) {
; CHECK-NEXT: b0:
; CHECK-NEXT: br label %b4
; CHECK-NEXT: b1:
diff --git a/tests_lit/reader_tests/globalinit.pnacl.ll b/tests_lit/reader_tests/globalinit.pnacl.ll
index 0d92351..73c15e3 100644
--- a/tests_lit/reader_tests/globalinit.pnacl.ll
+++ b/tests_lit/reader_tests/globalinit.pnacl.ll
@@ -1,10 +1,14 @@
; Test of global initializers.
-; RUN: %p2i -i %s --insts | FileCheck %s
-; RUN: %l2i -i %s --insts | %ifl FileCheck %s
-; RUN: %lc2i -i %s --insts | %iflc FileCheck %s
+; RUN: %p2i -i %s --insts --args -allow-externally-defined-symbols \
+; RUN: | FileCheck %s
+; RUN: %l2i -i %s --insts --args -allow-externally-defined-symbols \
+; RUN: | %ifl FileCheck %s
+; RUN: %lc2i -i %s --insts --args -allow-externally-defined-symbols \
+; RUN: | %iflc FileCheck %s
; RUN: %if --need=allow_disable_ir_gen --command \
; RUN: %p2i -i %s --args -notranslate -timing -no-ir-gen \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
diff --git a/tests_lit/reader_tests/globalrelocs.ll b/tests_lit/reader_tests/globalrelocs.ll
index a6a44f7..55d9524 100644
--- a/tests_lit/reader_tests/globalrelocs.ll
+++ b/tests_lit/reader_tests/globalrelocs.ll
@@ -93,10 +93,10 @@
@short = internal constant [2 x i8] zeroinitializer
; CHECK-NEXT: @short = internal constant [2 x i8] zeroinitializer
-define void @func() {
+define internal void @func() {
ret void
}
-; CHECK-NEXT: define void @func() {
+; CHECK-NEXT: define internal void @func() {
; NOIR: Total across all functions
diff --git a/tests_lit/reader_tests/insertextract.ll b/tests_lit/reader_tests/insertextract.ll
index ca01469..8259065 100644
--- a/tests_lit/reader_tests/insertextract.ll
+++ b/tests_lit/reader_tests/insertextract.ll
@@ -8,7 +8,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define void @ExtractV4xi1(<4 x i1> %v) {
+define internal void @ExtractV4xi1(<4 x i1> %v) {
entry:
%e0 = extractelement <4 x i1> %v, i32 0
%e1 = extractelement <4 x i1> %v, i32 1
@@ -17,7 +17,7 @@
ret void
}
-; CHECK: define void @ExtractV4xi1(<4 x i1> %v) {
+; CHECK: define internal void @ExtractV4xi1(<4 x i1> %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e0 = extractelement <4 x i1> %v, i32 0
; CHECK-NEXT: %e1 = extractelement <4 x i1> %v, i32 1
@@ -26,7 +26,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @ExtractV8xi1(<8 x i1> %v) {
+define internal void @ExtractV8xi1(<8 x i1> %v) {
entry:
%e0 = extractelement <8 x i1> %v, i32 0
%e1 = extractelement <8 x i1> %v, i32 1
@@ -39,7 +39,7 @@
ret void
}
-; CHECK-NEXT: define void @ExtractV8xi1(<8 x i1> %v) {
+; CHECK-NEXT: define internal void @ExtractV8xi1(<8 x i1> %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e0 = extractelement <8 x i1> %v, i32 0
; CHECK-NEXT: %e1 = extractelement <8 x i1> %v, i32 1
@@ -52,7 +52,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @ExtractV16xi1(<16 x i1> %v) {
+define internal void @ExtractV16xi1(<16 x i1> %v) {
entry:
%e0 = extractelement <16 x i1> %v, i32 0
%e1 = extractelement <16 x i1> %v, i32 1
@@ -73,7 +73,7 @@
ret void
}
-; CHECK-NEXT: define void @ExtractV16xi1(<16 x i1> %v) {
+; CHECK-NEXT: define internal void @ExtractV16xi1(<16 x i1> %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e0 = extractelement <16 x i1> %v, i32 0
; CHECK-NEXT: %e1 = extractelement <16 x i1> %v, i32 1
@@ -94,7 +94,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @ExtractV16xi8(<16 x i8> %v, i32 %i) {
+define internal void @ExtractV16xi8(<16 x i8> %v, i32 %i) {
entry:
%e0 = extractelement <16 x i8> %v, i32 0
%e1 = extractelement <16 x i8> %v, i32 1
@@ -115,7 +115,7 @@
ret void
}
-; CHECK-NEXT: define void @ExtractV16xi8(<16 x i8> %v, i32 %i) {
+; CHECK-NEXT: define internal void @ExtractV16xi8(<16 x i8> %v, i32 %i) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e0 = extractelement <16 x i8> %v, i32 0
; CHECK-NEXT: %e1 = extractelement <16 x i8> %v, i32 1
@@ -136,7 +136,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @ExtractV8xi16(<8 x i16> %v) {
+define internal void @ExtractV8xi16(<8 x i16> %v) {
entry:
%e0 = extractelement <8 x i16> %v, i32 0
%e1 = extractelement <8 x i16> %v, i32 1
@@ -149,7 +149,7 @@
ret void
}
-; CHECK-NEXT: define void @ExtractV8xi16(<8 x i16> %v) {
+; CHECK-NEXT: define internal void @ExtractV8xi16(<8 x i16> %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e0 = extractelement <8 x i16> %v, i32 0
; CHECK-NEXT: %e1 = extractelement <8 x i16> %v, i32 1
@@ -162,7 +162,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define i32 @ExtractV4xi32(<4 x i32> %v) {
+define internal i32 @ExtractV4xi32(<4 x i32> %v) {
entry:
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
@@ -171,7 +171,7 @@
ret i32 %e0
}
-; CHECK-NEXT: define i32 @ExtractV4xi32(<4 x i32> %v) {
+; CHECK-NEXT: define internal i32 @ExtractV4xi32(<4 x i32> %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e0 = extractelement <4 x i32> %v, i32 0
; CHECK-NEXT: %e1 = extractelement <4 x i32> %v, i32 1
@@ -180,7 +180,7 @@
; CHECK-NEXT: ret i32 %e0
; CHECK-NEXT: }
-define float @ExtractV4xfloat(<4 x float> %v) {
+define internal float @ExtractV4xfloat(<4 x float> %v) {
entry:
%e0 = extractelement <4 x float> %v, i32 0
%e1 = extractelement <4 x float> %v, i32 1
@@ -189,7 +189,7 @@
ret float %e0
}
-; CHECK-NEXT: define float @ExtractV4xfloat(<4 x float> %v) {
+; CHECK-NEXT: define internal float @ExtractV4xfloat(<4 x float> %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e0 = extractelement <4 x float> %v, i32 0
; CHECK-NEXT: %e1 = extractelement <4 x float> %v, i32 1
@@ -198,7 +198,7 @@
; CHECK-NEXT: ret float %e0
; CHECK-NEXT: }
-define <4 x i1> @InsertV4xi1(<4 x i1> %v, i32 %pe) {
+define internal <4 x i1> @InsertV4xi1(<4 x i1> %v, i32 %pe) {
entry:
%e = trunc i32 %pe to i1
%r0 = insertelement <4 x i1> %v, i1 %e, i32 0
@@ -208,7 +208,7 @@
ret <4 x i1> %r3
}
-; CHECK-NEXT: define <4 x i1> @InsertV4xi1(<4 x i1> %v, i32 %pe) {
+; CHECK-NEXT: define internal <4 x i1> @InsertV4xi1(<4 x i1> %v, i32 %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e = trunc i32 %pe to i1
; CHECK-NEXT: %r0 = insertelement <4 x i1> %v, i1 %e, i32 0
@@ -218,7 +218,7 @@
; CHECK-NEXT: ret <4 x i1> %r3
; CHECK-NEXT: }
-define <8 x i1> @InsertV8xi1(<8 x i1> %v, i32 %pe) {
+define internal <8 x i1> @InsertV8xi1(<8 x i1> %v, i32 %pe) {
entry:
%e = trunc i32 %pe to i1
%r0 = insertelement <8 x i1> %v, i1 %e, i32 0
@@ -232,7 +232,7 @@
ret <8 x i1> %r7
}
-; CHECK-NEXT: define <8 x i1> @InsertV8xi1(<8 x i1> %v, i32 %pe) {
+; CHECK-NEXT: define internal <8 x i1> @InsertV8xi1(<8 x i1> %v, i32 %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e = trunc i32 %pe to i1
; CHECK-NEXT: %r0 = insertelement <8 x i1> %v, i1 %e, i32 0
@@ -246,7 +246,7 @@
; CHECK-NEXT: ret <8 x i1> %r7
; CHECK-NEXT: }
-define <16 x i1> @InsertV16xi1(<16 x i1> %v, i32 %pe) {
+define internal <16 x i1> @InsertV16xi1(<16 x i1> %v, i32 %pe) {
entry:
%e = trunc i32 %pe to i1
%r0 = insertelement <16 x i1> %v, i1 %e, i32 0
@@ -268,7 +268,7 @@
ret <16 x i1> %r15
}
-; CHECK-NEXT: define <16 x i1> @InsertV16xi1(<16 x i1> %v, i32 %pe) {
+; CHECK-NEXT: define internal <16 x i1> @InsertV16xi1(<16 x i1> %v, i32 %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e = trunc i32 %pe to i1
; CHECK-NEXT: %r0 = insertelement <16 x i1> %v, i1 %e, i32 0
@@ -290,7 +290,7 @@
; CHECK-NEXT: ret <16 x i1> %r15
; CHECK-NEXT: }
-define <16 x i8> @InsertV16xi8(<16 x i8> %v, i32 %pe) {
+define internal <16 x i8> @InsertV16xi8(<16 x i8> %v, i32 %pe) {
entry:
%e = trunc i32 %pe to i8
%r0 = insertelement <16 x i8> %v, i8 %e, i32 0
@@ -304,7 +304,7 @@
ret <16 x i8> %r7
}
-; CHECK-NEXT: define <16 x i8> @InsertV16xi8(<16 x i8> %v, i32 %pe) {
+; CHECK-NEXT: define internal <16 x i8> @InsertV16xi8(<16 x i8> %v, i32 %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e = trunc i32 %pe to i8
; CHECK-NEXT: %r0 = insertelement <16 x i8> %v, i8 %e, i32 0
@@ -318,7 +318,7 @@
; CHECK-NEXT: ret <16 x i8> %r7
; CHECK-NEXT: }
-define <8 x i16> @InsertV8xi16(<8 x i16> %v, i32 %pe) {
+define internal <8 x i16> @InsertV8xi16(<8 x i16> %v, i32 %pe) {
entry:
%e = trunc i32 %pe to i16
%r0 = insertelement <8 x i16> %v, i16 %e, i32 0
@@ -332,7 +332,7 @@
ret <8 x i16> %r7
}
-; CHECK-NEXT: define <8 x i16> @InsertV8xi16(<8 x i16> %v, i32 %pe) {
+; CHECK-NEXT: define internal <8 x i16> @InsertV8xi16(<8 x i16> %v, i32 %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %e = trunc i32 %pe to i16
; CHECK-NEXT: %r0 = insertelement <8 x i16> %v, i16 %e, i32 0
@@ -346,7 +346,7 @@
; CHECK-NEXT: ret <8 x i16> %r7
; CHECK-NEXT: }
-define <4 x i32> @InsertV4xi32(<4 x i32> %v, i32 %e) {
+define internal <4 x i32> @InsertV4xi32(<4 x i32> %v, i32 %e) {
entry:
%r0 = insertelement <4 x i32> %v, i32 %e, i32 0
%r1 = insertelement <4 x i32> %v, i32 %e, i32 1
@@ -355,7 +355,7 @@
ret <4 x i32> %r3
}
-; CHECK-NEXT: define <4 x i32> @InsertV4xi32(<4 x i32> %v, i32 %e) {
+; CHECK-NEXT: define internal <4 x i32> @InsertV4xi32(<4 x i32> %v, i32 %e) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r0 = insertelement <4 x i32> %v, i32 %e, i32 0
; CHECK-NEXT: %r1 = insertelement <4 x i32> %v, i32 %e, i32 1
@@ -364,7 +364,7 @@
; CHECK-NEXT: ret <4 x i32> %r3
; CHECK-NEXT: }
-define <4 x float> @InsertV4xfloat(<4 x float> %v, float %e) {
+define internal <4 x float> @InsertV4xfloat(<4 x float> %v, float %e) {
entry:
%r0 = insertelement <4 x float> %v, float %e, i32 0
%r1 = insertelement <4 x float> %v, float %e, i32 1
@@ -373,7 +373,7 @@
ret <4 x float> %r3
}
-; CHECK-NEXT: define <4 x float> @InsertV4xfloat(<4 x float> %v, float %e) {
+; CHECK-NEXT: define internal <4 x float> @InsertV4xfloat(<4 x float> %v, float %e) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r0 = insertelement <4 x float> %v, float %e, i32 0
; CHECK-NEXT: %r1 = insertelement <4 x float> %v, float %e, i32 1
diff --git a/tests_lit/reader_tests/load.ll b/tests_lit/reader_tests/load.ll
index 087e750..08d4e84 100644
--- a/tests_lit/reader_tests/load.ll
+++ b/tests_lit/reader_tests/load.ll
@@ -6,7 +6,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define i32 @load_i8(i32 %addr) {
+define internal i32 @load_i8(i32 %addr) {
entry:
%addr_i8 = inttoptr i32 %addr to i8*
%v = load i8, i8* %addr_i8, align 1
@@ -19,7 +19,7 @@
; CHECK-NEXT: ret i32 %__2
}
-define i32 @load_i16(i32 %addr) {
+define internal i32 @load_i16(i32 %addr) {
entry:
%addr_i16 = inttoptr i32 %addr to i16*
%v = load i16, i16* %addr_i16, align 1
@@ -32,7 +32,7 @@
; CHECK-NEXT: ret i32 %__2
}
-define i32 @load_i32(i32 %addr) {
+define internal i32 @load_i32(i32 %addr) {
entry:
%addr_i32 = inttoptr i32 %addr to i32*
%v = load i32, i32* %addr_i32, align 1
@@ -43,7 +43,7 @@
; CHECK-NEXT: ret i32 %__1
}
-define i64 @load_i64(i32 %addr) {
+define internal i64 @load_i64(i32 %addr) {
entry:
%addr_i64 = inttoptr i32 %addr to i64*
%v = load i64, i64* %addr_i64, align 1
@@ -54,7 +54,7 @@
; CHECK-NEXT: ret i64 %__1
}
-define float @load_float_a1(i32 %addr) {
+define internal float @load_float_a1(i32 %addr) {
entry:
%addr_float = inttoptr i32 %addr to float*
%v = load float, float* %addr_float, align 1
@@ -68,7 +68,7 @@
}
-define float @load_float_a4(i32 %addr) {
+define internal float @load_float_a4(i32 %addr) {
entry:
%addr_float = inttoptr i32 %addr to float*
%v = load float, float* %addr_float, align 4
@@ -79,7 +79,7 @@
; CHECK-NEXT: ret float %__1
}
-define double @load_double_a1(i32 %addr) {
+define internal double @load_double_a1(i32 %addr) {
entry:
%addr_double = inttoptr i32 %addr to double*
%v = load double, double* %addr_double, align 1
@@ -93,7 +93,7 @@
}
-define double @load_double_a8(i32 %addr) {
+define internal double @load_double_a8(i32 %addr) {
entry:
%addr_double = inttoptr i32 %addr to double*
%v = load double, double* %addr_double, align 8
@@ -104,7 +104,7 @@
; CHECK-NEXT: ret double %__1
}
-define <16 x i8> @load_v16xI8(i32 %addr) {
+define internal <16 x i8> @load_v16xI8(i32 %addr) {
entry:
%addr_v16xI8 = inttoptr i32 %addr to <16 x i8>*
%v = load <16 x i8>, <16 x i8>* %addr_v16xI8, align 1
@@ -115,7 +115,7 @@
; CHECK-NEXT: ret <16 x i8> %__1
}
-define <8 x i16> @load_v8xI16(i32 %addr) {
+define internal <8 x i16> @load_v8xI16(i32 %addr) {
entry:
%addr_v8xI16 = inttoptr i32 %addr to <8 x i16>*
%v = load <8 x i16>, <8 x i16>* %addr_v8xI16, align 2
@@ -126,7 +126,7 @@
; CHECK-NEXT: ret <8 x i16> %__1
}
-define <4 x i32> @load_v4xI32(i32 %addr) {
+define internal <4 x i32> @load_v4xI32(i32 %addr) {
entry:
%addr_v4xI32 = inttoptr i32 %addr to <4 x i32>*
%v = load <4 x i32>, <4 x i32>* %addr_v4xI32, align 4
@@ -137,7 +137,7 @@
; CHECK-NEXT: ret <4 x i32> %__1
}
-define <4 x float> @load_v4xFloat(i32 %addr) {
+define internal <4 x float> @load_v4xFloat(i32 %addr) {
entry:
%addr_v4xFloat = inttoptr i32 %addr to <4 x float>*
%v = load <4 x float>, <4 x float>* %addr_v4xFloat, align 4
diff --git a/tests_lit/reader_tests/nacl-atomic-intrinsics.ll b/tests_lit/reader_tests/nacl-atomic-intrinsics.ll
index b59b380..d12c723 100644
--- a/tests_lit/reader_tests/nacl-atomic-intrinsics.ll
+++ b/tests_lit/reader_tests/nacl-atomic-intrinsics.ll
@@ -28,7 +28,7 @@
;;; Load
-define i32 @test_atomic_load_8(i32 %iptr) {
+define internal i32 @test_atomic_load_8(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
; parameter value "6" is for the sequential consistency memory order.
@@ -37,14 +37,14 @@
ret i32 %r
}
-; CHECK: define i32 @test_atomic_load_8(i32 %iptr) {
+; CHECK: define internal i32 @test_atomic_load_8(i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %i = call i8 @llvm.nacl.atomic.load.i8(i32 %iptr, i32 6)
; CHECK-NEXT: %r = zext i8 %i to i32
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i32 @test_atomic_load_16(i32 %iptr) {
+define internal i32 @test_atomic_load_16(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i16*
%i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
@@ -52,34 +52,34 @@
ret i32 %r
}
-; CHECK-NEXT: define i32 @test_atomic_load_16(i32 %iptr) {
+; CHECK-NEXT: define internal i32 @test_atomic_load_16(i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %i = call i16 @llvm.nacl.atomic.load.i16(i32 %iptr, i32 6)
; CHECK-NEXT: %r = zext i16 %i to i32
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i32 @test_atomic_load_32(i32 %iptr) {
+define internal i32 @test_atomic_load_32(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
ret i32 %r
}
-; CHECK-NEXT: define i32 @test_atomic_load_32(i32 %iptr) {
+; CHECK-NEXT: define internal i32 @test_atomic_load_32(i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i32 @llvm.nacl.atomic.load.i32(i32 %iptr, i32 6)
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i64 @test_atomic_load_64(i32 %iptr) {
+define internal i64 @test_atomic_load_64(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
ret i64 %r
}
-; CHECK-NEXT: define i64 @test_atomic_load_64(i32 %iptr) {
+; CHECK-NEXT: define internal i64 @test_atomic_load_64(i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i64 @llvm.nacl.atomic.load.i64(i32 %iptr, i32 6)
; CHECK-NEXT: ret i64 %r
@@ -87,7 +87,7 @@
;;; Store
-define void @test_atomic_store_8(i32 %iptr, i32 %v) {
+define internal void @test_atomic_store_8(i32 %iptr, i32 %v) {
entry:
%truncv = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -95,14 +95,14 @@
ret void
}
-; CHECK-NEXT: define void @test_atomic_store_8(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal void @test_atomic_store_8(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %truncv = trunc i32 %v to i8
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %truncv, i32 %iptr, i32 6)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @test_atomic_store_16(i32 %iptr, i32 %v) {
+define internal void @test_atomic_store_16(i32 %iptr, i32 %v) {
entry:
%truncv = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -110,47 +110,47 @@
ret void
}
-; CHECK-NEXT: define void @test_atomic_store_16(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal void @test_atomic_store_16(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %truncv = trunc i32 %v to i16
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %truncv, i32 %iptr, i32 6)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @test_atomic_store_32(i32 %iptr, i32 %v) {
+define internal void @test_atomic_store_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6)
ret void
}
-; CHECK-NEXT: define void @test_atomic_store_32(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal void @test_atomic_store_32(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %v, i32 %iptr, i32 6)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @test_atomic_store_64(i32 %iptr, i64 %v) {
+define internal void @test_atomic_store_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6)
ret void
}
-; CHECK-NEXT: define void @test_atomic_store_64(i32 %iptr, i64 %v) {
+; CHECK-NEXT: define internal void @test_atomic_store_64(i32 %iptr, i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %v, i32 %iptr, i32 6)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @test_atomic_store_64_const(i32 %iptr) {
+define internal void @test_atomic_store_64_const(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6)
ret void
}
-; CHECK-NEXT: define void @test_atomic_store_64_const(i32 %iptr) {
+; CHECK-NEXT: define internal void @test_atomic_store_64_const(i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i32 %iptr, i32 6)
; CHECK-NEXT: ret void
@@ -160,7 +160,7 @@
;; add
-define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -170,7 +170,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i8
; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i32 %iptr, i8 %trunc, i32 6)
@@ -178,7 +178,7 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -187,7 +187,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i16
; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i32 %iptr, i16 %trunc, i32 6)
@@ -195,27 +195,27 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
ret i32 %a
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32 %iptr, i32 %v, i32 6)
; CHECK-NEXT: ret i32 %a
; CHECK-NEXT: }
-define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
ret i64 %a
}
-; CHECK-NEXT: define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
+; CHECK-NEXT: define internal i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i32 %iptr, i64 %v, i32 6)
; CHECK-NEXT: ret i64 %a
@@ -223,7 +223,7 @@
;; sub
-define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -232,7 +232,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i8
; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i32 %iptr, i8 %trunc, i32 6)
@@ -240,7 +240,7 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -249,7 +249,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i16
; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i32 %iptr, i16 %trunc, i32 6)
@@ -257,27 +257,27 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
ret i32 %a
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32 %iptr, i32 %v, i32 6)
; CHECK-NEXT: ret i32 %a
; CHECK-NEXT: }
-define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6)
ret i64 %a
}
-; CHECK-NEXT: define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
+; CHECK-NEXT: define internal i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i32 %iptr, i64 %v, i32 6)
; CHECK-NEXT: ret i64 %a
@@ -285,7 +285,7 @@
;; or
-define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -294,7 +294,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i8
; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i32 %iptr, i8 %trunc, i32 6)
@@ -302,7 +302,7 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -311,7 +311,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i16
; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i32 %iptr, i16 %trunc, i32 6)
@@ -319,27 +319,27 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
ret i32 %a
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32 %iptr, i32 %v, i32 6)
; CHECK-NEXT: ret i32 %a
; CHECK-NEXT: }
-define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6)
ret i64 %a
}
-; CHECK-NEXT: define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
+; CHECK-NEXT: define internal i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i32 %iptr, i64 %v, i32 6)
; CHECK-NEXT: ret i64 %a
@@ -347,7 +347,7 @@
;; and
-define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -356,7 +356,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i8
; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i32 %iptr, i8 %trunc, i32 6)
@@ -364,7 +364,7 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -373,7 +373,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i16
; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i32 %iptr, i16 %trunc, i32 6)
@@ -381,27 +381,27 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
ret i32 %a
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32 %iptr, i32 %v, i32 6)
; CHECK-NEXT: ret i32 %a
; CHECK-NEXT: }
-define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6)
ret i64 %a
}
-; CHECK-NEXT: define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
+; CHECK-NEXT: define internal i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i32 %iptr, i64 %v, i32 6)
; CHECK-NEXT: ret i64 %a
@@ -409,7 +409,7 @@
;; xor
-define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -418,7 +418,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i8
; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i32 %iptr, i8 %trunc, i32 6)
@@ -426,7 +426,7 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -435,7 +435,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i16
; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i32 %iptr, i16 %trunc, i32 6)
@@ -443,27 +443,27 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
ret i32 %a
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32 %iptr, i32 %v, i32 6)
; CHECK-NEXT: ret i32 %a
; CHECK-NEXT: }
-define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6)
ret i64 %a
}
-; CHECK-NEXT: define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
+; CHECK-NEXT: define internal i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i32 %iptr, i64 %v, i32 6)
; CHECK-NEXT: ret i64 %a
@@ -471,7 +471,7 @@
;; exchange
-define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
@@ -480,7 +480,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i8
; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i32 %iptr, i8 %trunc, i32 6)
@@ -488,7 +488,7 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i16
%ptr = inttoptr i32 %iptr to i16*
@@ -497,7 +497,7 @@
ret i32 %a_ext
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc = trunc i32 %v to i16
; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i32 %iptr, i16 %trunc, i32 6)
@@ -505,27 +505,27 @@
; CHECK-NEXT: ret i32 %a_ext
; CHECK-NEXT: }
-define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
+define internal i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
ret i32 %a
}
-; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
+; CHECK-NEXT: define internal i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32 %iptr, i32 %v, i32 6)
; CHECK-NEXT: ret i32 %a
; CHECK-NEXT: }
-define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
+define internal i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6)
ret i64 %a
}
-; CHECK-NEXT: define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
+; CHECK-NEXT: define internal i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i32 %iptr, i64 %v, i32 6)
; CHECK-NEXT: ret i64 %a
@@ -533,7 +533,7 @@
;;;; Cmpxchg
-define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
entry:
%trunc_exp = trunc i32 %expected to i8
%trunc_des = trunc i32 %desired to i8
@@ -544,7 +544,7 @@
ret i32 %old_ext
}
-; CHECK-NEXT: define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
+; CHECK-NEXT: define internal i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc_exp = trunc i32 %expected to i8
; CHECK-NEXT: %trunc_des = trunc i32 %desired to i8
@@ -553,7 +553,7 @@
; CHECK-NEXT: ret i32 %old_ext
; CHECK-NEXT: }
-define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
entry:
%trunc_exp = trunc i32 %expected to i16
%trunc_des = trunc i32 %desired to i16
@@ -564,7 +564,7 @@
ret i32 %old_ext
}
-; CHECK-NEXT: define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
+; CHECK-NEXT: define internal i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %trunc_exp = trunc i32 %expected to i16
; CHECK-NEXT: %trunc_des = trunc i32 %desired to i16
@@ -573,7 +573,7 @@
; CHECK-NEXT: ret i32 %old_ext
; CHECK-NEXT: }
-define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
+define internal i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
@@ -581,13 +581,13 @@
ret i32 %old
}
-; CHECK-NEXT: define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
+; CHECK-NEXT: define internal i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32 %iptr, i32 %expected, i32 %desired, i32 6, i32 6)
; CHECK-NEXT: ret i32 %old
; CHECK-NEXT: }
-define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
+define internal i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
@@ -595,7 +595,7 @@
ret i64 %old
}
-; CHECK-NEXT: define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
+; CHECK-NEXT: define internal i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i32 %iptr, i64 %expected, i64 %desired, i32 6, i32 6)
; CHECK-NEXT: ret i64 %old
@@ -603,31 +603,31 @@
;;;; Fence and is-lock-free.
-define void @test_atomic_fence() {
+define internal void @test_atomic_fence() {
entry:
call void @llvm.nacl.atomic.fence(i32 6)
ret void
}
-; CHECK-NEXT: define void @test_atomic_fence() {
+; CHECK-NEXT: define internal void @test_atomic_fence() {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @test_atomic_fence_all() {
+define internal void @test_atomic_fence_all() {
entry:
call void @llvm.nacl.atomic.fence.all()
ret void
}
-; CHECK-NEXT: define void @test_atomic_fence_all() {
+; CHECK-NEXT: define internal void @test_atomic_fence_all() {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.nacl.atomic.fence.all()
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define i32 @test_atomic_is_lock_free(i32 %iptr) {
+define internal i32 @test_atomic_is_lock_free(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
@@ -635,7 +635,7 @@
ret i32 %r
}
-; CHECK-NEXT: define i32 @test_atomic_is_lock_free(i32 %iptr) {
+; CHECK-NEXT: define internal i32 @test_atomic_is_lock_free(i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i32 %iptr)
; CHECK-NEXT: %r = zext i1 %i to i32
diff --git a/tests_lit/reader_tests/nacl-other-intrinsics.ll b/tests_lit/reader_tests/nacl-other-intrinsics.ll
index fda8932..38cd72d 100644
--- a/tests_lit/reader_tests/nacl-other-intrinsics.ll
+++ b/tests_lit/reader_tests/nacl-other-intrinsics.ll
@@ -1,8 +1,10 @@
; This tests parsing NaCl intrinsics not related to atomic operations.
-; RUN: %p2i -i %s --insts | FileCheck %s
+; RUN: %p2i -i %s --insts --args -allow-externally-defined-symbols \
+; RUN: | FileCheck %s
; RUN: %if --need=allow_disable_ir_gen --command \
; RUN: %p2i -i %s --args -notranslate -timing -no-ir-gen \
+; RUN: -allow-externally-defined-symbols \
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
@@ -30,20 +32,20 @@
declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)
-define i32 @test_nacl_read_tp() {
+define internal i32 @test_nacl_read_tp() {
entry:
%ptr = call i8* @llvm.nacl.read.tp()
%__1 = ptrtoint i8* %ptr to i32
ret i32 %__1
}
-; CHECK: define i32 @test_nacl_read_tp() {
+; CHECK: define internal i32 @test_nacl_read_tp() {
; CHECK-NEXT: entry:
; CHECK-NEXT: %ptr = call i32 @llvm.nacl.read.tp()
; CHECK-NEXT: ret i32 %ptr
; CHECK-NEXT: }
-define void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
+define internal void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -52,13 +54,13 @@
ret void
}
-; CHECK-NEXT: define void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
+; CHECK-NEXT: define internal void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
+define internal void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
%dst = inttoptr i32 %iptr_dst to i8*
%src = inttoptr i32 %iptr_src to i8*
@@ -67,13 +69,13 @@
ret void
}
-; CHECK-NEXT: define void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
+; CHECK-NEXT: define internal void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
+define internal void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
entry:
%val = trunc i32 %wide_val to i8
%dst = inttoptr i32 %iptr_dst to i8*
@@ -82,14 +84,14 @@
ret void
}
-; CHECK-NEXT: define void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
+; CHECK-NEXT: define internal void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %val = trunc i32 %wide_val to i8
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i32 %iptr_dst, i8 %val, i32 %len, i32 1, i1 false)
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define i32 @test_setjmplongjmp(i32 %iptr_env) {
+define internal i32 @test_setjmplongjmp(i32 %iptr_env) {
entry:
%env = inttoptr i32 %iptr_env to i8*
%i = call i32 @llvm.nacl.setjmp(i8* %env)
@@ -104,7 +106,7 @@
ret i32 1
}
-; CHECK-NEXT: define i32 @test_setjmplongjmp(i32 %iptr_env) {
+; CHECK-NEXT: define internal i32 @test_setjmplongjmp(i32 %iptr_env) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %i = call i32 @llvm.nacl.setjmp(i32 %iptr_env)
; CHECK-NEXT: %r1 = icmp eq i32 %i, 0
@@ -116,7 +118,7 @@
; CHECK-NEXT: ret i32 1
; CHECK-NEXT: }
-define float @test_sqrt_float(float %x, i32 %iptr) {
+define internal float @test_sqrt_float(float %x, i32 %iptr) {
entry:
%r = call float @llvm.sqrt.f32(float %x)
%r2 = call float @llvm.sqrt.f32(float %r)
@@ -125,7 +127,7 @@
ret float %r4
}
-; CHECK-NEXT: define float @test_sqrt_float(float %x, i32 %iptr) {
+; CHECK-NEXT: define internal float @test_sqrt_float(float %x, i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call float @llvm.sqrt.f32(float %x)
; CHECK-NEXT: %r2 = call float @llvm.sqrt.f32(float %r)
@@ -134,7 +136,7 @@
; CHECK-NEXT: ret float %r4
; CHECK-NEXT: }
-define double @test_sqrt_double(double %x, i32 %iptr) {
+define internal double @test_sqrt_double(double %x, i32 %iptr) {
entry:
%r = call double @llvm.sqrt.f64(double %x)
%r2 = call double @llvm.sqrt.f64(double %r)
@@ -143,7 +145,7 @@
ret double %r4
}
-; CHECK-NEXT: define double @test_sqrt_double(double %x, i32 %iptr) {
+; CHECK-NEXT: define internal double @test_sqrt_double(double %x, i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call double @llvm.sqrt.f64(double %x)
; CHECK-NEXT: %r2 = call double @llvm.sqrt.f64(double %r)
@@ -152,7 +154,7 @@
; CHECK-NEXT: ret double %r4
; CHECK-NEXT: }
-define float @test_fabs_float(float %x) {
+define internal float @test_fabs_float(float %x) {
entry:
%r = call float @llvm.fabs.f32(float %x)
%r2 = call float @llvm.fabs.f32(float %r)
@@ -161,7 +163,7 @@
ret float %r4
}
-; CHECK-NEXT: define float @test_fabs_float(float %x) {
+; CHECK-NEXT: define internal float @test_fabs_float(float %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call float @llvm.fabs.f32(float %x)
; CHECK-NEXT: %r2 = call float @llvm.fabs.f32(float %r)
@@ -170,7 +172,7 @@
; CHECK-NEXT: ret float %r4
; CHECK-NEXT: }
-define double @test_fabs_double(double %x) {
+define internal double @test_fabs_double(double %x) {
entry:
%r = call double @llvm.fabs.f64(double %x)
%r2 = call double @llvm.fabs.f64(double %r)
@@ -179,7 +181,7 @@
ret double %r4
}
-; CHECK-NEXT: define double @test_fabs_double(double %x) {
+; CHECK-NEXT: define internal double @test_fabs_double(double %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call double @llvm.fabs.f64(double %x)
; CHECK-NEXT: %r2 = call double @llvm.fabs.f64(double %r)
@@ -188,7 +190,7 @@
; CHECK-NEXT: ret double %r4
; CHECK-NEXT: }
-define <4 x float> @test_fabs_v4f32(<4 x float> %x) {
+define internal <4 x float> @test_fabs_v4f32(<4 x float> %x) {
entry:
%r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
%r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r)
@@ -197,7 +199,7 @@
ret <4 x float> %r4
}
-; CHECK-NEXT: define <4 x float> @test_fabs_v4f32(<4 x float> %x) {
+; CHECK-NEXT: define internal <4 x float> @test_fabs_v4f32(<4 x float> %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
; CHECK-NEXT: %r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r)
@@ -206,7 +208,7 @@
; CHECK-NEXT: ret <4 x float> %r4
; CHECK-NEXT: }
-define i32 @test_trap(i32 %br) {
+define internal i32 @test_trap(i32 %br) {
entry:
%r1 = icmp eq i32 %br, 0
br i1 %r1, label %Zero, label %NonZero
@@ -217,7 +219,7 @@
ret i32 1
}
-; CHECK-NEXT: define i32 @test_trap(i32 %br) {
+; CHECK-NEXT: define internal i32 @test_trap(i32 %br) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r1 = icmp eq i32 %br, 0
; CHECK-NEXT: br i1 %r1, label %Zero, label %NonZero
@@ -228,7 +230,7 @@
; CHECK-NEXT: ret i32 1
; CHECK-NEXT: }
-define i32 @test_bswap_16(i32 %x) {
+define internal i32 @test_bswap_16(i32 %x) {
entry:
%x_trunc = trunc i32 %x to i16
%r = call i16 @llvm.bswap.i16(i16 %x_trunc)
@@ -236,7 +238,7 @@
ret i32 %r_zext
}
-; CHECK-NEXT: define i32 @test_bswap_16(i32 %x) {
+; CHECK-NEXT: define internal i32 @test_bswap_16(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %x_trunc = trunc i32 %x to i16
; CHECK-NEXT: %r = call i16 @llvm.bswap.i16(i16 %x_trunc)
@@ -244,110 +246,110 @@
; CHECK-NEXT: ret i32 %r_zext
; CHECK-NEXT: }
-define i32 @test_bswap_32(i32 %x) {
+define internal i32 @test_bswap_32(i32 %x) {
entry:
%r = call i32 @llvm.bswap.i32(i32 %x)
ret i32 %r
}
-; CHECK-NEXT: define i32 @test_bswap_32(i32 %x) {
+; CHECK-NEXT: define internal i32 @test_bswap_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i32 @llvm.bswap.i32(i32 %x)
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i64 @test_bswap_64(i64 %x) {
+define internal i64 @test_bswap_64(i64 %x) {
entry:
%r = call i64 @llvm.bswap.i64(i64 %x)
ret i64 %r
}
-; CHECK-NEXT: define i64 @test_bswap_64(i64 %x) {
+; CHECK-NEXT: define internal i64 @test_bswap_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i64 @llvm.bswap.i64(i64 %x)
; CHECK-NEXT: ret i64 %r
; CHECK-NEXT: }
-define i32 @test_ctlz_32(i32 %x) {
+define internal i32 @test_ctlz_32(i32 %x) {
entry:
%r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
ret i32 %r
}
-; CHECK-NEXT: define i32 @test_ctlz_32(i32 %x) {
+; CHECK-NEXT: define internal i32 @test_ctlz_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i64 @test_ctlz_64(i64 %x) {
+define internal i64 @test_ctlz_64(i64 %x) {
entry:
%r = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
ret i64 %r
}
-; CHECK-NEXT: define i64 @test_ctlz_64(i64 %x) {
+; CHECK-NEXT: define internal i64 @test_ctlz_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
; CHECK-NEXT: ret i64 %r
; CHECK-NEXT: }
-define i32 @test_cttz_32(i32 %x) {
+define internal i32 @test_cttz_32(i32 %x) {
entry:
%r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
ret i32 %r
}
-; CHECK-NEXT: define i32 @test_cttz_32(i32 %x) {
+; CHECK-NEXT: define internal i32 @test_cttz_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i64 @test_cttz_64(i64 %x) {
+define internal i64 @test_cttz_64(i64 %x) {
entry:
%r = call i64 @llvm.cttz.i64(i64 %x, i1 false)
ret i64 %r
}
-; CHECK-NEXT: define i64 @test_cttz_64(i64 %x) {
+; CHECK-NEXT: define internal i64 @test_cttz_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i64 @llvm.cttz.i64(i64 %x, i1 false)
; CHECK-NEXT: ret i64 %r
; CHECK-NEXT: }
-define i32 @test_popcount_32(i32 %x) {
+define internal i32 @test_popcount_32(i32 %x) {
entry:
%r = call i32 @llvm.ctpop.i32(i32 %x)
ret i32 %r
}
-; CHECK-NEXT: define i32 @test_popcount_32(i32 %x) {
+; CHECK-NEXT: define internal i32 @test_popcount_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i32 @llvm.ctpop.i32(i32 %x)
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i64 @test_popcount_64(i64 %x) {
+define internal i64 @test_popcount_64(i64 %x) {
entry:
%r = call i64 @llvm.ctpop.i64(i64 %x)
ret i64 %r
}
-; CHECK-NEXT: define i64 @test_popcount_64(i64 %x) {
+; CHECK-NEXT: define internal i64 @test_popcount_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = call i64 @llvm.ctpop.i64(i64 %x)
; CHECK-NEXT: ret i64 %r
; CHECK-NEXT: }
-define void @test_stacksave_noalloca() {
+define internal void @test_stacksave_noalloca() {
entry:
%sp = call i8* @llvm.stacksave()
call void @llvm.stackrestore(i8* %sp)
ret void
}
-; CHECK-NEXT: define void @test_stacksave_noalloca() {
+; CHECK-NEXT: define internal void @test_stacksave_noalloca() {
; CHECK-NEXT: entry:
; CHECK-NEXT: %sp = call i32 @llvm.stacksave()
; CHECK-NEXT: call void @llvm.stackrestore(i32 %sp)
@@ -356,7 +358,7 @@
declare i32 @foo(i32 %x)
-define void @test_stacksave_multiple(i32 %x) {
+define internal void @test_stacksave_multiple(i32 %x) {
entry:
%x_4 = mul i32 %x, 4
%sp1 = call i8* @llvm.stacksave()
@@ -383,7 +385,7 @@
ret void
}
-; CHECK-NEXT: define void @test_stacksave_multiple(i32 %x) {
+; CHECK-NEXT: define internal void @test_stacksave_multiple(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %x_4 = mul i32 %x, 4
; CHECK-NEXT: %sp1 = call i32 @llvm.stacksave()
diff --git a/tests_lit/reader_tests/select.ll b/tests_lit/reader_tests/select.ll
index 8421084..834f1a8 100644
--- a/tests_lit/reader_tests/select.ll
+++ b/tests_lit/reader_tests/select.ll
@@ -6,7 +6,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define void @Seli1(i32 %p) {
+define internal void @Seli1(i32 %p) {
entry:
%vc = trunc i32 %p to i1
%vt = trunc i32 %p to i1
@@ -15,7 +15,7 @@
ret void
}
-; CHECK: define void @Seli1(i32 %p) {
+; CHECK: define internal void @Seli1(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %p to i1
; CHECK-NEXT: %vt = trunc i32 %p to i1
@@ -24,7 +24,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @Seli8(i32 %p) {
+define internal void @Seli8(i32 %p) {
entry:
%vc = trunc i32 %p to i1
%vt = trunc i32 %p to i8
@@ -33,7 +33,7 @@
ret void
}
-; CHECK-NEXT: define void @Seli8(i32 %p) {
+; CHECK-NEXT: define internal void @Seli8(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %p to i1
; CHECK-NEXT: %vt = trunc i32 %p to i8
@@ -42,7 +42,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @Seli16(i32 %p) {
+define internal void @Seli16(i32 %p) {
entry:
%vc = trunc i32 %p to i1
%vt = trunc i32 %p to i16
@@ -51,7 +51,7 @@
ret void
}
-; CHECK-NEXT: define void @Seli16(i32 %p) {
+; CHECK-NEXT: define internal void @Seli16(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %p to i1
; CHECK-NEXT: %vt = trunc i32 %p to i16
@@ -60,239 +60,239 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define i32 @Seli32(i32 %pc, i32 %pt, i32 %pe) {
+define internal i32 @Seli32(i32 %pc, i32 %pt, i32 %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, i32 %pt, i32 %pe
ret i32 %r
}
-; CHECK-NEXT: define i32 @Seli32(i32 %pc, i32 %pt, i32 %pe) {
+; CHECK-NEXT: define internal i32 @Seli32(i32 %pc, i32 %pt, i32 %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, i32 %pt, i32 %pe
; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define i64 @Seli64(i64 %pc, i64 %pt, i64 %pe) {
+define internal i64 @Seli64(i64 %pc, i64 %pt, i64 %pe) {
entry:
%vc = trunc i64 %pc to i1
%r = select i1 %vc, i64 %pt, i64 %pe
ret i64 %r
}
-; CHECK-NEXT: define i64 @Seli64(i64 %pc, i64 %pt, i64 %pe) {
+; CHECK-NEXT: define internal i64 @Seli64(i64 %pc, i64 %pt, i64 %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i64 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, i64 %pt, i64 %pe
; CHECK-NEXT: ret i64 %r
; CHECK-NEXT: }
-define float @SelFloat(i32 %pc, float %pt, float %pe) {
+define internal float @SelFloat(i32 %pc, float %pt, float %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, float %pt, float %pe
ret float %r
}
-; CHECK-NEXT: define float @SelFloat(i32 %pc, float %pt, float %pe) {
+; CHECK-NEXT: define internal float @SelFloat(i32 %pc, float %pt, float %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, float %pt, float %pe
; CHECK-NEXT: ret float %r
; CHECK-NEXT: }
-define double @SelDouble(i32 %pc, double %pt, double %pe) {
+define internal double @SelDouble(i32 %pc, double %pt, double %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, double %pt, double %pe
ret double %r
}
-; CHECK-NEXT: define double @SelDouble(i32 %pc, double %pt, double %pe) {
+; CHECK-NEXT: define internal double @SelDouble(i32 %pc, double %pt, double %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, double %pt, double %pe
; CHECK-NEXT: ret double %r
; CHECK-NEXT: }
-define <16 x i1> @SelV16x1(i32 %pc, <16 x i1> %pt, <16 x i1> %pe) {
+define internal <16 x i1> @SelV16x1(i32 %pc, <16 x i1> %pt, <16 x i1> %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <16 x i1> %pt, <16 x i1> %pe
ret <16 x i1> %r
}
-; CHECK-NEXT: define <16 x i1> @SelV16x1(i32 %pc, <16 x i1> %pt, <16 x i1> %pe) {
+; CHECK-NEXT: define internal <16 x i1> @SelV16x1(i32 %pc, <16 x i1> %pt, <16 x i1> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, <16 x i1> %pt, <16 x i1> %pe
; CHECK-NEXT: ret <16 x i1> %r
; CHECK-NEXT: }
-define <8 x i1> @SelV8x1(i32 %pc, <8 x i1> %pt, <8 x i1> %pe) {
+define internal <8 x i1> @SelV8x1(i32 %pc, <8 x i1> %pt, <8 x i1> %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <8 x i1> %pt, <8 x i1> %pe
ret <8 x i1> %r
}
-; CHECK-NEXT: define <8 x i1> @SelV8x1(i32 %pc, <8 x i1> %pt, <8 x i1> %pe) {
+; CHECK-NEXT: define internal <8 x i1> @SelV8x1(i32 %pc, <8 x i1> %pt, <8 x i1> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, <8 x i1> %pt, <8 x i1> %pe
; CHECK-NEXT: ret <8 x i1> %r
; CHECK-NEXT: }
-define <4 x i1> @SelV4x1(i32 %pc, <4 x i1> %pt, <4 x i1> %pe) {
+define internal <4 x i1> @SelV4x1(i32 %pc, <4 x i1> %pt, <4 x i1> %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <4 x i1> %pt, <4 x i1> %pe
ret <4 x i1> %r
}
-; CHECK-NEXT: define <4 x i1> @SelV4x1(i32 %pc, <4 x i1> %pt, <4 x i1> %pe) {
+; CHECK-NEXT: define internal <4 x i1> @SelV4x1(i32 %pc, <4 x i1> %pt, <4 x i1> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, <4 x i1> %pt, <4 x i1> %pe
; CHECK-NEXT: ret <4 x i1> %r
; CHECK-NEXT: }
-define <16 x i8> @SelV16x8(i32 %pc, <16 x i8> %pt, <16 x i8> %pe) {
+define internal <16 x i8> @SelV16x8(i32 %pc, <16 x i8> %pt, <16 x i8> %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <16 x i8> %pt, <16 x i8> %pe
ret <16 x i8> %r
}
-; CHECK-NEXT: define <16 x i8> @SelV16x8(i32 %pc, <16 x i8> %pt, <16 x i8> %pe) {
+; CHECK-NEXT: define internal <16 x i8> @SelV16x8(i32 %pc, <16 x i8> %pt, <16 x i8> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, <16 x i8> %pt, <16 x i8> %pe
; CHECK-NEXT: ret <16 x i8> %r
; CHECK-NEXT: }
-define <8 x i16> @SelV8x16(i32 %pc, <8 x i16> %pt, <8 x i16> %pe) {
+define internal <8 x i16> @SelV8x16(i32 %pc, <8 x i16> %pt, <8 x i16> %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <8 x i16> %pt, <8 x i16> %pe
ret <8 x i16> %r
}
-; CHECK-NEXT: define <8 x i16> @SelV8x16(i32 %pc, <8 x i16> %pt, <8 x i16> %pe) {
+; CHECK-NEXT: define internal <8 x i16> @SelV8x16(i32 %pc, <8 x i16> %pt, <8 x i16> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, <8 x i16> %pt, <8 x i16> %pe
; CHECK-NEXT: ret <8 x i16> %r
; CHECK-NEXT: }
-define <4 x i32> @SelV4x32(i32 %pc, <4 x i32> %pt, <4 x i32> %pe) {
+define internal <4 x i32> @SelV4x32(i32 %pc, <4 x i32> %pt, <4 x i32> %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <4 x i32> %pt, <4 x i32> %pe
ret <4 x i32> %r
}
-; CHECK-NEXT: define <4 x i32> @SelV4x32(i32 %pc, <4 x i32> %pt, <4 x i32> %pe) {
+; CHECK-NEXT: define internal <4 x i32> @SelV4x32(i32 %pc, <4 x i32> %pt, <4 x i32> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, <4 x i32> %pt, <4 x i32> %pe
; CHECK-NEXT: ret <4 x i32> %r
; CHECK-NEXT: }
-define <4 x float> @SelV4xfloat(i32 %pc, <4 x float> %pt, <4 x float> %pe) {
+define internal <4 x float> @SelV4xfloat(i32 %pc, <4 x float> %pt, <4 x float> %pe) {
entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <4 x float> %pt, <4 x float> %pe
ret <4 x float> %r
}
-; CHECK-NEXT: define <4 x float> @SelV4xfloat(i32 %pc, <4 x float> %pt, <4 x float> %pe) {
+; CHECK-NEXT: define internal <4 x float> @SelV4xfloat(i32 %pc, <4 x float> %pt, <4 x float> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vc = trunc i32 %pc to i1
; CHECK-NEXT: %r = select i1 %vc, <4 x float> %pt, <4 x float> %pe
; CHECK-NEXT: ret <4 x float> %r
; CHECK-NEXT: }
-define <16 x i1> @SelV16x1Vcond(<16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe) {
+define internal <16 x i1> @SelV16x1Vcond(<16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe) {
entry:
%r = select <16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe
ret <16 x i1> %r
}
-; CHECK-NEXT: define <16 x i1> @SelV16x1Vcond(<16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe) {
+; CHECK-NEXT: define internal <16 x i1> @SelV16x1Vcond(<16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = select <16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe
; CHECK-NEXT: ret <16 x i1> %r
; CHECK-NEXT: }
-define <8 x i1> @SelV8x1Vcond(<8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe) {
+define internal <8 x i1> @SelV8x1Vcond(<8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe) {
entry:
%r = select <8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe
ret <8 x i1> %r
}
-; CHECK-NEXT: define <8 x i1> @SelV8x1Vcond(<8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe) {
+; CHECK-NEXT: define internal <8 x i1> @SelV8x1Vcond(<8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = select <8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe
; CHECK-NEXT: ret <8 x i1> %r
; CHECK-NEXT: }
-define <4 x i1> @SelV4x1Vcond(<4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe) {
+define internal <4 x i1> @SelV4x1Vcond(<4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe) {
entry:
%r = select <4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe
ret <4 x i1> %r
}
-; CHECK-NEXT: define <4 x i1> @SelV4x1Vcond(<4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe) {
+; CHECK-NEXT: define internal <4 x i1> @SelV4x1Vcond(<4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = select <4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe
; CHECK-NEXT: ret <4 x i1> %r
; CHECK-NEXT: }
-define <16 x i8> @SelV16x8Vcond(<16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe) {
+define internal <16 x i8> @SelV16x8Vcond(<16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe) {
entry:
%r = select <16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe
ret <16 x i8> %r
}
-; CHECK-NEXT: define <16 x i8> @SelV16x8Vcond(<16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe) {
+; CHECK-NEXT: define internal <16 x i8> @SelV16x8Vcond(<16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = select <16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe
; CHECK-NEXT: ret <16 x i8> %r
; CHECK-NEXT: }
-define <8 x i16> @SelV8x16Vcond(<8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe) {
+define internal <8 x i16> @SelV8x16Vcond(<8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe) {
entry:
%r = select <8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe
ret <8 x i16> %r
}
-; CHECK-NEXT: define <8 x i16> @SelV8x16Vcond(<8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe) {
+; CHECK-NEXT: define internal <8 x i16> @SelV8x16Vcond(<8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = select <8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe
; CHECK-NEXT: ret <8 x i16> %r
; CHECK-NEXT: }
-define <4 x i32> @SelV4x32Vcond(<4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe) {
+define internal <4 x i32> @SelV4x32Vcond(<4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe) {
entry:
%r = select <4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe
ret <4 x i32> %r
}
-; CHECK-NEXT: define <4 x i32> @SelV4x32Vcond(<4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe) {
+; CHECK-NEXT: define internal <4 x i32> @SelV4x32Vcond(<4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = select <4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe
; CHECK-NEXT: ret <4 x i32> %r
; CHECK-NEXT: }
-define <4 x float> @SelV4xfloatVcond(<4 x i1> %pc, <4 x float> %pt, <4 x float> %pe) {
+define internal <4 x float> @SelV4xfloatVcond(<4 x i1> %pc, <4 x float> %pt, <4 x float> %pe) {
entry:
%r = select <4 x i1> %pc, <4 x float> %pt, <4 x float> %pe
ret <4 x float> %r
}
-; CHECK-NEXT: define <4 x float> @SelV4xfloatVcond(<4 x i1> %pc, <4 x float> %pt, <4 x float> %pe) {
+; CHECK-NEXT: define internal <4 x float> @SelV4xfloatVcond(<4 x i1> %pc, <4 x float> %pt, <4 x float> %pe) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %r = select <4 x i1> %pc, <4 x float> %pt, <4 x float> %pe
; CHECK-NEXT: ret <4 x float> %r
diff --git a/tests_lit/reader_tests/store.ll b/tests_lit/reader_tests/store.ll
index 091e763..33f3302 100644
--- a/tests_lit/reader_tests/store.ll
+++ b/tests_lit/reader_tests/store.ll
@@ -6,7 +6,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define void @store_i8(i32 %addr) {
+define internal void @store_i8(i32 %addr) {
entry:
%addr_i8 = inttoptr i32 %addr to i8*
store i8 3, i8* %addr_i8, align 1
@@ -17,7 +17,7 @@
; CHECK-NEXT: ret void
}
-define void @store_i16(i32 %addr) {
+define internal void @store_i16(i32 %addr) {
entry:
%addr_i16 = inttoptr i32 %addr to i16*
store i16 5, i16* %addr_i16, align 1
@@ -28,7 +28,7 @@
; CHECK-NEXT: ret void
}
-define void @store_i32(i32 %addr, i32 %v) {
+define internal void @store_i32(i32 %addr, i32 %v) {
entry:
%addr_i32 = inttoptr i32 %addr to i32*
store i32 %v, i32* %addr_i32, align 1
@@ -39,7 +39,7 @@
; CHECK-NEXT: ret void
}
-define void @store_i64(i32 %addr, i64 %v) {
+define internal void @store_i64(i32 %addr, i64 %v) {
entry:
%addr_i64 = inttoptr i32 %addr to i64*
store i64 %v, i64* %addr_i64, align 1
@@ -50,7 +50,7 @@
; CHECK-NEXT: ret void
}
-define void @store_float_a1(i32 %addr, float %v) {
+define internal void @store_float_a1(i32 %addr, float %v) {
entry:
%addr_float = inttoptr i32 %addr to float*
store float %v, float* %addr_float, align 1
@@ -63,7 +63,7 @@
; CHECK-NEXT: ret void
}
-define void @store_float_a4(i32 %addr, float %v) {
+define internal void @store_float_a4(i32 %addr, float %v) {
entry:
%addr_float = inttoptr i32 %addr to float*
store float %v, float* %addr_float, align 4
@@ -74,7 +74,7 @@
; CHECK-NEXT: ret void
}
-define void @store_double_a1(i32 %addr, double %v) {
+define internal void @store_double_a1(i32 %addr, double %v) {
entry:
%addr_double = inttoptr i32 %addr to double*
store double %v, double* %addr_double, align 1
@@ -87,7 +87,7 @@
; CHECK-NEXT: ret void
}
-define void @store_double_a8(i32 %addr, double %v) {
+define internal void @store_double_a8(i32 %addr, double %v) {
entry:
%addr_double = inttoptr i32 %addr to double*
store double %v, double* %addr_double, align 8
@@ -98,7 +98,7 @@
; CHECK-NEXT: ret void
}
-define void @store_v16xI8(i32 %addr, <16 x i8> %v) {
+define internal void @store_v16xI8(i32 %addr, <16 x i8> %v) {
%addr_v16xI8 = inttoptr i32 %addr to <16 x i8>*
store <16 x i8> %v, <16 x i8>* %addr_v16xI8, align 1
ret void
@@ -108,7 +108,7 @@
; CHECK-NEXT: ret void
}
-define void @store_v8xI16(i32 %addr, <8 x i16> %v) {
+define internal void @store_v8xI16(i32 %addr, <8 x i16> %v) {
%addr_v8xI16 = inttoptr i32 %addr to <8 x i16>*
store <8 x i16> %v, <8 x i16>* %addr_v8xI16, align 2
ret void
@@ -118,7 +118,7 @@
; CHECK-NEXT: ret void
}
-define void @store_v4xI32(i32 %addr, <4 x i32> %v) {
+define internal void @store_v4xI32(i32 %addr, <4 x i32> %v) {
%addr_v4xI32 = inttoptr i32 %addr to <4 x i32>*
store <4 x i32> %v, <4 x i32>* %addr_v4xI32, align 4
ret void
@@ -128,7 +128,7 @@
; CHECK-NEXT: ret void
}
-define void @store_v4xFloat(i32 %addr, <4 x float> %v) {
+define internal void @store_v4xFloat(i32 %addr, <4 x float> %v) {
%addr_v4xFloat = inttoptr i32 %addr to <4 x float>*
store <4 x float> %v, <4 x float>* %addr_v4xFloat, align 4
ret void
diff --git a/tests_lit/reader_tests/switch.ll b/tests_lit/reader_tests/switch.ll
index 0bbbf88..5a6014d 100644
--- a/tests_lit/reader_tests/switch.ll
+++ b/tests_lit/reader_tests/switch.ll
@@ -6,7 +6,7 @@
; RUN: | %if --need=allow_disable_ir_gen --command \
; RUN: FileCheck --check-prefix=NOIR %s
-define void @testDefaultSwitch(i32 %a) {
+define internal void @testDefaultSwitch(i32 %a) {
entry:
switch i32 %a, label %exit [
]
@@ -14,7 +14,7 @@
ret void
}
-; CHECK: define void @testDefaultSwitch(i32 %a) {
+; CHECK: define internal void @testDefaultSwitch(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 %a, label %exit [
; CHECK-NEXT: ]
@@ -22,7 +22,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define i32 @testSwitch(i32 %a) {
+define internal i32 @testSwitch(i32 %a) {
entry:
switch i32 %a, label %sw.default [
i32 1, label %sw.epilog
@@ -51,7 +51,7 @@
ret i32 %result.1
}
-; CHECK-NEXT: define i32 @testSwitch(i32 %a) {
+; CHECK-NEXT: define internal i32 @testSwitch(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 %a, label %sw.default [
; CHECK-NEXT: i32 1, label %sw.epilog
@@ -76,7 +76,7 @@
; CHECK-NEXT: ret i32 %result.1
; CHECK-NEXT: }
-define void @testSignedI32Values(i32 %a) {
+define internal void @testSignedI32Values(i32 %a) {
entry:
switch i32 %a, label %labelDefault [
i32 0, label %label0
@@ -100,7 +100,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI32Values(i32 %a) {
+; CHECK-NEXT: define internal void @testSignedI32Values(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 %a, label %labelDefault [
; CHECK-NEXT: i32 0, label %label0
@@ -125,7 +125,7 @@
; CHECK-NEXT: }
; Test values that cross signed i32 size boundaries.
-define void @testSignedI32Boundary(i32 %a) {
+define internal void @testSignedI32Boundary(i32 %a) {
entry:
switch i32 %a, label %exit [
i32 -2147483649, label %exit ; min signed i32 - 1
@@ -135,7 +135,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI32Boundary(i32 %a) {
+; CHECK-NEXT: define internal void @testSignedI32Boundary(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 %a, label %exit [
; CHECK-NEXT: i32 2147483647, label %exit
@@ -145,7 +145,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @testUnsignedI32Values(i32 %a) {
+define internal void @testUnsignedI32Values(i32 %a) {
entry:
switch i32 %a, label %exit [
i32 0, label %exit
@@ -156,7 +156,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI32Values(i32 %a) {
+; CHECK-NEXT: define internal void @testUnsignedI32Values(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 %a, label %exit [
; CHECK-NEXT: i32 0, label %exit
@@ -169,7 +169,7 @@
; CHECK-NEXT: }
; Test values that cross unsigned i32 boundaries.
-define void @testUnsignedI32Boundary(i32 %a) {
+define internal void @testUnsignedI32Boundary(i32 %a) {
entry:
switch i32 %a, label %exit [
i32 4294967296, label %exit ; max unsigned i32 + 1
@@ -178,7 +178,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI32Boundary(i32 %a) {
+; CHECK-NEXT: define internal void @testUnsignedI32Boundary(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i32 %a, label %exit [
; CHECK-NEXT: i32 0, label %exit
@@ -187,7 +187,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @testSignedI64Values(i64 %a) {
+define internal void @testSignedI64Values(i64 %a) {
entry:
switch i64 %a, label %exit [
i64 0, label %exit
@@ -198,7 +198,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI64Values(i64 %a) {
+; CHECK-NEXT: define internal void @testSignedI64Values(i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i64 %a, label %exit [
; CHECK-NEXT: i64 0, label %exit
@@ -210,7 +210,7 @@
; CHECK-NEXT: }
; Test values that cross signed i64 size boundaries.
-define void @testSignedI64Boundary(i64 %a) {
+define internal void @testSignedI64Boundary(i64 %a) {
entry:
switch i64 %a, label %exit [
i64 0, label %exit
@@ -221,7 +221,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI64Boundary(i64 %a) {
+; CHECK-NEXT: define internal void @testSignedI64Boundary(i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i64 %a, label %exit [
; CHECK-NEXT: i64 0, label %exit
@@ -232,7 +232,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @testUnsignedI64Values(i64 %a) {
+define internal void @testUnsignedI64Values(i64 %a) {
entry:
switch i64 %a, label %exit [
i64 0, label %exit
@@ -243,7 +243,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI64Values(i64 %a) {
+; CHECK-NEXT: define internal void @testUnsignedI64Values(i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i64 %a, label %exit [
; CHECK-NEXT: i64 0, label %exit
@@ -255,7 +255,7 @@
; CHECK-NEXT: }
; Test values that cross unsigned i64 size boundaries.
-define void @testUnsignedI64Boundary(i64 %a) {
+define internal void @testUnsignedI64Boundary(i64 %a) {
entry:
switch i64 %a, label %exit [
i64 18446744073709551616, label %exit ; max unsigned i64 + 1
@@ -264,7 +264,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI64Boundary(i64 %a) {
+; CHECK-NEXT: define internal void @testUnsignedI64Boundary(i64 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i64 %a, label %exit [
; CHECK-NEXT: i64 0, label %exit
@@ -273,7 +273,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @testSignedI16Values(i32 %p) {
+define internal void @testSignedI16Values(i32 %p) {
entry:
%a = trunc i32 %p to i16
switch i16 %a, label %exit [
@@ -288,7 +288,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI16Values(i32 %p) {
+; CHECK-NEXT: define internal void @testSignedI16Values(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i16
; CHECK-NEXT: switch i16 %a, label %exit [
@@ -304,7 +304,7 @@
; CHECK-NEXT: }
; Test values that cross signed i16 size boundaries.
-define void @testSignedI16Boundary(i32 %p) {
+define internal void @testSignedI16Boundary(i32 %p) {
entry:
%a = trunc i32 %p to i16
switch i16 %a, label %exit [
@@ -315,7 +315,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI16Boundary(i32 %p) {
+; CHECK-NEXT: define internal void @testSignedI16Boundary(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i16
; CHECK-NEXT: switch i16 %a, label %exit [
@@ -326,7 +326,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @testUnsignedI16Values(i32 %p) {
+define internal void @testUnsignedI16Values(i32 %p) {
entry:
%a = trunc i32 %p to i16
switch i16 %a, label %exit [
@@ -338,7 +338,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI16Values(i32 %p) {
+; CHECK-NEXT: define internal void @testUnsignedI16Values(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i16
; CHECK-NEXT: switch i16 %a, label %exit [
@@ -352,7 +352,7 @@
; CHECK-NEXT: }
; Test values that cross unsigned i16 size boundaries.
-define void @testUnsignedI16Boundary(i32 %p) {
+define internal void @testUnsignedI16Boundary(i32 %p) {
entry:
%a = trunc i32 %p to i16
switch i16 %a, label %exit [
@@ -362,7 +362,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI16Boundary(i32 %p) {
+; CHECK-NEXT: define internal void @testUnsignedI16Boundary(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i16
; CHECK-NEXT: switch i16 %a, label %exit [
@@ -372,7 +372,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @testSignedI8Values(i32 %p) {
+define internal void @testSignedI8Values(i32 %p) {
entry:
%a = trunc i32 %p to i8
switch i8 %a, label %exit [
@@ -387,7 +387,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI8Values(i32 %p) {
+; CHECK-NEXT: define internal void @testSignedI8Values(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i8
; CHECK-NEXT: switch i8 %a, label %exit [
@@ -403,7 +403,7 @@
; CHECK-NEXT: }
; Test values that cross signed i8 size boundaries.
-define void @testSignedI8Boundary(i32 %p) {
+define internal void @testSignedI8Boundary(i32 %p) {
entry:
%a = trunc i32 %p to i8
switch i8 %a, label %exit [
@@ -414,7 +414,7 @@
ret void
}
-; CHECK-NEXT: define void @testSignedI8Boundary(i32 %p) {
+; CHECK-NEXT: define internal void @testSignedI8Boundary(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i8
; CHECK-NEXT: switch i8 %a, label %exit [
@@ -426,7 +426,7 @@
; CHECK-NEXT: }
-define void @testUnsignedI8Values(i32 %p) {
+define internal void @testUnsignedI8Values(i32 %p) {
entry:
%a = trunc i32 %p to i8
switch i8 %a, label %exit [
@@ -438,7 +438,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI8Values(i32 %p) {
+; CHECK-NEXT: define internal void @testUnsignedI8Values(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i8
; CHECK-NEXT: switch i8 %a, label %exit [
@@ -452,7 +452,7 @@
; CHECK-NEXT: }
; Test values that cross unsigned i8 size boundaries.
-define void @testUnsignedI8Boundary(i32 %p) {
+define internal void @testUnsignedI8Boundary(i32 %p) {
entry:
%a = trunc i32 %p to i8
switch i8 %a, label %exit [
@@ -462,7 +462,7 @@
ret void
}
-; CHECK-NEXT: define void @testUnsignedI8Boundary(i32 %p) {
+; CHECK-NEXT: define internal void @testUnsignedI8Boundary(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i8
; CHECK-NEXT: switch i8 %a, label %exit [
@@ -472,7 +472,7 @@
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @testI1Values(i32 %p) {
+define internal void @testI1Values(i32 %p) {
entry:
%a = trunc i32 %p to i1
switch i1 %a, label %exit [
@@ -483,7 +483,7 @@
ret void
}
-; CHECK-NEXT: define void @testI1Values(i32 %p) {
+; CHECK-NEXT: define internal void @testI1Values(i32 %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %a = trunc i32 %p to i1
; CHECK-NEXT: switch i1 %a, label %exit [
diff --git a/tests_lit/reader_tests/unnamed.ll b/tests_lit/reader_tests/unnamed.ll
index 3f84b01..b5db147 100644
--- a/tests_lit/reader_tests/unnamed.ll
+++ b/tests_lit/reader_tests/unnamed.ll
@@ -23,39 +23,39 @@
@1 = internal constant [10 x i8] c"Some stuff", align 1
@g = internal global [4 x i8] zeroinitializer, align 4
-define i32 @2(i32 %v) {
+define internal i32 @2(i32 %v) {
ret i32 %v
}
-; CHECK: define i32 @Function(i32 %__0) {
+; CHECK: define internal i32 @Function(i32 %__0) {
; CHECK-NEXT: __0:
; CHECK-NEXT: ret i32 %__0
; CHECK-NEXT: }
-define void @hg() {
+define internal void @hg() {
ret void
}
-; CHECK-NEXT: define void @hg() {
+; CHECK-NEXT: define internal void @hg() {
; CHECK-NEXT: __0:
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @3() {
+define internal void @3() {
ret void
}
-; CHECK-NEXT: define void @Function1() {
+; CHECK-NEXT: define internal void @Function1() {
; CHECK-NEXT: __0:
; CHECK-NEXT: ret void
; CHECK-NEXT: }
-define void @h5() {
+define internal void @h5() {
ret void
}
-; CHECK-NEXT: define void @h5() {
+; CHECK-NEXT: define internal void @h5() {
; CHECK-NEXT: __0:
; CHECK-NEXT: ret void
; CHECK-NEXT: }
diff --git a/unittest/IceParseInstsTest.cpp b/unittest/IceParseInstsTest.cpp
index d58ad5a..8b452d3 100644
--- a/unittest/IceParseInstsTest.cpp
+++ b/unittest/IceParseInstsTest.cpp
@@ -44,8 +44,8 @@
3, naclbitc::TYPE_CODE_VOID, Terminator,
3, naclbitc::TYPE_CODE_FUNCTION, 0, 1, 0, 0, Terminator,
0, naclbitc::BLK_CODE_EXIT, Terminator,
- 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 1, 0, Terminator,
- 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 0, Terminator,
+ 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 1, 3, Terminator,
+ 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 3, Terminator,
1, naclbitc::BLK_CODE_ENTER, naclbitc::FUNCTION_BLOCK_ID, 2, Terminator,
3, naclbitc::FUNC_CODE_DECLAREBLOCKS, 1, Terminator,
// Note: 100 is a bad value index in next line.
@@ -85,7 +85,7 @@
3, naclbitc::TYPE_CODE_FUNCTION, 0, 1, 0, Terminator,
3, naclbitc::TYPE_CODE_INTEGER, 8, Terminator,
0, naclbitc::BLK_CODE_EXIT, Terminator,
- 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 0, Terminator,
+ 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 3, Terminator,
1, naclbitc::BLK_CODE_ENTER, naclbitc::FUNCTION_BLOCK_ID, 2, Terminator,
3, naclbitc::FUNC_CODE_DECLAREBLOCKS, 1, Terminator,
3, naclbitc::FUNC_CODE_INST_ALLOCA, 1, getEncAlignPower(0), Terminator,
@@ -152,7 +152,7 @@
3, naclbitc::TYPE_CODE_INTEGER, 32, Terminator,
3, naclbitc::TYPE_CODE_FUNCTION, 0, 0, 0, Terminator,
0, naclbitc::BLK_CODE_EXIT, Terminator,
- 3, naclbitc::MODULE_CODE_FUNCTION, 1, 0, 0, 0, Terminator,
+ 3, naclbitc::MODULE_CODE_FUNCTION, 1, 0, 0, 3, Terminator,
1, naclbitc::BLK_CODE_ENTER, naclbitc::FUNCTION_BLOCK_ID, 2, Terminator,
3, naclbitc::FUNC_CODE_DECLAREBLOCKS, 1, Terminator,
3, naclbitc::FUNC_CODE_INST_LOAD, 1, getEncAlignPower(0), 0, Terminator,
@@ -234,7 +234,7 @@
3, naclbitc::TYPE_CODE_INTEGER, 32, Terminator,
3, naclbitc::TYPE_CODE_FUNCTION, 0, 0, 1, Terminator,
0, naclbitc::BLK_CODE_EXIT, Terminator,
- 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 0, Terminator,
+ 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 3, Terminator,
1, naclbitc::BLK_CODE_ENTER, naclbitc::FUNCTION_BLOCK_ID, 2, Terminator,
3, naclbitc::FUNC_CODE_DECLAREBLOCKS, 1, Terminator,
3, naclbitc::FUNC_CODE_INST_LOAD, 1, getEncAlignPower(0), 0, Terminator,
@@ -315,7 +315,7 @@
3, naclbitc::TYPE_CODE_INTEGER, 32, Terminator,
3, naclbitc::TYPE_CODE_FUNCTION, 0, 0, 1, 0, Terminator,
0, naclbitc::BLK_CODE_EXIT, Terminator,
- 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 0, Terminator,
+ 3, naclbitc::MODULE_CODE_FUNCTION, 2, 0, 0, 3, Terminator,
1, naclbitc::BLK_CODE_ENTER, naclbitc::FUNCTION_BLOCK_ID, 2, Terminator,
3, naclbitc::FUNC_CODE_DECLAREBLOCKS, 1, Terminator,
3, naclbitc::FUNC_CODE_INST_STORE, 2, 1, getEncAlignPower(0), Terminator,