Subzero: Changes needed for LLVM 3.7 integration.
1. Change Makefile.standalone from 3.6 to 3.7.
2. Update to the new load instruction .ll syntax, in which load takes the pointee type as an explicit first operand (e.g. "load i32, i32* %p"). This includes changing InstLoad::dump() to match.
BUG= none
R=jvoung@chromium.org
Review URL: https://codereview.chromium.org/1161543005
diff --git a/Makefile.standalone b/Makefile.standalone
index df4429c..024f32a 100644
--- a/Makefile.standalone
+++ b/Makefile.standalone
@@ -31,7 +31,7 @@
PNACL_BIN_PATH ?= $(shell readlink -e $(PNACL_TOOLCHAIN_ROOT)/bin)
# Hack to auto-detect autoconf versus cmake build of LLVM. If the LLVM tools
-# were dynamically linked with something like libLLVM-3.6svn.so, it is an
+# were dynamically linked with something like libLLVM-3.7svn.so, it is an
# autoconf build, otherwise it is a cmake build. AUTOCONF is set to 0 for
# cmake, nonzero for autoconf.
AUTOCONF ?= $(shell ldd $(PNACL_BIN_PATH)/opt | grep -c libLLVM-)
@@ -133,7 +133,7 @@
CLANG_FORMAT_PATH ?= $(PNACL_BIN_PATH)
else
# LLVM autoconf build
- LLVM_LIBS := -lLLVM-3.6svn
+ LLVM_LIBS := -lLLVM-3.7svn
GTEST_LIB_PATH ?= -L../../out/llvm_x86_64_linux_work/Release+Asserts/lib
CLANG_FORMAT_PATH ?= ../../out/llvm_x86_64_linux_work/Release+Asserts/bin
endif
diff --git a/src/IceInst.cpp b/src/IceInst.cpp
index 0e6be75..659d5e5 100644
--- a/src/IceInst.cpp
+++ b/src/IceInst.cpp
@@ -699,7 +699,7 @@
Ostream &Str = Func->getContext()->getStrDump();
dumpDest(Func);
Type Ty = getDest()->getType();
- Str << " = load " << Ty << "* ";
+ Str << " = load " << Ty << ", " << Ty << "* ";
dumpSources(Func);
Str << ", align " << typeAlignInBytes(Ty);
}
diff --git a/tests_lit/assembler/x86/opcode_register_encodings.ll b/tests_lit/assembler/x86/opcode_register_encodings.ll
index bc98b04..a232175 100644
--- a/tests_lit/assembler/x86/opcode_register_encodings.ll
+++ b/tests_lit/assembler/x86/opcode_register_encodings.ll
@@ -115,9 +115,9 @@
%addr_v16xI8 = inttoptr i32 %addr to <16 x i8>*
%addr2_v16xI8 = inttoptr i32 %addr2 to <16 x i8>*
%addr3_v16xI8 = inttoptr i32 %addr3 to <16 x i8>*
- %res1 = load <16 x i8>* %addr2_v16xI8, align 1
- %res2 = load <16 x i8>* %addr_v16xI8, align 1
- %res3 = load <16 x i8>* %addr3_v16xI8, align 1
+ %res1 = load <16 x i8>, <16 x i8>* %addr2_v16xI8, align 1
+ %res2 = load <16 x i8>, <16 x i8>* %addr_v16xI8, align 1
+ %res3 = load <16 x i8>, <16 x i8>* %addr3_v16xI8, align 1
%res12 = add <16 x i8> %res1, %res2
%res123 = add <16 x i8> %res12, %res3
ret <16 x i8> %res123
@@ -135,7 +135,7 @@
%__1 = ptrtoint i8* %ptr to i32
%x = add i32 %__1, %__1
%__3 = inttoptr i32 %x to i32*
- %v = load i32* %__3, align 1
+ %v = load i32, i32* %__3, align 1
%v_add = add i32 %v, 1
%ptr2 = call i8* @llvm.nacl.read.tp()
diff --git a/tests_lit/llvm2ice_tests/64bit.pnacl.ll b/tests_lit/llvm2ice_tests/64bit.pnacl.ll
index 6c50d18..4a52392 100644
--- a/tests_lit/llvm2ice_tests/64bit.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/64bit.pnacl.ll
@@ -1291,7 +1291,7 @@
define internal i64 @load64(i32 %a) {
entry:
%__1 = inttoptr i32 %a to i64*
- %v0 = load i64* %__1, align 1
+ %v0 = load i64, i64* %__1, align 1
ret i64 %v0
}
; CHECK-LABEL: load64
diff --git a/tests_lit/llvm2ice_tests/8bit.pnacl.ll b/tests_lit/llvm2ice_tests/8bit.pnacl.ll
index 81db96d..837cb0b 100644
--- a/tests_lit/llvm2ice_tests/8bit.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/8bit.pnacl.ll
@@ -253,7 +253,7 @@
entry:
%a_8 = trunc i32 %a to i8
%bptr = inttoptr i32 %b_iptr to i8*
- %b_8 = load i8* %bptr, align 1
+ %b_8 = load i8, i8* %bptr, align 1
%icmp = icmp ne i8 %b_8, %a_8
%ret = zext i1 %icmp to i32
ret i32 %ret
@@ -265,7 +265,7 @@
entry:
%a_8 = trunc i32 %a to i8
%bptr = inttoptr i32 %b_iptr to i8*
- %b_8 = load i8* %bptr, align 1
+ %b_8 = load i8, i8* %bptr, align 1
%icmp = icmp ne i8 %a_8, %b_8
%ret = zext i1 %icmp to i32
ret i32 %ret
@@ -334,7 +334,7 @@
define i32 @load_i8(i32 %addr_arg) {
entry:
%addr = inttoptr i32 %addr_arg to i8*
- %ret = load i8* %addr, align 1
+ %ret = load i8, i8* %addr, align 1
%ret2 = sub i8 %ret, 0
%ret_ext = zext i8 %ret2 to i32
ret i32 %ret_ext
@@ -345,7 +345,7 @@
define i32 @load_i8_global(i32 %addr_arg) {
entry:
%addr = bitcast [1 x i8]* @global8 to i8*
- %ret = load i8* %addr, align 1
+ %ret = load i8, i8* %addr, align 1
%ret2 = sub i8 %ret, 0
%ret_ext = zext i8 %ret2 to i32
ret i32 %ret_ext
diff --git a/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll b/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll
index d7d7beb..19959b0 100644
--- a/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll
+++ b/tests_lit/llvm2ice_tests/addr-opt-multi-def-var.ll
@@ -15,7 +15,7 @@
entry:
%gep = add i32 %ptr, 76
%gep.asptr = inttoptr i32 %gep to i32*
- %0 = load i32* %gep.asptr, align 1
+ %0 = load i32, i32* %gep.asptr, align 1
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.end5
@@ -31,11 +31,11 @@
%gep_array = mul i32 %k, 4
%gep2 = add i32 %1, %gep_array
%gep2.asptr = inttoptr i32 %gep2 to i32*
- %2 = load i32* %gep2.asptr, align 1
+ %2 = load i32, i32* %gep2.asptr, align 1
; The above load instruction is a good target for address mode
; optimization. Correct analysis would lead to dump output like:
; Starting computeAddressOpt for instruction:
-; [ 15] %__13 = load i32* %gep2.asptr, align 1
+; [ 15] %__13 = load i32, i32* %gep2.asptr, align 1
; Instruction: [ 14] %gep2.asptr = i32 %gep2
; results in Base=%gep2, Index=<null>, Shift=0, Offset=0
; Instruction: [ 13] %gep2 = add i32 %__9, %gep_array
@@ -47,7 +47,7 @@
;
; Incorrect, overly-aggressive analysis would lead to output like:
; Starting computeAddressOpt for instruction:
-; [ 15] %__13 = load i32* %gep2.asptr, align 1
+; [ 15] %__13 = load i32, i32* %gep2.asptr, align 1
; Instruction: [ 14] %gep2.asptr = i32 %gep2
; results in Base=%gep2, Index=<null>, Shift=0, Offset=0
; Instruction: [ 13] %gep2 = add i32 %__9, %gep_array
diff --git a/tests_lit/llvm2ice_tests/address-mode-opt.ll b/tests_lit/llvm2ice_tests/address-mode-opt.ll
index 6f196c9..1c929f8 100644
--- a/tests_lit/llvm2ice_tests/address-mode-opt.ll
+++ b/tests_lit/llvm2ice_tests/address-mode-opt.ll
@@ -10,7 +10,7 @@
%arg.int = ptrtoint float* %arg to i32
%addr.int = add i32 %arg.int, 200000
%addr.ptr = inttoptr i32 %addr.int to float*
- %addr.load = load float* %addr.ptr, align 4
+ %addr.load = load float, float* %addr.ptr, align 4
ret float %addr.load
; CHECK-LABEL: load_arg_plus_200000
; CHECK: movss xmm0,DWORD PTR [eax+0x30d40]
@@ -21,7 +21,7 @@
%arg.int = ptrtoint float* %arg to i32
%addr.int = add i32 200000, %arg.int
%addr.ptr = inttoptr i32 %addr.int to float*
- %addr.load = load float* %addr.ptr, align 4
+ %addr.load = load float, float* %addr.ptr, align 4
ret float %addr.load
; CHECK-LABEL: load_200000_plus_arg
; CHECK: movss xmm0,DWORD PTR [eax+0x30d40]
@@ -32,7 +32,7 @@
%arg.int = ptrtoint float* %arg to i32
%addr.int = sub i32 %arg.int, 200000
%addr.ptr = inttoptr i32 %addr.int to float*
- %addr.load = load float* %addr.ptr, align 4
+ %addr.load = load float, float* %addr.ptr, align 4
ret float %addr.load
; CHECK-LABEL: load_arg_minus_200000
; CHECK: movss xmm0,DWORD PTR [eax-0x30d40]
@@ -43,7 +43,7 @@
%arg.int = ptrtoint float* %arg to i32
%addr.int = sub i32 200000, %arg.int
%addr.ptr = inttoptr i32 %addr.int to float*
- %addr.load = load float* %addr.ptr, align 4
+ %addr.load = load float, float* %addr.ptr, align 4
ret float %addr.load
; CHECK-LABEL: load_200000_minus_arg
; CHECK: movss xmm0,DWORD PTR [e{{..}}]
@@ -53,7 +53,7 @@
entry:
%addr_sub = sub i32 %arg1_iptr, 200000
%addr_ptr = inttoptr i32 %addr_sub to <8 x i16>*
- %arg1 = load <8 x i16>* %addr_ptr, align 2
+ %arg1 = load <8 x i16>, <8 x i16>* %addr_ptr, align 2
%res_vec = mul <8 x i16> %arg0, %arg1
ret <8 x i16> %res_vec
; Address mode optimization is generally unsafe for SSE vector instructions.
@@ -65,7 +65,7 @@
entry:
%addr_sub = sub i32 %arg1_iptr, 200000
%addr_ptr = inttoptr i32 %addr_sub to <4 x i32>*
- %arg1 = load <4 x i32>* %addr_ptr, align 4
+ %arg1 = load <4 x i32>, <4 x i32>* %addr_ptr, align 4
%res = mul <4 x i32> %arg0, %arg1
ret <4 x i32> %res
; Address mode optimization is generally unsafe for SSE vector instructions.
@@ -83,7 +83,7 @@
%addr1.int = add i32 12, %arg.int
%addr2.int = sub i32 %addr1.int, 4
%addr2.ptr = inttoptr i32 %addr2.int to float*
- %addr2.load = load float* %addr2.ptr, align 4
+ %addr2.load = load float, float* %addr2.ptr, align 4
ret float %addr2.load
; CHECK-LABEL: address_mode_opt_chaining
; CHECK: movss xmm0,DWORD PTR [eax+0x8]
@@ -95,7 +95,7 @@
%addr1.int = add i32 2147483640, %arg.int
%addr2.int = add i32 %addr1.int, 2147483643
%addr2.ptr = inttoptr i32 %addr2.int to float*
- %addr2.load = load float* %addr2.ptr, align 4
+ %addr2.load = load float, float* %addr2.ptr, align 4
ret float %addr2.load
; CHECK-LABEL: address_mode_opt_chaining_overflow
; CHECK: 0x7ffffff8
@@ -108,7 +108,7 @@
%addr1.int = sub i32 %arg.int, 2147483640
%addr2.int = sub i32 %addr1.int, 2147483643
%addr2.ptr = inttoptr i32 %addr2.int to float*
- %addr2.load = load float* %addr2.ptr, align 4
+ %addr2.load = load float, float* %addr2.ptr, align 4
ret float %addr2.load
; CHECK-LABEL: address_mode_opt_chaining_overflow_sub
; CHECK: 0x7ffffff8
@@ -121,7 +121,7 @@
%addr1.int = sub i32 %arg.int, 2147483640
%addr2.int = add i32 %addr1.int, 2147483643
%addr2.ptr = inttoptr i32 %addr2.int to float*
- %addr2.load = load float* %addr2.ptr, align 4
+ %addr2.load = load float, float* %addr2.ptr, align 4
ret float %addr2.load
; CHECK-LABEL: address_mode_opt_chaining_no_overflow
; CHECK: movss xmm0,DWORD PTR [{{.*}}+0x3]
@@ -132,7 +132,7 @@
%arg.int = ptrtoint float* %arg to i32
%addr1.int = add i32 %arg.int, 2147483648
%addr1.ptr = inttoptr i32 %addr1.int to float*
- %addr1.load = load float* %addr1.ptr, align 4
+ %addr1.load = load float, float* %addr1.ptr, align 4
ret float %addr1.load
; CHECK-LABEL: address_mode_opt_add_pos_min_int
; CHECK: movss xmm0,DWORD PTR [{{.*}}-0x80000000]
@@ -143,7 +143,7 @@
%arg.int = ptrtoint float* %arg to i32
%addr1.int = sub i32 %arg.int, 2147483648
%addr1.ptr = inttoptr i32 %addr1.int to float*
- %addr1.load = load float* %addr1.ptr, align 4
+ %addr1.load = load float, float* %addr1.ptr, align 4
ret float %addr1.load
; CHECK-LABEL: address_mode_opt_sub_min_int
; CHECK: movss xmm0,DWORD PTR [{{.*}}-0x80000000]
diff --git a/tests_lit/llvm2ice_tests/callindirect.pnacl.ll b/tests_lit/llvm2ice_tests/callindirect.pnacl.ll
index b77ae53..632c19e 100644
--- a/tests_lit/llvm2ice_tests/callindirect.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/callindirect.pnacl.ll
@@ -44,7 +44,7 @@
define internal void @CallIndirectGlobal() {
entry:
%fp_ptr_i32 = bitcast [4 x i8]* @fp_v to i32*
- %fp_ptr = load i32* %fp_ptr_i32, align 1
+ %fp_ptr = load i32, i32* %fp_ptr_i32, align 1
%fp = inttoptr i32 %fp_ptr to void ()*
call void %fp()
call void %fp()
diff --git a/tests_lit/llvm2ice_tests/convert.ll b/tests_lit/llvm2ice_tests/convert.ll
index 7f03994..1676b5c 100644
--- a/tests_lit/llvm2ice_tests/convert.ll
+++ b/tests_lit/llvm2ice_tests/convert.ll
@@ -15,7 +15,7 @@
define void @from_int8() {
entry:
%__0 = bitcast [1 x i8]* @i8v to i8*
- %v0 = load i8* %__0, align 1
+ %v0 = load i8, i8* %__0, align 1
%v1 = sext i8 %v0 to i16
%__3 = bitcast [2 x i8]* @i16v to i16*
store i16 %v1, i16* %__3, align 1
@@ -41,7 +41,7 @@
define void @from_int16() {
entry:
%__0 = bitcast [2 x i8]* @i16v to i16*
- %v0 = load i16* %__0, align 1
+ %v0 = load i16, i16* %__0, align 1
%v1 = trunc i16 %v0 to i8
%__3 = bitcast [1 x i8]* @i8v to i8*
store i8 %v1, i8* %__3, align 1
@@ -65,7 +65,7 @@
define void @from_int32() {
entry:
%__0 = bitcast [4 x i8]* @i32v to i32*
- %v0 = load i32* %__0, align 1
+ %v0 = load i32, i32* %__0, align 1
%v1 = trunc i32 %v0 to i8
%__3 = bitcast [1 x i8]* @i8v to i8*
store i8 %v1, i8* %__3, align 1
@@ -87,7 +87,7 @@
define void @from_int64() {
entry:
%__0 = bitcast [8 x i8]* @i64v to i64*
- %v0 = load i64* %__0, align 1
+ %v0 = load i64, i64* %__0, align 1
%v1 = trunc i64 %v0 to i8
%__3 = bitcast [1 x i8]* @i8v to i8*
store i8 %v1, i8* %__3, align 1
@@ -109,7 +109,7 @@
define void @from_uint8() {
entry:
%__0 = bitcast [1 x i8]* @u8v to i8*
- %v0 = load i8* %__0, align 1
+ %v0 = load i8, i8* %__0, align 1
%v1 = zext i8 %v0 to i16
%__3 = bitcast [2 x i8]* @i16v to i16*
store i16 %v1, i16* %__3, align 1
@@ -134,7 +134,7 @@
define void @from_uint16() {
entry:
%__0 = bitcast [2 x i8]* @u16v to i16*
- %v0 = load i16* %__0, align 1
+ %v0 = load i16, i16* %__0, align 1
%v1 = trunc i16 %v0 to i8
%__3 = bitcast [1 x i8]* @i8v to i8*
store i8 %v1, i8* %__3, align 1
@@ -158,7 +158,7 @@
define void @from_uint32() {
entry:
%__0 = bitcast [4 x i8]* @u32v to i32*
- %v0 = load i32* %__0, align 1
+ %v0 = load i32, i32* %__0, align 1
%v1 = trunc i32 %v0 to i8
%__3 = bitcast [1 x i8]* @i8v to i8*
store i8 %v1, i8* %__3, align 1
@@ -180,7 +180,7 @@
define void @from_uint64() {
entry:
%__0 = bitcast [8 x i8]* @u64v to i64*
- %v0 = load i64* %__0, align 1
+ %v0 = load i64, i64* %__0, align 1
%v1 = trunc i64 %v0 to i8
%__3 = bitcast [1 x i8]* @i8v to i8*
store i8 %v1, i8* %__3, align 1
diff --git a/tests_lit/llvm2ice_tests/fp.pnacl.ll b/tests_lit/llvm2ice_tests/fp.pnacl.ll
index ddd51af..ef66b02 100644
--- a/tests_lit/llvm2ice_tests/fp.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/fp.pnacl.ll
@@ -1103,7 +1103,7 @@
define internal float @loadFloat(i32 %a) {
entry:
%__1 = inttoptr i32 %a to float*
- %v0 = load float* %__1, align 4
+ %v0 = load float, float* %__1, align 4
ret float %v0
}
; CHECK-LABEL: loadFloat
@@ -1113,7 +1113,7 @@
define internal double @loadDouble(i32 %a) {
entry:
%__1 = inttoptr i32 %a to double*
- %v0 = load double* %__1, align 8
+ %v0 = load double, double* %__1, align 8
ret double %v0
}
; CHECK-LABEL: loadDouble
diff --git a/tests_lit/llvm2ice_tests/ias-multi-reloc.ll b/tests_lit/llvm2ice_tests/ias-multi-reloc.ll
index 8de1b36..c5b0fc2 100644
--- a/tests_lit/llvm2ice_tests/ias-multi-reloc.ll
+++ b/tests_lit/llvm2ice_tests/ias-multi-reloc.ll
@@ -29,7 +29,7 @@
define internal void @add_in_place() {
entry:
%p_global_char.bc = bitcast [4 x i8]* @p_global_char to i32*
- %0 = load i32* %p_global_char.bc, align 1
+ %0 = load i32, i32* %p_global_char.bc, align 1
%expanded1 = ptrtoint [1 x i8]* @global_char to i32
%gep = add i32 %0, %expanded1
%p_global_char.bc3 = bitcast [4 x i8]* @p_global_char to i32*
@@ -43,7 +43,7 @@
define internal void @cmp_global_immediate() {
entry:
%p_global_char.bc = bitcast [4 x i8]* @p_global_char to i32*
- %0 = load i32* %p_global_char.bc, align 1
+ %0 = load i32, i32* %p_global_char.bc, align 1
%expanded1 = ptrtoint [1 x i8]* @global_char to i32
%cmp = icmp eq i32 %0, %expanded1
br i1 %cmp, label %if.then, label %if.end
diff --git a/tests_lit/llvm2ice_tests/load.ll b/tests_lit/llvm2ice_tests/load.ll
index caf99e3..7b88fb3 100644
--- a/tests_lit/llvm2ice_tests/load.ll
+++ b/tests_lit/llvm2ice_tests/load.ll
@@ -7,47 +7,47 @@
define void @load_i64(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i64*
- %iv = load i64* %__1, align 1
+ %iv = load i64, i64* %__1, align 1
ret void
; CHECK: Initial CFG
; CHECK: entry:
-; CHECK-NEXT: %iv = load i64* %addr_arg, align 1
+; CHECK-NEXT: %iv = load i64, i64* %addr_arg, align 1
; CHECK-NEXT: ret void
}
define void @load_i32(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i32*
- %iv = load i32* %__1, align 1
+ %iv = load i32, i32* %__1, align 1
ret void
; CHECK: Initial CFG
; CHECK: entry:
-; CHECK-NEXT: %iv = load i32* %addr_arg, align 1
+; CHECK-NEXT: %iv = load i32, i32* %addr_arg, align 1
; CHECK-NEXT: ret void
}
define void @load_i16(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i16*
- %iv = load i16* %__1, align 1
+ %iv = load i16, i16* %__1, align 1
ret void
; CHECK: Initial CFG
; CHECK: entry:
-; CHECK-NEXT: %iv = load i16* %addr_arg, align 1
+; CHECK-NEXT: %iv = load i16, i16* %addr_arg, align 1
; CHECK-NEXT: ret void
}
define void @load_i8(i32 %addr_arg) {
entry:
%__1 = inttoptr i32 %addr_arg to i8*
- %iv = load i8* %__1, align 1
+ %iv = load i8, i8* %__1, align 1
ret void
; CHECK: Initial CFG
; CHECK: entry:
-; CHECK-NEXT: %iv = load i8* %addr_arg, align 1
+; CHECK-NEXT: %iv = load i8, i8* %addr_arg, align 1
; CHECK-NEXT: ret void
}
diff --git a/tests_lit/llvm2ice_tests/load_cast.ll b/tests_lit/llvm2ice_tests/load_cast.ll
index 5395d04..07e7173 100644
--- a/tests_lit/llvm2ice_tests/load_cast.ll
+++ b/tests_lit/llvm2ice_tests/load_cast.ll
@@ -11,7 +11,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i8*
- %load = load i8* %addr, align 1
+ %load = load i8, i8* %addr, align 1
%result = zext i8 %load to i32
ret i32 %result
}
@@ -22,7 +22,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i8*
- %load = load i8* %addr, align 1
+ %load = load i8, i8* %addr, align 1
%tmp1 = zext i8 %load to i32
%tmp2 = zext i8 %load to i32
%result = add i32 %tmp1, %tmp2
@@ -36,7 +36,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i8*
- %load = load i8* %addr, align 1
+ %load = load i8, i8* %addr, align 1
%result = sext i8 %load to i32
ret i32 %result
}
@@ -47,7 +47,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i8*
- %load = load i8* %addr, align 1
+ %load = load i8, i8* %addr, align 1
%tmp1 = sext i8 %load to i32
%tmp2 = sext i8 %load to i32
%result = add i32 %tmp1, %tmp2
@@ -61,7 +61,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%result = fptrunc double %load to float
ret float %result
}
@@ -72,7 +72,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%tmp1 = fptrunc double %load to float
%tmp2 = fptrunc double %load to float
%result = fadd float %tmp1, %tmp2
@@ -86,7 +86,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to float*
- %load = load float* %addr, align 4
+ %load = load float, float* %addr, align 4
%result = fpext float %load to double
ret double %result
}
@@ -97,7 +97,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to float*
- %load = load float* %addr, align 4
+ %load = load float, float* %addr, align 4
%tmp1 = fpext float %load to double
%tmp2 = fpext float %load to double
%result = fadd double %tmp1, %tmp2
@@ -111,7 +111,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%result = fptoui double %load to i16
%result2 = zext i16 %result to i32
ret i32 %result2
@@ -123,7 +123,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%tmp1 = fptoui double %load to i16
%tmp2 = fptoui double %load to i16
%result = add i16 %tmp1, %tmp2
@@ -138,7 +138,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%result = fptosi double %load to i16
%result2 = zext i16 %result to i32
ret i32 %result2
@@ -150,7 +150,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%tmp1 = fptosi double %load to i16
%tmp2 = fptosi double %load to i16
%result = add i16 %tmp1, %tmp2
@@ -165,7 +165,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i16*
- %load = load i16* %addr, align 1
+ %load = load i16, i16* %addr, align 1
%result = uitofp i16 %load to double
ret double %result
}
@@ -176,7 +176,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i16*
- %load = load i16* %addr, align 1
+ %load = load i16, i16* %addr, align 1
%tmp1 = uitofp i16 %load to double
%tmp2 = uitofp i16 %load to double
%result = fadd double %tmp1, %tmp2
@@ -190,7 +190,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i16*
- %load = load i16* %addr, align 1
+ %load = load i16, i16* %addr, align 1
%result = sitofp i16 %load to double
ret double %result
}
@@ -201,7 +201,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i16*
- %load = load i16* %addr, align 1
+ %load = load i16, i16* %addr, align 1
%tmp1 = sitofp i16 %load to double
%tmp2 = sitofp i16 %load to double
%result = fadd double %tmp1, %tmp2
@@ -215,7 +215,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i64*
- %load = load i64* %addr, align 1
+ %load = load i64, i64* %addr, align 1
%result = bitcast i64 %load to double
ret double %result
}
@@ -226,7 +226,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to i64*
- %load = load i64* %addr, align 1
+ %load = load i64, i64* %addr, align 1
%tmp1 = bitcast i64 %load to double
%tmp2 = bitcast i64 %load to double
%result = fadd double %tmp1, %tmp2
@@ -240,7 +240,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%result = bitcast double %load to i64
ret i64 %result
}
@@ -254,7 +254,7 @@
entry:
%ptr = add i32 %arg, 200
%addr = inttoptr i32 %ptr to double*
- %load = load double* %addr, align 8
+ %load = load double, double* %addr, align 8
%tmp1 = bitcast double %load to i64
%tmp2 = bitcast double %load to i64
%result = add i64 %tmp1, %tmp2
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
index ec89067..1b33546 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
@@ -26,12 +26,12 @@
call void @llvm.nacl.atomic.store.i32(i32 %l_a2, i32* %p_a, i32 6)
%p_b = bitcast [4 x i8]* @g32_b to i32*
- %l_b = load i32* %p_b, align 1
+ %l_b = load i32, i32* %p_b, align 1
%l_b2 = add i32 %l_b, 1
store i32 %l_b2, i32* %p_b, align 1
%p_c = bitcast [4 x i8]* @g32_c to i32*
- %l_c = load i32* %p_c, align 1
+ %l_c = load i32, i32* %p_c, align 1
%l_c2 = add i32 %l_c, 1
call void @llvm.nacl.atomic.fence.all()
store i32 %l_c2, i32* %p_c, align 1
@@ -66,13 +66,13 @@
call void @llvm.nacl.atomic.store.i32(i32 %l_a2, i32* %p_a, i32 6)
%p_b = bitcast [4 x i8]* @g32_b to i32*
- %l_b = load i32* %p_b, align 1
+ %l_b = load i32, i32* %p_b, align 1
%l_b2 = add i32 %l_b, 1
store i32 %l_b2, i32* %p_b, align 1
%p_c = bitcast [4 x i8]* @g32_c to i32*
call void @llvm.nacl.atomic.fence.all()
- %l_c = load i32* %p_c, align 1
+ %l_c = load i32, i32* %p_c, align 1
%l_c2 = add i32 %l_c, 1
store i32 %l_c2, i32* %p_c, align 1
@@ -107,13 +107,13 @@
call void @llvm.nacl.atomic.store.i32(i32 %l_a2, i32* %p_a, i32 6)
%p_b = bitcast [4 x i8]* @g32_b to i32*
- %l_b = load i32* %p_b, align 1
+ %l_b = load i32, i32* %p_b, align 1
call void @llvm.nacl.atomic.fence.all()
%l_b2 = add i32 %l_b, 1
store i32 %l_b2, i32* %p_b, align 1
%p_c = bitcast [4 x i8]* @g32_c to i32*
- %l_c = load i32* %p_c, align 1
+ %l_c = load i32, i32* %p_c, align 1
%l_c2 = add i32 %l_c, 1
store i32 %l_c2, i32* %p_c, align 1
@@ -143,22 +143,22 @@
define i32 @could_have_fused_loads() {
entry:
%ptr1 = bitcast [4 x i8]* @g32_d to i8*
- %b1 = load i8* %ptr1, align 1
+ %b1 = load i8, i8* %ptr1, align 1
%int_ptr2 = ptrtoint [4 x i8]* @g32_d to i32
%int_ptr_bump2 = add i32 %int_ptr2, 1
%ptr2 = inttoptr i32 %int_ptr_bump2 to i8*
- %b2 = load i8* %ptr2, align 1
+ %b2 = load i8, i8* %ptr2, align 1
%int_ptr_bump3 = add i32 %int_ptr2, 2
%ptr3 = inttoptr i32 %int_ptr_bump3 to i8*
- %b3 = load i8* %ptr3, align 1
+ %b3 = load i8, i8* %ptr3, align 1
call void @llvm.nacl.atomic.fence.all()
%int_ptr_bump4 = add i32 %int_ptr2, 3
%ptr4 = inttoptr i32 %int_ptr_bump4 to i8*
- %b4 = load i8* %ptr4, align 1
+ %b4 = load i8, i8* %ptr4, align 1
%b1.ext = zext i8 %b1 to i32
%b2.ext = zext i8 %b2 to i32
@@ -188,11 +188,11 @@
%cmp = icmp eq i32 %x, 1
br i1 %cmp, label %branch1, label %branch2
branch1:
- %y = load i32* %ptr, align 1
+ %y = load i32, i32* %ptr, align 1
ret i32 %y
branch2:
call void @llvm.nacl.atomic.fence.all()
- %z = load i32* %ptr, align 1
+ %z = load i32, i32* %ptr, align 1
ret i32 %z
}
; CHECK-LABEL: could_have_hoisted_loads
diff --git a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
index 67153dd..da56571 100644
--- a/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
+++ b/tests_lit/llvm2ice_tests/nacl-other-intrinsics.ll
@@ -60,7 +60,7 @@
%__1 = ptrtoint i8* %ptr to i32
%x = add i32 %__1, %__1
%__3 = inttoptr i32 %x to i32*
- %v = load i32* %__3, align 1
+ %v = load i32, i32* %__3, align 1
%v_add = add i32 %v, 1
%ptr2 = call i8* @llvm.nacl.read.tp()
@@ -226,7 +226,7 @@
define float @test_sqrt_float_mergeable_load(float %x, i32 %iptr) {
entry:
%__2 = inttoptr i32 %iptr to float*
- %y = load float* %__2, align 4
+ %y = load float, float* %__2, align 4
%r5 = call float @llvm.sqrt.f32(float %y)
%r6 = fadd float %x, %r5
ret float %r6
@@ -253,7 +253,7 @@
define double @test_sqrt_double_mergeable_load(double %x, i32 %iptr) {
entry:
%__2 = inttoptr i32 %iptr to double*
- %y = load double* %__2, align 8
+ %y = load double, double* %__2, align 8
%r5 = call double @llvm.sqrt.f64(double %y)
%r6 = fadd double %x, %r5
ret double %r6
diff --git a/tests_lit/llvm2ice_tests/phi.ll b/tests_lit/llvm2ice_tests/phi.ll
index 2470a80..86da00d 100644
--- a/tests_lit/llvm2ice_tests/phi.ll
+++ b/tests_lit/llvm2ice_tests/phi.ll
@@ -59,7 +59,7 @@
; addressing mode optimization.
%interior__4 = add i32 %interior, 0
%__4 = inttoptr i32 %interior__4 to i32*
- %elt = load i32* %__4, align 1
+ %elt = load i32, i32* %__4, align 1
%cmp = icmp eq i32 %elt, 0
br i1 %cmp, label %exit, label %body
exit:
diff --git a/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll b/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll
index 6a1ad65..f3d4b5d 100644
--- a/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll
+++ b/tests_lit/llvm2ice_tests/regalloc_evict_non_overlap.ll
@@ -32,14 +32,14 @@
%tmp23 = add i32 undef, -1
%tmp24 = add i32 undef, undef
%undef.ptr = inttoptr i32 undef to i32*
- %tmp25 = load i32* %undef.ptr, align 1
+ %tmp25 = load i32, i32* %undef.ptr, align 1
%tmp26 = icmp eq i32 undef, %tmp22
br i1 %tmp26, label %bb34, label %bb32
bb27: ; preds = %bb42, %bb34
%tmp28 = icmp sgt i32 %tmp23, 0
%tmp29 = inttoptr i32 %tmp19 to i32*
- %tmp30 = load i32* %tmp29, align 1
+ %tmp30 = load i32, i32* %tmp29, align 1
br i1 %tmp28, label %bb21, label %bb46
bb32: ; preds = %bb21
@@ -57,7 +57,7 @@
bb42: ; preds = %bb35
%tmp43 = inttoptr i32 %tmp to i32*
- %tmp44 = load i32* %tmp43, align 1
+ %tmp44 = load i32, i32* %tmp43, align 1
%tmp45 = icmp eq i32 %tmp44, %tmp18
br i1 %tmp45, label %bb27, label %bb15
diff --git a/tests_lit/llvm2ice_tests/shift.ll b/tests_lit/llvm2ice_tests/shift.ll
index fec0d0f..1fd77a5 100644
--- a/tests_lit/llvm2ice_tests/shift.ll
+++ b/tests_lit/llvm2ice_tests/shift.ll
@@ -13,7 +13,7 @@
define void @conv1() {
entry:
%__0 = bitcast [4 x i8]* @u1 to i32*
- %v0 = load i32* %__0, align 1
+ %v0 = load i32, i32* %__0, align 1
%sext = shl i32 %v0, 24
%v1 = ashr i32 %sext, 24
%__4 = bitcast [4 x i8]* @i1 to i32*
@@ -27,7 +27,7 @@
define void @conv2() {
entry:
%__0 = bitcast [4 x i8]* @u1 to i32*
- %v0 = load i32* %__0, align 1
+ %v0 = load i32, i32* %__0, align 1
%sext1 = shl i32 %v0, 16
%v1 = ashr i32 %sext1, 16
%__4 = bitcast [4 x i8]* @i2 to i32*
diff --git a/tests_lit/llvm2ice_tests/simple-loop.ll b/tests_lit/llvm2ice_tests/simple-loop.ll
index 0b06c76..6d114fa 100644
--- a/tests_lit/llvm2ice_tests/simple-loop.ll
+++ b/tests_lit/llvm2ice_tests/simple-loop.ll
@@ -17,7 +17,7 @@
%gep_array = mul i32 %i.06, 4
%gep = add i32 %a, %gep_array
%__9 = inttoptr i32 %gep to i32*
- %v0 = load i32* %__9, align 1
+ %v0 = load i32, i32* %__9, align 1
%add = add i32 %v0, %sum.05
%inc = add i32 %i.06, 1
%cmp = icmp slt i32 %inc, %n
diff --git a/tests_lit/llvm2ice_tests/struct-arith.pnacl.ll b/tests_lit/llvm2ice_tests/struct-arith.pnacl.ll
index 4789d88..acab66b 100644
--- a/tests_lit/llvm2ice_tests/struct-arith.pnacl.ll
+++ b/tests_lit/llvm2ice_tests/struct-arith.pnacl.ll
@@ -7,35 +7,35 @@
define internal i32 @compute_important_function(i32 %v1, i32 %v2) {
entry:
%__2 = inttoptr i32 %v1 to i32*
- %_v0 = load i32* %__2, align 1
+ %_v0 = load i32, i32* %__2, align 1
; CHECK: entry:
-; CHECK-NEXT: %_v0 = load i32* {{.*}}, align 1
+; CHECK-NEXT: %_v0 = load i32, i32* {{.*}}, align 1
%__4 = inttoptr i32 %v2 to i32*
- %_v1 = load i32* %__4, align 1
+ %_v1 = load i32, i32* %__4, align 1
%gep = add i32 %v2, 12
%__7 = inttoptr i32 %gep to i32*
- %_v2 = load i32* %__7, align 1
+ %_v2 = load i32, i32* %__7, align 1
%mul = mul i32 %_v2, %_v1
%gep6 = add i32 %v1, 4
%__11 = inttoptr i32 %gep6 to i32*
- %_v3 = load i32* %__11, align 1
+ %_v3 = load i32, i32* %__11, align 1
%gep8 = add i32 %v2, 8
%__14 = inttoptr i32 %gep8 to i32*
- %_v4 = load i32* %__14, align 1
+ %_v4 = load i32, i32* %__14, align 1
%gep10 = add i32 %v2, 4
%__17 = inttoptr i32 %gep10 to i32*
- %_v5 = load i32* %__17, align 1
+ %_v5 = load i32, i32* %__17, align 1
%mul3 = mul i32 %_v5, %_v4
%gep12 = add i32 %v1, 8
%__21 = inttoptr i32 %gep12 to i32*
- %_v6 = load i32* %__21, align 1
+ %_v6 = load i32, i32* %__21, align 1
%mul7 = mul i32 %_v6, %_v3
%mul9 = mul i32 %mul7, %_v6
%gep14 = add i32 %v1, 12
%__26 = inttoptr i32 %gep14 to i32*
- %_v7 = load i32* %__26, align 1
+ %_v7 = load i32, i32* %__26, align 1
%mul11 = mul i32 %mul9, %_v7
%add4.neg = add i32 %mul, %_v0
%add = sub i32 %add4.neg, %_v3
diff --git a/tests_lit/llvm2ice_tests/vector-align.ll b/tests_lit/llvm2ice_tests/vector-align.ll
index 4964f6c..85f09fd 100644
--- a/tests_lit/llvm2ice_tests/vector-align.ll
+++ b/tests_lit/llvm2ice_tests/vector-align.ll
@@ -10,7 +10,7 @@
define <4 x i32> @test_add(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
- %loaded = load <4 x i32>* %addr, align 4
+ %loaded = load <4 x i32>, <4 x i32>* %addr, align 4
%result = add <4 x i32> %addend, %loaded
ret <4 x i32> %result
}
@@ -21,7 +21,7 @@
define <4 x i32> @test_and(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
- %loaded = load <4 x i32>* %addr, align 4
+ %loaded = load <4 x i32>, <4 x i32>* %addr, align 4
%result = and <4 x i32> %addend, %loaded
ret <4 x i32> %result
}
@@ -32,7 +32,7 @@
define <4 x i32> @test_or(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
- %loaded = load <4 x i32>* %addr, align 4
+ %loaded = load <4 x i32>, <4 x i32>* %addr, align 4
%result = or <4 x i32> %addend, %loaded
ret <4 x i32> %result
}
@@ -43,7 +43,7 @@
define <4 x i32> @test_xor(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
- %loaded = load <4 x i32>* %addr, align 4
+ %loaded = load <4 x i32>, <4 x i32>* %addr, align 4
%result = xor <4 x i32> %addend, %loaded
ret <4 x i32> %result
}
@@ -54,7 +54,7 @@
define <4 x i32> @test_sub(i32 %addr_i, <4 x i32> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x i32>*
- %loaded = load <4 x i32>* %addr, align 4
+ %loaded = load <4 x i32>, <4 x i32>* %addr, align 4
%result = sub <4 x i32> %addend, %loaded
ret <4 x i32> %result
}
@@ -65,7 +65,7 @@
define <4 x float> @test_fadd(i32 %addr_i, <4 x float> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x float>*
- %loaded = load <4 x float>* %addr, align 4
+ %loaded = load <4 x float>, <4 x float>* %addr, align 4
%result = fadd <4 x float> %addend, %loaded
ret <4 x float> %result
}
@@ -76,7 +76,7 @@
define <4 x float> @test_fsub(i32 %addr_i, <4 x float> %addend) {
entry:
%addr = inttoptr i32 %addr_i to <4 x float>*
- %loaded = load <4 x float>* %addr, align 4
+ %loaded = load <4 x float>, <4 x float>* %addr, align 4
%result = fsub <4 x float> %addend, %loaded
ret <4 x float> %result
}
diff --git a/tests_lit/reader_tests/extern_globals.ll b/tests_lit/reader_tests/extern_globals.ll
index 3b75451..9d5eb45 100644
--- a/tests_lit/reader_tests/extern_globals.ll
+++ b/tests_lit/reader_tests/extern_globals.ll
@@ -80,7 +80,7 @@
%NumArraysElements.bc = bitcast [4 x i8]* @NumArraysElements to i32*
; CHECK: %NumArraysElements.bc = bitcast i32 @NumArraysElements to i32
; CROSS: %NumArraysElements.bc = bitcast i32 @Subzero_NumArraysElements to i32
- %0 = load i32* %NumArraysElements.bc, align 1
+ %0 = load i32, i32* %NumArraysElements.bc, align 1
ret i32 %0
}
@@ -91,7 +91,7 @@
%NumArraysElements.bc = bitcast [4 x i8]* @NumArraysElements to i32*
; CHECK: %NumArraysElements.bc = bitcast i32 @NumArraysElements to i32
; CROSS: %NumArraysElements.bc = bitcast i32 @Subzero_NumArraysElements to i32
- %0 = load i32* %NumArraysElements.bc, align 1
+ %0 = load i32, i32* %NumArraysElements.bc, align 1
%cmp = icmp ugt i32 %0, %WhichArray
; CHECK: %cmp = icmp ugt i32 %__3, %WhichArray
; CROSS: %cmp = icmp ugt i32 %__3, %WhichArray
@@ -114,7 +114,7 @@
%gep = add i32 %expanded1, %gep_array
%gep1 = add i32 %gep, 4
%gep1.asptr = inttoptr i32 %gep1 to i32*
- %1 = load i32* %gep1.asptr, align 1
+ %1 = load i32, i32* %gep1.asptr, align 1
%Len.asptr3 = inttoptr i32 %Len to i32*
; CHECK: %Len.asptr3 = i32 %Len
; CROSS: %Len.asptr3 = i32 %Len
@@ -127,7 +127,7 @@
; CROSS: %expanded2 = i32 @Subzero_Arrays
%gep4 = add i32 %expanded2, %gep_array3
%gep4.asptr = inttoptr i32 %gep4 to i32*
- %2 = load i32* %gep4.asptr, align 1
+ %2 = load i32, i32* %gep4.asptr, align 1
br label %return
return: ; preds = %if.end, %if.then
@@ -170,7 +170,7 @@
%ExternName1.bc = bitcast [4 x i8]* @ExternName1 to i32*
; CHECK: %ExternName1.bc = bitcast i32 @ExternName1 to i32
; CROSS: %ExternName1.bc = bitcast i32 @ExternName1 to i32
- %0 = load i32* %ExternName1.bc, align 1
+ %0 = load i32, i32* %ExternName1.bc, align 1
%expanded6 = ptrtoint [80 x i8]* @_ZL8StructEx to i32
; CHECK: %expanded6 = i32 @_ZL8StructEx
; CROSS: %expanded6 = i32 @Subzero__ZL8StructEx
@@ -198,7 +198,7 @@
%ExternName4.bc = bitcast [4 x i8]* @ExternName4 to i32*
; CHECK: %ExternName4.bc = bitcast i32 @ExternName4 to i32
; CROSS: %ExternName4.bc = bitcast i32 @ExternName4 to i32
- %1 = load i32* %ExternName4.bc, align 1
+ %1 = load i32, i32* %ExternName4.bc, align 1
%expanded11 = ptrtoint [80 x i8]* @_ZL8StructEx to i32
; CHECK: %expanded11 = i32 @_ZL8StructEx
; CROSS: %expanded11 = i32 @Subzero__ZL8StructEx
@@ -208,7 +208,7 @@
%ExternName3.bc = bitcast [4 x i8]* @ExternName3 to i32*
; CHECK: %ExternName3.bc = bitcast i32 @ExternName3 to i32
; CROSS: %ExternName3.bc = bitcast i32 @ExternName3 to i32
- %2 = load i32* %ExternName3.bc, align 1
+ %2 = load i32, i32* %ExternName3.bc, align 1
%expanded13 = ptrtoint [80 x i8]* @_ZL8StructEx to i32
; CHECK: %expanded13 = i32 @_ZL8StructEx
; CROSS: %expanded13 = i32 @Subzero__ZL8StructEx
@@ -236,7 +236,7 @@
%ExternName2.bc = bitcast [4 x i8]* @ExternName2 to i32*
; CHECK: %ExternName2.bc = bitcast i32 @ExternName2 to i32
; CROSS: %ExternName2.bc = bitcast i32 @ExternName2 to i32
- %3 = load i32* %ExternName2.bc, align 1
+ %3 = load i32, i32* %ExternName2.bc, align 1
%expanded18 = ptrtoint [80 x i8]* @_ZL8StructEx to i32
; CHECK: %expanded18 = i32 @_ZL8StructEx
; CROSS: %expanded18 = i32 @Subzero__ZL8StructEx
@@ -246,7 +246,7 @@
%ExternName5.bc = bitcast [4 x i8]* @ExternName5 to i32*
; CHECK: %ExternName5.bc = bitcast i32 @ExternName5 to i32
; CROSS: %ExternName5.bc = bitcast i32 @ExternName5 to i32
- %4 = load i32* %ExternName5.bc, align 1
+ %4 = load i32, i32* %ExternName5.bc, align 1
%expanded20 = ptrtoint [80 x i8]* @_ZL8StructEx to i32
; CHECK: %expanded20 = i32 @_ZL8StructEx
; CROSS: %expanded20 = i32 @Subzero__ZL8StructEx
diff --git a/tests_lit/reader_tests/load.ll b/tests_lit/reader_tests/load.ll
index 75591ae..087e750 100644
--- a/tests_lit/reader_tests/load.ll
+++ b/tests_lit/reader_tests/load.ll
@@ -9,12 +9,12 @@
define i32 @load_i8(i32 %addr) {
entry:
%addr_i8 = inttoptr i32 %addr to i8*
- %v = load i8* %addr_i8, align 1
+ %v = load i8, i8* %addr_i8, align 1
%r = sext i8 %v to i32
ret i32 %r
; CHECK: __0:
-; CHECK-NEXT: %__1 = load i8* %__0, align 1
+; CHECK-NEXT: %__1 = load i8, i8* %__0, align 1
; CHECK-NEXT: %__2 = sext i8 %__1 to i32
; CHECK-NEXT: ret i32 %__2
}
@@ -22,12 +22,12 @@
define i32 @load_i16(i32 %addr) {
entry:
%addr_i16 = inttoptr i32 %addr to i16*
- %v = load i16* %addr_i16, align 1
+ %v = load i16, i16* %addr_i16, align 1
%r = sext i16 %v to i32
ret i32 %r
; CHECK: __0:
-; CHECK-NEXT: %__1 = load i16* %__0, align 1
+; CHECK-NEXT: %__1 = load i16, i16* %__0, align 1
; CHECK-NEXT: %__2 = sext i16 %__1 to i32
; CHECK-NEXT: ret i32 %__2
}
@@ -35,35 +35,35 @@
define i32 @load_i32(i32 %addr) {
entry:
%addr_i32 = inttoptr i32 %addr to i32*
- %v = load i32* %addr_i32, align 1
+ %v = load i32, i32* %addr_i32, align 1
ret i32 %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load i32* %__0, align 1
+; CHECK-NEXT: %__1 = load i32, i32* %__0, align 1
; CHECK-NEXT: ret i32 %__1
}
define i64 @load_i64(i32 %addr) {
entry:
%addr_i64 = inttoptr i32 %addr to i64*
- %v = load i64* %addr_i64, align 1
+ %v = load i64, i64* %addr_i64, align 1
ret i64 %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load i64* %__0, align 1
+; CHECK-NEXT: %__1 = load i64, i64* %__0, align 1
; CHECK-NEXT: ret i64 %__1
}
define float @load_float_a1(i32 %addr) {
entry:
%addr_float = inttoptr i32 %addr to float*
- %v = load float* %addr_float, align 1
+ %v = load float, float* %addr_float, align 1
ret float %v
; TODO(kschimpf) Fix load alignment in ICE to allow non-default.
; CHECK: __0:
-; CHECK-NEXT: %__1 = load float* %__0, align 4
+; CHECK-NEXT: %__1 = load float, float* %__0, align 4
; CHECK-NEXT: ret float %__1
}
@@ -71,24 +71,24 @@
define float @load_float_a4(i32 %addr) {
entry:
%addr_float = inttoptr i32 %addr to float*
- %v = load float* %addr_float, align 4
+ %v = load float, float* %addr_float, align 4
ret float %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load float* %__0, align 4
+; CHECK-NEXT: %__1 = load float, float* %__0, align 4
; CHECK-NEXT: ret float %__1
}
define double @load_double_a1(i32 %addr) {
entry:
%addr_double = inttoptr i32 %addr to double*
- %v = load double* %addr_double, align 1
+ %v = load double, double* %addr_double, align 1
ret double %v
; TODO(kschimpf) Fix load alignment in ICE to allow non-default.
; CHECK: __0:
-; CHECK-NEXT: %__1 = load double* %__0, align 8
+; CHECK-NEXT: %__1 = load double, double* %__0, align 8
; CHECK-NEXT: ret double %__1
}
@@ -96,55 +96,55 @@
define double @load_double_a8(i32 %addr) {
entry:
%addr_double = inttoptr i32 %addr to double*
- %v = load double* %addr_double, align 8
+ %v = load double, double* %addr_double, align 8
ret double %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load double* %__0, align 8
+; CHECK-NEXT: %__1 = load double, double* %__0, align 8
; CHECK-NEXT: ret double %__1
}
define <16 x i8> @load_v16xI8(i32 %addr) {
entry:
%addr_v16xI8 = inttoptr i32 %addr to <16 x i8>*
- %v = load <16 x i8>* %addr_v16xI8, align 1
+ %v = load <16 x i8>, <16 x i8>* %addr_v16xI8, align 1
ret <16 x i8> %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load <16 x i8>* %__0, align 1
+; CHECK-NEXT: %__1 = load <16 x i8>, <16 x i8>* %__0, align 1
; CHECK-NEXT: ret <16 x i8> %__1
}
define <8 x i16> @load_v8xI16(i32 %addr) {
entry:
%addr_v8xI16 = inttoptr i32 %addr to <8 x i16>*
- %v = load <8 x i16>* %addr_v8xI16, align 2
+ %v = load <8 x i16>, <8 x i16>* %addr_v8xI16, align 2
ret <8 x i16> %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load <8 x i16>* %__0, align 2
+; CHECK-NEXT: %__1 = load <8 x i16>, <8 x i16>* %__0, align 2
; CHECK-NEXT: ret <8 x i16> %__1
}
define <4 x i32> @load_v4xI32(i32 %addr) {
entry:
%addr_v4xI32 = inttoptr i32 %addr to <4 x i32>*
- %v = load <4 x i32>* %addr_v4xI32, align 4
+ %v = load <4 x i32>, <4 x i32>* %addr_v4xI32, align 4
ret <4 x i32> %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load <4 x i32>* %__0, align 4
+; CHECK-NEXT: %__1 = load <4 x i32>, <4 x i32>* %__0, align 4
; CHECK-NEXT: ret <4 x i32> %__1
}
define <4 x float> @load_v4xFloat(i32 %addr) {
entry:
%addr_v4xFloat = inttoptr i32 %addr to <4 x float>*
- %v = load <4 x float>* %addr_v4xFloat, align 4
+ %v = load <4 x float>, <4 x float>* %addr_v4xFloat, align 4
ret <4 x float> %v
; CHECK: __0:
-; CHECK-NEXT: %__1 = load <4 x float>* %__0, align 4
+; CHECK-NEXT: %__1 = load <4 x float>, <4 x float>* %__0, align 4
; CHECK-NEXT: ret <4 x float> %__1
}