; Test parsing NaCl atomic instructions.

; REQUIRES: allow_dump

; RUN: %p2i -i %s --insts | FileCheck %s
; RUN: %if --need=allow_disable_ir_gen --command \
; RUN:   %p2i -i %s --args -notranslate -timing -no-ir-gen \
; RUN:   | %if --need=allow_disable_ir_gen --command \
; RUN:   FileCheck --check-prefix=NOIR %s

11declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
12declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
13declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
14declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
15declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
16declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
17declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
18declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
19declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
20declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
21declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
22declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32)
23declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32)
24declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32)
25declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
26declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
27declare void @llvm.nacl.atomic.fence(i32)
28declare void @llvm.nacl.atomic.fence.all()
29declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)
30
;;; Load

33define i32 @test_atomic_load_8(i32 %iptr) {
34entry:
35 %ptr = inttoptr i32 %iptr to i8*
36 ; parameter value "6" is for the sequential consistency memory order.
37 %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6)
38 %r = zext i8 %i to i32
39 ret i32 %r
40}
41
42; CHECK: define i32 @test_atomic_load_8(i32 %iptr) {
43; CHECK-NEXT: entry:
44; CHECK-NEXT: %i = call i8 @llvm.nacl.atomic.load.i8(i32 %iptr, i32 6)
45; CHECK-NEXT: %r = zext i8 %i to i32
46; CHECK-NEXT: ret i32 %r
47; CHECK-NEXT: }
48
49define i32 @test_atomic_load_16(i32 %iptr) {
50entry:
51 %ptr = inttoptr i32 %iptr to i16*
52 %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
53 %r = zext i16 %i to i32
54 ret i32 %r
55}
56
57; CHECK-NEXT: define i32 @test_atomic_load_16(i32 %iptr) {
58; CHECK-NEXT: entry:
59; CHECK-NEXT: %i = call i16 @llvm.nacl.atomic.load.i16(i32 %iptr, i32 6)
60; CHECK-NEXT: %r = zext i16 %i to i32
61; CHECK-NEXT: ret i32 %r
62; CHECK-NEXT: }
63
64define i32 @test_atomic_load_32(i32 %iptr) {
65entry:
66 %ptr = inttoptr i32 %iptr to i32*
67 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
68 ret i32 %r
69}
70
71; CHECK-NEXT: define i32 @test_atomic_load_32(i32 %iptr) {
72; CHECK-NEXT: entry:
73; CHECK-NEXT: %r = call i32 @llvm.nacl.atomic.load.i32(i32 %iptr, i32 6)
74; CHECK-NEXT: ret i32 %r
75; CHECK-NEXT: }
76
77define i64 @test_atomic_load_64(i32 %iptr) {
78entry:
79 %ptr = inttoptr i32 %iptr to i64*
80 %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
81 ret i64 %r
82}
83
84; CHECK-NEXT: define i64 @test_atomic_load_64(i32 %iptr) {
85; CHECK-NEXT: entry:
86; CHECK-NEXT: %r = call i64 @llvm.nacl.atomic.load.i64(i32 %iptr, i32 6)
87; CHECK-NEXT: ret i64 %r
88; CHECK-NEXT: }
89
;;; Store

92define void @test_atomic_store_8(i32 %iptr, i32 %v) {
93entry:
94 %truncv = trunc i32 %v to i8
95 %ptr = inttoptr i32 %iptr to i8*
96 call void @llvm.nacl.atomic.store.i8(i8 %truncv, i8* %ptr, i32 6)
97 ret void
98}
99
100; CHECK-NEXT: define void @test_atomic_store_8(i32 %iptr, i32 %v) {
101; CHECK-NEXT: entry:
102; CHECK-NEXT: %truncv = trunc i32 %v to i8
103; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %truncv, i32 %iptr, i32 6)
104; CHECK-NEXT: ret void
105; CHECK-NEXT: }
106
107define void @test_atomic_store_16(i32 %iptr, i32 %v) {
108entry:
109 %truncv = trunc i32 %v to i16
110 %ptr = inttoptr i32 %iptr to i16*
111 call void @llvm.nacl.atomic.store.i16(i16 %truncv, i16* %ptr, i32 6)
112 ret void
113}
114
115; CHECK-NEXT: define void @test_atomic_store_16(i32 %iptr, i32 %v) {
116; CHECK-NEXT: entry:
117; CHECK-NEXT: %truncv = trunc i32 %v to i16
118; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %truncv, i32 %iptr, i32 6)
119; CHECK-NEXT: ret void
120; CHECK-NEXT: }
121
122define void @test_atomic_store_32(i32 %iptr, i32 %v) {
123entry:
124 %ptr = inttoptr i32 %iptr to i32*
125 call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6)
126 ret void
127}
128
129; CHECK-NEXT: define void @test_atomic_store_32(i32 %iptr, i32 %v) {
130; CHECK-NEXT: entry:
131; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %v, i32 %iptr, i32 6)
132; CHECK-NEXT: ret void
133; CHECK-NEXT: }
134
135define void @test_atomic_store_64(i32 %iptr, i64 %v) {
136entry:
137 %ptr = inttoptr i32 %iptr to i64*
138 call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6)
139 ret void
140}
141
142; CHECK-NEXT: define void @test_atomic_store_64(i32 %iptr, i64 %v) {
143; CHECK-NEXT: entry:
144; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %v, i32 %iptr, i32 6)
145; CHECK-NEXT: ret void
146; CHECK-NEXT: }
147
148define void @test_atomic_store_64_const(i32 %iptr) {
149entry:
150 %ptr = inttoptr i32 %iptr to i64*
151 call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6)
152 ret void
153}
154
155; CHECK-NEXT: define void @test_atomic_store_64_const(i32 %iptr) {
156; CHECK-NEXT: entry:
157; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i32 %iptr, i32 6)
158; CHECK-NEXT: ret void
159; CHECK-NEXT: }
160
;;; RMW

;; add

165define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
166entry:
167 %trunc = trunc i32 %v to i8
168 %ptr = inttoptr i32 %iptr to i8*
169 ; "1" is an atomic add, and "6" is sequential consistency.
170 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6)
171 %a_ext = zext i8 %a to i32
172 ret i32 %a_ext
173}
174
175; CHECK-NEXT: define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
176; CHECK-NEXT: entry:
177; CHECK-NEXT: %trunc = trunc i32 %v to i8
178; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i32 %iptr, i8 %trunc, i32 6)
179; CHECK-NEXT: %a_ext = zext i8 %a to i32
180; CHECK-NEXT: ret i32 %a_ext
181; CHECK-NEXT: }
182
183define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
184entry:
185 %trunc = trunc i32 %v to i16
186 %ptr = inttoptr i32 %iptr to i16*
187 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6)
188 %a_ext = zext i16 %a to i32
189 ret i32 %a_ext
190}
191
192; CHECK-NEXT: define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
193; CHECK-NEXT: entry:
194; CHECK-NEXT: %trunc = trunc i32 %v to i16
195; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i32 %iptr, i16 %trunc, i32 6)
196; CHECK-NEXT: %a_ext = zext i16 %a to i32
197; CHECK-NEXT: ret i32 %a_ext
198; CHECK-NEXT: }
199
200define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
201entry:
202 %ptr = inttoptr i32 %iptr to i32*
203 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
204 ret i32 %a
205}
206
207; CHECK-NEXT: define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
208; CHECK-NEXT: entry:
209; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32 %iptr, i32 %v, i32 6)
210; CHECK-NEXT: ret i32 %a
211; CHECK-NEXT: }
212
213define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
214entry:
215 %ptr = inttoptr i32 %iptr to i64*
216 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
217 ret i64 %a
218}
219
220; CHECK-NEXT: define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
221; CHECK-NEXT: entry:
222; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i32 %iptr, i64 %v, i32 6)
223; CHECK-NEXT: ret i64 %a
224; CHECK-NEXT: }
225
;; sub

228define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
229entry:
230 %trunc = trunc i32 %v to i8
231 %ptr = inttoptr i32 %iptr to i8*
232 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6)
233 %a_ext = zext i8 %a to i32
234 ret i32 %a_ext
235}
236
237; CHECK-NEXT: define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
238; CHECK-NEXT: entry:
239; CHECK-NEXT: %trunc = trunc i32 %v to i8
240; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i32 %iptr, i8 %trunc, i32 6)
241; CHECK-NEXT: %a_ext = zext i8 %a to i32
242; CHECK-NEXT: ret i32 %a_ext
243; CHECK-NEXT: }
244
245define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
246entry:
247 %trunc = trunc i32 %v to i16
248 %ptr = inttoptr i32 %iptr to i16*
249 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6)
250 %a_ext = zext i16 %a to i32
251 ret i32 %a_ext
252}
253
254; CHECK-NEXT: define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
255; CHECK-NEXT: entry:
256; CHECK-NEXT: %trunc = trunc i32 %v to i16
257; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i32 %iptr, i16 %trunc, i32 6)
258; CHECK-NEXT: %a_ext = zext i16 %a to i32
259; CHECK-NEXT: ret i32 %a_ext
260; CHECK-NEXT: }
261
262define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
263entry:
264 %ptr = inttoptr i32 %iptr to i32*
265 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
266 ret i32 %a
267}
268
269; CHECK-NEXT: define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
270; CHECK-NEXT: entry:
271; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32 %iptr, i32 %v, i32 6)
272; CHECK-NEXT: ret i32 %a
273; CHECK-NEXT: }
274
275define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
276entry:
277 %ptr = inttoptr i32 %iptr to i64*
278 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6)
279 ret i64 %a
280}
281
282; CHECK-NEXT: define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
283; CHECK-NEXT: entry:
284; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i32 %iptr, i64 %v, i32 6)
285; CHECK-NEXT: ret i64 %a
286; CHECK-NEXT: }
287
;; or

290define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
291entry:
292 %trunc = trunc i32 %v to i8
293 %ptr = inttoptr i32 %iptr to i8*
294 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6)
295 %a_ext = zext i8 %a to i32
296 ret i32 %a_ext
297}
298
299; CHECK-NEXT: define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
300; CHECK-NEXT: entry:
301; CHECK-NEXT: %trunc = trunc i32 %v to i8
302; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i32 %iptr, i8 %trunc, i32 6)
303; CHECK-NEXT: %a_ext = zext i8 %a to i32
304; CHECK-NEXT: ret i32 %a_ext
305; CHECK-NEXT: }
306
307define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
308entry:
309 %trunc = trunc i32 %v to i16
310 %ptr = inttoptr i32 %iptr to i16*
311 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6)
312 %a_ext = zext i16 %a to i32
313 ret i32 %a_ext
314}
315
316; CHECK-NEXT: define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
317; CHECK-NEXT: entry:
318; CHECK-NEXT: %trunc = trunc i32 %v to i16
319; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i32 %iptr, i16 %trunc, i32 6)
320; CHECK-NEXT: %a_ext = zext i16 %a to i32
321; CHECK-NEXT: ret i32 %a_ext
322; CHECK-NEXT: }
323
324define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
325entry:
326 %ptr = inttoptr i32 %iptr to i32*
327 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
328 ret i32 %a
329}
330
331; CHECK-NEXT: define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
332; CHECK-NEXT: entry:
333; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32 %iptr, i32 %v, i32 6)
334; CHECK-NEXT: ret i32 %a
335; CHECK-NEXT: }
336
337define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
338entry:
339 %ptr = inttoptr i32 %iptr to i64*
340 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6)
341 ret i64 %a
342}
343
344; CHECK-NEXT: define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
345; CHECK-NEXT: entry:
346; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i32 %iptr, i64 %v, i32 6)
347; CHECK-NEXT: ret i64 %a
348; CHECK-NEXT: }
349
;; and

352define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
353entry:
354 %trunc = trunc i32 %v to i8
355 %ptr = inttoptr i32 %iptr to i8*
356 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6)
357 %a_ext = zext i8 %a to i32
358 ret i32 %a_ext
359}
360
361; CHECK-NEXT: define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
362; CHECK-NEXT: entry:
363; CHECK-NEXT: %trunc = trunc i32 %v to i8
364; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i32 %iptr, i8 %trunc, i32 6)
365; CHECK-NEXT: %a_ext = zext i8 %a to i32
366; CHECK-NEXT: ret i32 %a_ext
367; CHECK-NEXT: }
368
369define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
370entry:
371 %trunc = trunc i32 %v to i16
372 %ptr = inttoptr i32 %iptr to i16*
373 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6)
374 %a_ext = zext i16 %a to i32
375 ret i32 %a_ext
376}
377
378; CHECK-NEXT: define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
379; CHECK-NEXT: entry:
380; CHECK-NEXT: %trunc = trunc i32 %v to i16
381; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i32 %iptr, i16 %trunc, i32 6)
382; CHECK-NEXT: %a_ext = zext i16 %a to i32
383; CHECK-NEXT: ret i32 %a_ext
384; CHECK-NEXT: }
385
386define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
387entry:
388 %ptr = inttoptr i32 %iptr to i32*
389 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
390 ret i32 %a
391}
392
393; CHECK-NEXT: define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
394; CHECK-NEXT: entry:
395; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32 %iptr, i32 %v, i32 6)
396; CHECK-NEXT: ret i32 %a
397; CHECK-NEXT: }
398
399define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
400entry:
401 %ptr = inttoptr i32 %iptr to i64*
402 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6)
403 ret i64 %a
404}
405
406; CHECK-NEXT: define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
407; CHECK-NEXT: entry:
408; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i32 %iptr, i64 %v, i32 6)
409; CHECK-NEXT: ret i64 %a
410; CHECK-NEXT: }
411
;; xor

414define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
415entry:
416 %trunc = trunc i32 %v to i8
417 %ptr = inttoptr i32 %iptr to i8*
418 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6)
419 %a_ext = zext i8 %a to i32
420 ret i32 %a_ext
421}
422
423; CHECK-NEXT: define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
424; CHECK-NEXT: entry:
425; CHECK-NEXT: %trunc = trunc i32 %v to i8
426; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i32 %iptr, i8 %trunc, i32 6)
427; CHECK-NEXT: %a_ext = zext i8 %a to i32
428; CHECK-NEXT: ret i32 %a_ext
429; CHECK-NEXT: }
430
431define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
432entry:
433 %trunc = trunc i32 %v to i16
434 %ptr = inttoptr i32 %iptr to i16*
435 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6)
436 %a_ext = zext i16 %a to i32
437 ret i32 %a_ext
438}
439
440; CHECK-NEXT: define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
441; CHECK-NEXT: entry:
442; CHECK-NEXT: %trunc = trunc i32 %v to i16
443; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i32 %iptr, i16 %trunc, i32 6)
444; CHECK-NEXT: %a_ext = zext i16 %a to i32
445; CHECK-NEXT: ret i32 %a_ext
446; CHECK-NEXT: }
447
448define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
449entry:
450 %ptr = inttoptr i32 %iptr to i32*
451 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
452 ret i32 %a
453}
454
455; CHECK-NEXT: define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
456; CHECK-NEXT: entry:
457; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32 %iptr, i32 %v, i32 6)
458; CHECK-NEXT: ret i32 %a
459; CHECK-NEXT: }
460
461define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
462entry:
463 %ptr = inttoptr i32 %iptr to i64*
464 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6)
465 ret i64 %a
466}
467
468; CHECK-NEXT: define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
469; CHECK-NEXT: entry:
470; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i32 %iptr, i64 %v, i32 6)
471; CHECK-NEXT: ret i64 %a
472; CHECK-NEXT: }
473
;; exchange

476define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
477entry:
478 %trunc = trunc i32 %v to i8
479 %ptr = inttoptr i32 %iptr to i8*
480 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6)
481 %a_ext = zext i8 %a to i32
482 ret i32 %a_ext
483}
484
485; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
486; CHECK-NEXT: entry:
487; CHECK-NEXT: %trunc = trunc i32 %v to i8
488; CHECK-NEXT: %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i32 %iptr, i8 %trunc, i32 6)
489; CHECK-NEXT: %a_ext = zext i8 %a to i32
490; CHECK-NEXT: ret i32 %a_ext
491; CHECK-NEXT: }
492
493define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
494entry:
495 %trunc = trunc i32 %v to i16
496 %ptr = inttoptr i32 %iptr to i16*
497 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %trunc, i32 6)
498 %a_ext = zext i16 %a to i32
499 ret i32 %a_ext
500}
501
502; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
503; CHECK-NEXT: entry:
504; CHECK-NEXT: %trunc = trunc i32 %v to i16
505; CHECK-NEXT: %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i32 %iptr, i16 %trunc, i32 6)
506; CHECK-NEXT: %a_ext = zext i16 %a to i32
507; CHECK-NEXT: ret i32 %a_ext
508; CHECK-NEXT: }
509
510define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
511entry:
512 %ptr = inttoptr i32 %iptr to i32*
513 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
514 ret i32 %a
515}
516
517; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
518; CHECK-NEXT: entry:
519; CHECK-NEXT: %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32 %iptr, i32 %v, i32 6)
520; CHECK-NEXT: ret i32 %a
521; CHECK-NEXT: }
522
523define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
524entry:
525 %ptr = inttoptr i32 %iptr to i64*
526 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6)
527 ret i64 %a
528}
529
530; CHECK-NEXT: define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
531; CHECK-NEXT: entry:
532; CHECK-NEXT: %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i32 %iptr, i64 %v, i32 6)
533; CHECK-NEXT: ret i64 %a
534; CHECK-NEXT: }
535
;;;; Cmpxchg

538define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
539entry:
540 %trunc_exp = trunc i32 %expected to i8
541 %trunc_des = trunc i32 %desired to i8
542 %ptr = inttoptr i32 %iptr to i8*
543 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp,
544 i8 %trunc_des, i32 6, i32 6)
545 %old_ext = zext i8 %old to i32
546 ret i32 %old_ext
547}
548
549; CHECK-NEXT: define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
550; CHECK-NEXT: entry:
551; CHECK-NEXT: %trunc_exp = trunc i32 %expected to i8
552; CHECK-NEXT: %trunc_des = trunc i32 %desired to i8
553; CHECK-NEXT: %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i32 %iptr, i8 %trunc_exp, i8 %trunc_des, i32 6, i32 6)
554; CHECK-NEXT: %old_ext = zext i8 %old to i32
555; CHECK-NEXT: ret i32 %old_ext
556; CHECK-NEXT: }
557
558define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
559entry:
560 %trunc_exp = trunc i32 %expected to i16
561 %trunc_des = trunc i32 %desired to i16
562 %ptr = inttoptr i32 %iptr to i16*
563 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp,
564 i16 %trunc_des, i32 6, i32 6)
565 %old_ext = zext i16 %old to i32
566 ret i32 %old_ext
567}
568
569; CHECK-NEXT: define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
570; CHECK-NEXT: entry:
571; CHECK-NEXT: %trunc_exp = trunc i32 %expected to i16
572; CHECK-NEXT: %trunc_des = trunc i32 %desired to i16
573; CHECK-NEXT: %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i32 %iptr, i16 %trunc_exp, i16 %trunc_des, i32 6, i32 6)
574; CHECK-NEXT: %old_ext = zext i16 %old to i32
575; CHECK-NEXT: ret i32 %old_ext
576; CHECK-NEXT: }
577
578define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
579entry:
580 %ptr = inttoptr i32 %iptr to i32*
581 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
582 i32 %desired, i32 6, i32 6)
583 ret i32 %old
584}
585
586; CHECK-NEXT: define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
587; CHECK-NEXT: entry:
588; CHECK-NEXT: %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32 %iptr, i32 %expected, i32 %desired, i32 6, i32 6)
589; CHECK-NEXT: ret i32 %old
590; CHECK-NEXT: }
591
592define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
593entry:
594 %ptr = inttoptr i32 %iptr to i64*
595 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
596 i64 %desired, i32 6, i32 6)
597 ret i64 %old
598}
599
600; CHECK-NEXT: define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
601; CHECK-NEXT: entry:
602; CHECK-NEXT: %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i32 %iptr, i64 %expected, i64 %desired, i32 6, i32 6)
603; CHECK-NEXT: ret i64 %old
604; CHECK-NEXT: }
605
;;;; Fence and is-lock-free.

608define void @test_atomic_fence() {
609entry:
610 call void @llvm.nacl.atomic.fence(i32 6)
611 ret void
612}
613
614; CHECK-NEXT: define void @test_atomic_fence() {
615; CHECK-NEXT: entry:
616; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6)
617; CHECK-NEXT: ret void
618; CHECK-NEXT: }
619
620define void @test_atomic_fence_all() {
621entry:
622 call void @llvm.nacl.atomic.fence.all()
623 ret void
624}
625
626; CHECK-NEXT: define void @test_atomic_fence_all() {
627; CHECK-NEXT: entry:
628; CHECK-NEXT: call void @llvm.nacl.atomic.fence.all()
629; CHECK-NEXT: ret void
630; CHECK-NEXT: }
631
632define i32 @test_atomic_is_lock_free(i32 %iptr) {
633entry:
634 %ptr = inttoptr i32 %iptr to i8*
635 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
636 %r = zext i1 %i to i32
637 ret i32 %r
638}
639
640; CHECK-NEXT: define i32 @test_atomic_is_lock_free(i32 %iptr) {
641; CHECK-NEXT: entry:
642; CHECK-NEXT: %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i32 %iptr)
643; CHECK-NEXT: %r = zext i1 %i to i32
644; CHECK-NEXT: ret i32 %r
645; CHECK-NEXT: }
646
; NOIR: Total across all functions