; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512vl | FileCheck %s

; Declarations of the vector rounding intrinsics exercised by the tests below.
declare <2 x double> @llvm.floor.v2f64(<2 x double> %p)
declare <4 x float> @llvm.floor.v4f32(<4 x float> %p)
declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)
declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)
declare <8 x double> @llvm.floor.v8f64(<8 x double> %p)
declare <16 x float> @llvm.floor.v16f32(<16 x float> %p)
declare <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
declare <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
declare <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
declare <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
declare <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
declare <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
declare <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
declare <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
declare <2 x double> @llvm.rint.v2f64(<2 x double> %p)
declare <4 x float> @llvm.rint.v4f32(<4 x float> %p)
declare <4 x double> @llvm.rint.v4f64(<4 x double> %p)
declare <8 x float> @llvm.rint.v8f32(<8 x float> %p)
declare <8 x double> @llvm.rint.v8f64(<8 x double> %p)
declare <16 x float> @llvm.rint.v16f32(<16 x float> %p)
declare <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
declare <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
declare <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
declare <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)

; floor on register operands: vroundp[sd] imm 9 (round toward -inf) for
; xmm/ymm, vrndscalep[sd] imm 9 for zmm.

define <2 x double> @floor_v2f64(<2 x double> %p) {
; CHECK-LABEL: floor_v2f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $9, %xmm0, %xmm0
; CHECK-NEXT: retq
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  ret <2 x double> %t
}

define <4 x float> @floor_v4f32(<4 x float> %p) {
; CHECK-LABEL: floor_v4f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $9, %xmm0, %xmm0
; CHECK-NEXT: retq
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  ret <4 x float> %t
}

define <4 x double> @floor_v4f64(<4 x double> %p){
; CHECK-LABEL: floor_v4f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $9, %ymm0, %ymm0
; CHECK-NEXT: retq
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  ret <4 x double> %t
}

define <8 x float> @floor_v8f32(<8 x float> %p) {
; CHECK-LABEL: floor_v8f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $9, %ymm0, %ymm0
; CHECK-NEXT: retq
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  ret <8 x float> %t
}

define <8 x double> @floor_v8f64(<8 x double> %p){
; CHECK-LABEL: floor_v8f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $9, %zmm0, %zmm0
; CHECK-NEXT: retq
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  ret <8 x double> %t
}

define <16 x float> @floor_v16f32(<16 x float> %p) {
; CHECK-LABEL: floor_v16f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $9, %zmm0, %zmm0
; CHECK-NEXT: retq
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  ret <16 x float> %t
}

; floor on loaded operands: the load should fold into the rounding
; instruction's memory operand.

define <2 x double> @floor_v2f64_load(<2 x double>* %ptr) {
; CHECK-LABEL: floor_v2f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $9, (%rdi), %xmm0
; CHECK-NEXT: retq
  %p = load <2 x double>, <2 x double>* %ptr
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  ret <2 x double> %t
}

define <4 x float> @floor_v4f32_load(<4 x float>* %ptr) {
; CHECK-LABEL: floor_v4f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $9, (%rdi), %xmm0
; CHECK-NEXT: retq
  %p = load <4 x float>, <4 x float>* %ptr
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  ret <4 x float> %t
}

define <4 x double> @floor_v4f64_load(<4 x double>* %ptr){
; CHECK-LABEL: floor_v4f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $9, (%rdi), %ymm0
; CHECK-NEXT: retq
  %p = load <4 x double>, <4 x double>* %ptr
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  ret <4 x double> %t
}

define <8 x float> @floor_v8f32_load(<8 x float>* %ptr) {
; CHECK-LABEL: floor_v8f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $9, (%rdi), %ymm0
; CHECK-NEXT: retq
  %p = load <8 x float>, <8 x float>* %ptr
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  ret <8 x float> %t
}

define <8 x double> @floor_v8f64_load(<8 x double>* %ptr){
; CHECK-LABEL: floor_v8f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $9, (%rdi), %zmm0
; CHECK-NEXT: retq
  %p = load <8 x double>, <8 x double>* %ptr
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  ret <8 x double> %t
}

define <16 x float> @floor_v16f32_load(<16 x float>* %ptr) {
; CHECK-LABEL: floor_v16f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $9, (%rdi), %zmm0
; CHECK-NEXT: retq
  %p = load <16 x float>, <16 x float>* %ptr
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  ret <16 x float> %t
}

; floor merged with a select on an all-zeros compare: should become a
; masked vrndscale with the passthru register as destination.

define <2 x double> @floor_v2f64_mask(<2 x double> %p, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: floor_v2f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscalepd $9, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
  ret <2 x double> %s
}

define <4 x float> @floor_v4f32_mask(<4 x float> %p, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: floor_v4f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscaleps $9, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
  ret <4 x float> %s
}

define <4 x double> @floor_v4f64_mask(<4 x double> %p, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: floor_v4f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscalepd $9, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
  ret <4 x double> %s
}

define <8 x float> @floor_v8f32_mask(<8 x float> %p, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: floor_v8f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscaleps $9, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
  ret <8 x float> %s
}

define <8 x double> @floor_v8f64_mask(<8 x double> %p, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: floor_v8f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscalepd $9, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, %zmm0
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
  ret <8 x double> %s
}

define <16 x float> @floor_v16f32_mask(<16 x float> %p, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: floor_v16f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscaleps $9, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
  ret <16 x float> %s
}

; floor merged with a select against zero: should become a zero-masked
; ({z}) vrndscale.

define <2 x double> @floor_v2f64_maskz(<2 x double> %p, <2 x i64> %cmp) {
; CHECK-LABEL: floor_v2f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $9, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
  ret <2 x double> %s
}

define <4 x float> @floor_v4f32_maskz(<4 x float> %p, <4 x i32> %cmp) {
; CHECK-LABEL: floor_v4f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $9, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
  ret <4 x float> %s
}

define <4 x double> @floor_v4f64_maskz(<4 x double> %p, <4 x i64> %cmp) {
; CHECK-LABEL: floor_v4f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $9, %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
  ret <4 x double> %s
}

define <8 x float> @floor_v8f32_maskz(<8 x float> %p, <8 x i32> %cmp) {
; CHECK-LABEL: floor_v8f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $9, %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
  ret <8 x float> %s
}

define <8 x double> @floor_v8f64_maskz(<8 x double> %p, <8 x i64> %cmp) {
; CHECK-LABEL: floor_v8f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $9, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
  ret <8 x double> %s
}

define <16 x float> @floor_v16f32_maskz(<16 x float> %p, <16 x i32> %cmp) {
; CHECK-LABEL: floor_v16f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $9, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
  ret <16 x float> %s
}

; Masked floor with a loaded operand: both the mask and the load should
; fold into a single vrndscale.

define <2 x double> @floor_v2f64_mask_load(<2 x double>* %ptr, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: floor_v2f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %p = load <2 x double>, <2 x double>* %ptr
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
  ret <2 x double> %s
}

define <4 x float> @floor_v4f32_mask_load(<4 x float>* %ptr, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: floor_v4f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %p = load <4 x float>, <4 x float>* %ptr
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
  ret <4 x float> %s
}

define <4 x double> @floor_v4f64_mask_load(<4 x double>* %ptr, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: floor_v4f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %p = load <4 x double>, <4 x double>* %ptr
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
  ret <4 x double> %s
}

define <8 x float> @floor_v8f32_mask_load(<8 x float>* %ptr, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: floor_v8f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %p = load <8 x float>, <8 x float>* %ptr
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
  ret <8 x float> %s
}

define <8 x double> @floor_v8f64_mask_load(<8 x double>* %ptr, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: floor_v8f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %p = load <8 x double>, <8 x double>* %ptr
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
  ret <8 x double> %s
}

define <16 x float> @floor_v16f32_mask_load(<16 x float>* %ptr, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: floor_v16f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %p = load <16 x float>, <16 x float>* %ptr
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
  ret <16 x float> %s
}

; Zero-masked floor with a loaded operand: load and {z} masking both fold
; into one vrndscale.

define <2 x double> @floor_v2f64_maskz_load(<2 x double>* %ptr, <2 x i64> %cmp) {
; CHECK-LABEL: floor_v2f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %p = load <2 x double>, <2 x double>* %ptr
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
  ret <2 x double> %s
}

define <4 x float> @floor_v4f32_maskz_load(<4 x float>* %ptr, <4 x i32> %cmp) {
; CHECK-LABEL: floor_v4f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %p = load <4 x float>, <4 x float>* %ptr
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
  ret <4 x float> %s
}

define <4 x double> @floor_v4f64_maskz_load(<4 x double>* %ptr, <4 x i64> %cmp) {
; CHECK-LABEL: floor_v4f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %p = load <4 x double>, <4 x double>* %ptr
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
  ret <4 x double> %s
}

define <8 x float> @floor_v8f32_maskz_load(<8 x float>* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: floor_v8f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %p = load <8 x float>, <8 x float>* %ptr
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
  ret <8 x float> %s
}

define <8 x double> @floor_v8f64_maskz_load(<8 x double>* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: floor_v8f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %p = load <8 x double>, <8 x double>* %ptr
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
  ret <8 x double> %s
}

define <16 x float> @floor_v16f32_maskz_load(<16 x float>* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: floor_v16f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %p = load <16 x float>, <16 x float>* %ptr
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
  ret <16 x float> %s
}

; floor of a splatted scalar load: should fold as an embedded-broadcast
; memory operand ({1toN}).

define <2 x double> @floor_v2f64_broadcast(double* %ptr) {
; CHECK-LABEL: floor_v2f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to2}, %xmm0
; CHECK-NEXT: retq
  %ps = load double, double* %ptr
  %pins = insertelement <2 x double> undef, double %ps, i32 0
  %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  ret <2 x double> %t
}

define <4 x float> @floor_v4f32_broadcast(float* %ptr) {
; CHECK-LABEL: floor_v4f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to4}, %xmm0
; CHECK-NEXT: retq
  %ps = load float, float* %ptr
  %pins = insertelement <4 x float> undef, float %ps, i32 0
  %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  ret <4 x float> %t
}

define <4 x double> @floor_v4f64_broadcast(double* %ptr){
; CHECK-LABEL: floor_v4f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to4}, %ymm0
; CHECK-NEXT: retq
  %ps = load double, double* %ptr
  %pins = insertelement <4 x double> undef, double %ps, i32 0
  %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  ret <4 x double> %t
}

define <8 x float> @floor_v8f32_broadcast(float* %ptr) {
; CHECK-LABEL: floor_v8f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to8}, %ymm0
; CHECK-NEXT: retq
  %ps = load float, float* %ptr
  %pins = insertelement <8 x float> undef, float %ps, i32 0
  %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  ret <8 x float> %t
}

define <8 x double> @floor_v8f64_broadcast(double* %ptr){
; CHECK-LABEL: floor_v8f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to8}, %zmm0
; CHECK-NEXT: retq
  %ps = load double, double* %ptr
  %pins = insertelement <8 x double> undef, double %ps, i32 0
  %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  ret <8 x double> %t
}

define <16 x float> @floor_v16f32_broadcast(float* %ptr) {
; CHECK-LABEL: floor_v16f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to16}, %zmm0
; CHECK-NEXT: retq
  %ps = load float, float* %ptr
  %pins = insertelement <16 x float> undef, float %ps, i32 0
  %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  ret <16 x float> %t
}

; Masked floor of a splatted scalar load: embedded broadcast plus merge
; masking fold into one vrndscale.

define <2 x double> @floor_v2f64_mask_broadcast(double* %ptr, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: floor_v2f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to2}, %xmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <2 x double> undef, double %ps, i32 0
  %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
  ret <2 x double> %s
}

define <4 x float> @floor_v4f32_mask_broadcast(float* %ptr, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: floor_v4f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to4}, %xmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <4 x float> undef, float %ps, i32 0
  %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
  ret <4 x float> %s
}

define <4 x double> @floor_v4f64_mask_broadcast(double* %ptr, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: floor_v4f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to4}, %ymm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <4 x double> undef, double %ps, i32 0
  %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
  ret <4 x double> %s
}

define <8 x float> @floor_v8f32_mask_broadcast(float* %ptr, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: floor_v8f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to8}, %ymm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <8 x float> undef, float %ps, i32 0
  %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
  ret <8 x float> %s
}

define <8 x double> @floor_v8f64_mask_broadcast(double* %ptr, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: floor_v8f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to8}, %zmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <8 x double> undef, double %ps, i32 0
  %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
  ret <8 x double> %s
}

define <16 x float> @floor_v16f32_mask_broadcast(float* %ptr, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: floor_v16f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to16}, %zmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <16 x float> undef, float %ps, i32 0
  %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
  ret <16 x float> %s
}

; Zero-masked floor of a splatted scalar load: embedded broadcast plus {z}
; masking fold into one vrndscale.

define <2 x double> @floor_v2f64_maskz_broadcast(double* %ptr, <2 x i64> %cmp) {
; CHECK-LABEL: floor_v2f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to2}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <2 x double> undef, double %ps, i32 0
  %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
  %t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
  ret <2 x double> %s
}

define <4 x float> @floor_v4f32_maskz_broadcast(float* %ptr, <4 x i32> %cmp) {
; CHECK-LABEL: floor_v4f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to4}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <4 x float> undef, float %ps, i32 0
  %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
  %t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
  ret <4 x float> %s
}

define <4 x double> @floor_v4f64_maskz_broadcast(double* %ptr, <4 x i64> %cmp) {
; CHECK-LABEL: floor_v4f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to4}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <4 x double> undef, double %ps, i32 0
  %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
  %t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
  ret <4 x double> %s
}

define <8 x float> @floor_v8f32_maskz_broadcast(float* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: floor_v8f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to8}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <8 x float> undef, float %ps, i32 0
  %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
  %t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
  ret <8 x float> %s
}

define <8 x double> @floor_v8f64_maskz_broadcast(double* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: floor_v8f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $9, (%rdi){1to8}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <8 x double> undef, double %ps, i32 0
  %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
  %t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
  ret <8 x double> %s
}

define <16 x float> @floor_v16f32_maskz_broadcast(float* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: floor_v16f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $9, (%rdi){1to16}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <16 x float> undef, float %ps, i32 0
  %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
  %t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
  ret <16 x float> %s
}

; ceil on register operands: same lowering shape as floor but with
; immediate 10 = round-toward-positive-infinity.

define <2 x double> @ceil_v2f64(<2 x double> %p) {
; CHECK-LABEL: ceil_v2f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $10, %xmm0, %xmm0
; CHECK-NEXT: retq
  %t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
  ret <2 x double> %t
}

define <4 x float> @ceil_v4f32(<4 x float> %p) {
; CHECK-LABEL: ceil_v4f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $10, %xmm0, %xmm0
; CHECK-NEXT: retq
  %t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
  ret <4 x float> %t
}

define <4 x double> @ceil_v4f64(<4 x double> %p){
; CHECK-LABEL: ceil_v4f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $10, %ymm0, %ymm0
; CHECK-NEXT: retq
  %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
  ret <4 x double> %t
}

define <8 x float> @ceil_v8f32(<8 x float> %p) {
; CHECK-LABEL: ceil_v8f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $10, %ymm0, %ymm0
; CHECK-NEXT: retq
  %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
  ret <8 x float> %t
}

define <8 x double> @ceil_v8f64(<8 x double> %p){
; CHECK-LABEL: ceil_v8f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $10, %zmm0, %zmm0
; CHECK-NEXT: retq
  %t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
  ret <8 x double> %t
}

define <16 x float> @ceil_v16f32(<16 x float> %p) {
; CHECK-LABEL: ceil_v16f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $10, %zmm0, %zmm0
; CHECK-NEXT: retq
  %t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
  ret <16 x float> %t
}

; ceil on loaded operands: the load should fold into the rounding
; instruction's memory operand (imm 10).

define <2 x double> @ceil_v2f64_load(<2 x double>* %ptr) {
; CHECK-LABEL: ceil_v2f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $10, (%rdi), %xmm0
; CHECK-NEXT: retq
  %p = load <2 x double>, <2 x double>* %ptr
  %t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
  ret <2 x double> %t
}

define <4 x float> @ceil_v4f32_load(<4 x float>* %ptr) {
; CHECK-LABEL: ceil_v4f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $10, (%rdi), %xmm0
; CHECK-NEXT: retq
  %p = load <4 x float>, <4 x float>* %ptr
  %t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
  ret <4 x float> %t
}

define <4 x double> @ceil_v4f64_load(<4 x double>* %ptr){
; CHECK-LABEL: ceil_v4f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $10, (%rdi), %ymm0
; CHECK-NEXT: retq
  %p = load <4 x double>, <4 x double>* %ptr
  %t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
  ret <4 x double> %t
}

define <8 x float> @ceil_v8f32_load(<8 x float>* %ptr) {
; CHECK-LABEL: ceil_v8f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $10, (%rdi), %ymm0
; CHECK-NEXT: retq
  %p = load <8 x float>, <8 x float>* %ptr
  %t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
  ret <8 x float> %t
}

define <8 x double> @ceil_v8f64_load(<8 x double>* %ptr){
; CHECK-LABEL: ceil_v8f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $10, (%rdi), %zmm0
; CHECK-NEXT: retq
  %p = load <8 x double>, <8 x double>* %ptr
  %t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
  ret <8 x double> %t
}

define <16 x float> @ceil_v16f32_load(<16 x float>* %ptr) {
; CHECK-LABEL: ceil_v16f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $10, (%rdi), %zmm0
; CHECK-NEXT: retq
  %p = load <16 x float>, <16 x float>* %ptr
  %t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
  ret <16 x float> %t
}

; Merge-masking: the (cmp == 0) predicate becomes a k-register via vptestnm*,
; and the ceil must select the EVEX vrndscale* form (VEX vround* cannot be
; masked) writing into the passthru register under {%k1}.
define <2 x double> @ceil_v2f64_mask(<2 x double> %p, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: ceil_v2f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscalepd $10, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
ret <2 x double> %s
}

define <4 x float> @ceil_v4f32_mask(<4 x float> %p, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: ceil_v4f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscaleps $10, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
ret <4 x float> %s
}

define <4 x double> @ceil_v4f64_mask(<4 x double> %p, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: ceil_v4f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscalepd $10, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
ret <4 x double> %s
}

define <8 x float> @ceil_v8f32_mask(<8 x float> %p, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: ceil_v8f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscaleps $10, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
ret <8 x float> %s
}

define <8 x double> @ceil_v8f64_mask(<8 x double> %p, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: ceil_v8f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscalepd $10, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, %zmm0
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
ret <8 x double> %s
}

define <16 x float> @ceil_v16f32_mask(<16 x float> %p, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: ceil_v16f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscaleps $10, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
ret <16 x float> %s
}
| |
; Zero-masking: select against zeroinitializer should fold to the {z} form of
; the EVEX vrndscale*, zeroing lanes whose mask bit is clear.
define <2 x double> @ceil_v2f64_maskz(<2 x double> %p, <2 x i64> %cmp) {
; CHECK-LABEL: ceil_v2f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $10, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
ret <2 x double> %s
}

define <4 x float> @ceil_v4f32_maskz(<4 x float> %p, <4 x i32> %cmp) {
; CHECK-LABEL: ceil_v4f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $10, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
ret <4 x float> %s
}

define <4 x double> @ceil_v4f64_maskz(<4 x double> %p, <4 x i64> %cmp) {
; CHECK-LABEL: ceil_v4f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $10, %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
ret <4 x double> %s
}

define <8 x float> @ceil_v8f32_maskz(<8 x float> %p, <8 x i32> %cmp) {
; CHECK-LABEL: ceil_v8f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $10, %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
ret <8 x float> %s
}

define <8 x double> @ceil_v8f64_maskz(<8 x double> %p, <8 x i64> %cmp) {
; CHECK-LABEL: ceil_v8f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $10, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
ret <8 x double> %s
}

define <16 x float> @ceil_v16f32_maskz(<16 x float> %p, <16 x i32> %cmp) {
; CHECK-LABEL: ceil_v16f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $10, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
ret <16 x float> %s
}
| |
; Merge-masking combined with a folded memory operand: one masked EVEX
; vrndscale* reading (%rdi) and merging into the passthru register.
define <2 x double> @ceil_v2f64_mask_load(<2 x double>* %ptr, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: ceil_v2f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%p = load <2 x double>, <2 x double>* %ptr
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
ret <2 x double> %s
}

define <4 x float> @ceil_v4f32_mask_load(<4 x float>* %ptr, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: ceil_v4f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%p = load <4 x float>, <4 x float>* %ptr
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
ret <4 x float> %s
}

define <4 x double> @ceil_v4f64_mask_load(<4 x double>* %ptr, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: ceil_v4f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%p = load <4 x double>, <4 x double>* %ptr
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
ret <4 x double> %s
}

define <8 x float> @ceil_v8f32_mask_load(<8 x float>* %ptr, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: ceil_v8f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%p = load <8 x float>, <8 x float>* %ptr
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
ret <8 x float> %s
}

define <8 x double> @ceil_v8f64_mask_load(<8 x double>* %ptr, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: ceil_v8f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%p = load <8 x double>, <8 x double>* %ptr
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
ret <8 x double> %s
}

define <16 x float> @ceil_v16f32_mask_load(<16 x float>* %ptr, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: ceil_v16f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%p = load <16 x float>, <16 x float>* %ptr
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
ret <16 x float> %s
}
| |
; Zero-masking combined with a folded memory operand: {%k1} {z} form reading
; directly from (%rdi).
define <2 x double> @ceil_v2f64_maskz_load(<2 x double>* %ptr, <2 x i64> %cmp) {
; CHECK-LABEL: ceil_v2f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%p = load <2 x double>, <2 x double>* %ptr
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
ret <2 x double> %s
}

define <4 x float> @ceil_v4f32_maskz_load(<4 x float>* %ptr, <4 x i32> %cmp) {
; CHECK-LABEL: ceil_v4f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%p = load <4 x float>, <4 x float>* %ptr
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
ret <4 x float> %s
}

define <4 x double> @ceil_v4f64_maskz_load(<4 x double>* %ptr, <4 x i64> %cmp) {
; CHECK-LABEL: ceil_v4f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%p = load <4 x double>, <4 x double>* %ptr
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
ret <4 x double> %s
}

define <8 x float> @ceil_v8f32_maskz_load(<8 x float>* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: ceil_v8f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%p = load <8 x float>, <8 x float>* %ptr
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
ret <8 x float> %s
}

define <8 x double> @ceil_v8f64_maskz_load(<8 x double>* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: ceil_v8f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%p = load <8 x double>, <8 x double>* %ptr
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
ret <8 x double> %s
}

define <16 x float> @ceil_v16f32_maskz_load(<16 x float>* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: ceil_v16f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%p = load <16 x float>, <16 x float>* %ptr
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
ret <16 x float> %s
}
| |
; Broadcast forms: a scalar load splatted across the vector should fold into
; the EVEX embedded-broadcast operand {1toN} of vrndscale* (VEX vround* has no
; broadcast form, so even 128/256-bit cases select vrndscale*).
define <2 x double> @ceil_v2f64_broadcast(double* %ptr) {
; CHECK-LABEL: ceil_v2f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to2}, %xmm0
; CHECK-NEXT: retq
%ps = load double, double* %ptr
%pins = insertelement <2 x double> undef, double %ps, i32 0
%p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
ret <2 x double> %t
}

define <4 x float> @ceil_v4f32_broadcast(float* %ptr) {
; CHECK-LABEL: ceil_v4f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to4}, %xmm0
; CHECK-NEXT: retq
%ps = load float, float* %ptr
%pins = insertelement <4 x float> undef, float %ps, i32 0
%p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
ret <4 x float> %t
}

define <4 x double> @ceil_v4f64_broadcast(double* %ptr){
; CHECK-LABEL: ceil_v4f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to4}, %ymm0
; CHECK-NEXT: retq
%ps = load double, double* %ptr
%pins = insertelement <4 x double> undef, double %ps, i32 0
%p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
ret <4 x double> %t
}

define <8 x float> @ceil_v8f32_broadcast(float* %ptr) {
; CHECK-LABEL: ceil_v8f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to8}, %ymm0
; CHECK-NEXT: retq
%ps = load float, float* %ptr
%pins = insertelement <8 x float> undef, float %ps, i32 0
%p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
ret <8 x float> %t
}

define <8 x double> @ceil_v8f64_broadcast(double* %ptr){
; CHECK-LABEL: ceil_v8f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to8}, %zmm0
; CHECK-NEXT: retq
%ps = load double, double* %ptr
%pins = insertelement <8 x double> undef, double %ps, i32 0
%p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
ret <8 x double> %t
}

define <16 x float> @ceil_v16f32_broadcast(float* %ptr) {
; CHECK-LABEL: ceil_v16f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to16}, %zmm0
; CHECK-NEXT: retq
%ps = load float, float* %ptr
%pins = insertelement <16 x float> undef, float %ps, i32 0
%p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
ret <16 x float> %t
}
| |
; Broadcast + merge-masking: embedded broadcast {1toN} and {%k1} combined on a
; single vrndscale*, merging unselected lanes from the passthru register.
define <2 x double> @ceil_v2f64_mask_broadcast(double* %ptr, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: ceil_v2f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to2}, %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <2 x double> undef, double %ps, i32 0
%p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
ret <2 x double> %s
}

define <4 x float> @ceil_v4f32_mask_broadcast(float* %ptr, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: ceil_v4f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to4}, %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <4 x float> undef, float %ps, i32 0
%p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
ret <4 x float> %s
}

define <4 x double> @ceil_v4f64_mask_broadcast(double* %ptr, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: ceil_v4f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to4}, %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <4 x double> undef, double %ps, i32 0
%p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
ret <4 x double> %s
}

define <8 x float> @ceil_v8f32_mask_broadcast(float* %ptr, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: ceil_v8f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to8}, %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <8 x float> undef, float %ps, i32 0
%p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
ret <8 x float> %s
}

define <8 x double> @ceil_v8f64_mask_broadcast(double* %ptr, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: ceil_v8f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to8}, %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <8 x double> undef, double %ps, i32 0
%p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
ret <8 x double> %s
}

define <16 x float> @ceil_v16f32_mask_broadcast(float* %ptr, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: ceil_v16f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to16}, %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <16 x float> undef, float %ps, i32 0
%p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
ret <16 x float> %s
}
| |
; Broadcast + zero-masking: embedded broadcast {1toN} with {%k1} {z} on a
; single vrndscale*, zeroing unselected lanes.
define <2 x double> @ceil_v2f64_maskz_broadcast(double* %ptr, <2 x i64> %cmp) {
; CHECK-LABEL: ceil_v2f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to2}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <2 x double> undef, double %ps, i32 0
%p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
ret <2 x double> %s
}

define <4 x float> @ceil_v4f32_maskz_broadcast(float* %ptr, <4 x i32> %cmp) {
; CHECK-LABEL: ceil_v4f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to4}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <4 x float> undef, float %ps, i32 0
%p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
ret <4 x float> %s
}

define <4 x double> @ceil_v4f64_maskz_broadcast(double* %ptr, <4 x i64> %cmp) {
; CHECK-LABEL: ceil_v4f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to4}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <4 x double> undef, double %ps, i32 0
%p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
ret <4 x double> %s
}

define <8 x float> @ceil_v8f32_maskz_broadcast(float* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: ceil_v8f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to8}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <8 x float> undef, float %ps, i32 0
%p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
ret <8 x float> %s
}

define <8 x double> @ceil_v8f64_maskz_broadcast(double* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: ceil_v8f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $10, (%rdi){1to8}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <8 x double> undef, double %ps, i32 0
%p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
ret <8 x double> %s
}

define <16 x float> @ceil_v16f32_maskz_broadcast(float* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: ceil_v16f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $10, (%rdi){1to16}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <16 x float> undef, float %ps, i32 0
%p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
ret <16 x float> %s
}
| |
; llvm.trunc lowers with imm $11 = round-toward-zero (0x3) ORed with suppress
; precision exceptions (0x8). Same VEX-vs-EVEX split as the ceil tests above:
; vround* for 128/256-bit, vrndscale* for 512-bit.
define <2 x double> @trunc_v2f64(<2 x double> %p) {
; CHECK-LABEL: trunc_v2f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $11, %xmm0, %xmm0
; CHECK-NEXT: retq
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
ret <2 x double> %t
}

define <4 x float> @trunc_v4f32(<4 x float> %p) {
; CHECK-LABEL: trunc_v4f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $11, %xmm0, %xmm0
; CHECK-NEXT: retq
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
ret <4 x float> %t
}

define <4 x double> @trunc_v4f64(<4 x double> %p){
; CHECK-LABEL: trunc_v4f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $11, %ymm0, %ymm0
; CHECK-NEXT: retq
%t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
ret <4 x double> %t
}

define <8 x float> @trunc_v8f32(<8 x float> %p) {
; CHECK-LABEL: trunc_v8f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $11, %ymm0, %ymm0
; CHECK-NEXT: retq
%t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
ret <8 x float> %t
}

define <8 x double> @trunc_v8f64(<8 x double> %p){
; CHECK-LABEL: trunc_v8f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $11, %zmm0, %zmm0
; CHECK-NEXT: retq
%t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
ret <8 x double> %t
}

define <16 x float> @trunc_v16f32(<16 x float> %p) {
; CHECK-LABEL: trunc_v16f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $11, %zmm0, %zmm0
; CHECK-NEXT: retq
%t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
ret <16 x float> %t
}
| |
; trunc load forms: memory operand folded into the rounding instruction.
define <2 x double> @trunc_v2f64_load(<2 x double>* %ptr) {
; CHECK-LABEL: trunc_v2f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $11, (%rdi), %xmm0
; CHECK-NEXT: retq
%p = load <2 x double>, <2 x double>* %ptr
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
ret <2 x double> %t
}

define <4 x float> @trunc_v4f32_load(<4 x float>* %ptr) {
; CHECK-LABEL: trunc_v4f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $11, (%rdi), %xmm0
; CHECK-NEXT: retq
%p = load <4 x float>, <4 x float>* %ptr
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
ret <4 x float> %t
}

define <4 x double> @trunc_v4f64_load(<4 x double>* %ptr){
; CHECK-LABEL: trunc_v4f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $11, (%rdi), %ymm0
; CHECK-NEXT: retq
%p = load <4 x double>, <4 x double>* %ptr
%t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
ret <4 x double> %t
}

define <8 x float> @trunc_v8f32_load(<8 x float>* %ptr) {
; CHECK-LABEL: trunc_v8f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $11, (%rdi), %ymm0
; CHECK-NEXT: retq
%p = load <8 x float>, <8 x float>* %ptr
%t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
ret <8 x float> %t
}

define <8 x double> @trunc_v8f64_load(<8 x double>* %ptr){
; CHECK-LABEL: trunc_v8f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $11, (%rdi), %zmm0
; CHECK-NEXT: retq
%p = load <8 x double>, <8 x double>* %ptr
%t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
ret <8 x double> %t
}

define <16 x float> @trunc_v16f32_load(<16 x float>* %ptr) {
; CHECK-LABEL: trunc_v16f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $11, (%rdi), %zmm0
; CHECK-NEXT: retq
%p = load <16 x float>, <16 x float>* %ptr
%t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
ret <16 x float> %t
}
| |
; Masked trunc: like the masked ceil cases, the select forces the EVEX
; vrndscale* form with {%k1} merge-masking into the passthru register.
define <2 x double> @trunc_v2f64_mask(<2 x double> %p, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: trunc_v2f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscalepd $11, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
ret <2 x double> %s
}

define <4 x float> @trunc_v4f32_mask(<4 x float> %p, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: trunc_v4f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscaleps $11, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
ret <4 x float> %s
}

define <4 x double> @trunc_v4f64_mask(<4 x double> %p, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: trunc_v4f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscalepd $11, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
ret <4 x double> %s
}

define <8 x float> @trunc_v8f32_mask(<8 x float> %p, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: trunc_v8f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscaleps $11, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
ret <8 x float> %s
}

define <8 x double> @trunc_v8f64_mask(<8 x double> %p, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: trunc_v8f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscalepd $11, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, %zmm0
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
ret <8 x double> %s
}

define <16 x float> @trunc_v16f32_mask(<16 x float> %p, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: trunc_v16f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscaleps $11, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
ret <16 x float> %s
}
| |
| ; Zero-masked trunc (select of llvm.trunc against zeroinitializer). Expect a single |
| ; vrndscaleps/pd with a {%k1} {z} zeroing mask; imm $11 = trunc + exception suppression. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @trunc_v2f64_maskz(<2 x double> %p, <2 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v2f64_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, %xmm0, %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @trunc_v4f32_maskz(<4 x float> %p, <4 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v4f32_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, %xmm0, %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @trunc_v4f64_maskz(<4 x double> %p, <4 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v4f64_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, %ymm0, %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @trunc_v8f32_maskz(<8 x float> %p, <8 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v8f32_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, %ymm0, %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @trunc_v8f64_maskz(<8 x double> %p, <8 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v8f64_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, %zmm0, %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @trunc_v16f32_maskz(<16 x float> %p, <16 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v16f32_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, %zmm0, %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer |
| ret <16 x float> %s |
| } |
| |
| ; Merge-masked trunc with the operand loaded from memory. Expect the load to fold into |
| ; the masked vrndscaleps/pd memory form (NOTE: -disable-peephole is on, so this exercises |
| ; ISel-time folding). Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @trunc_v2f64_mask_load(<2 x double>* %ptr, <2 x double> %passthru, <2 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v2f64_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi), %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %p = load <2 x double>, <2 x double>* %ptr |
| %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @trunc_v4f32_mask_load(<4 x float>* %ptr, <4 x float> %passthru, <4 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v4f32_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi), %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %p = load <4 x float>, <4 x float>* %ptr |
| %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @trunc_v4f64_mask_load(<4 x double>* %ptr, <4 x double> %passthru, <4 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v4f64_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi), %ymm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %p = load <4 x double>, <4 x double>* %ptr |
| %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @trunc_v8f32_mask_load(<8 x float>* %ptr, <8 x float> %passthru, <8 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v8f32_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi), %ymm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %p = load <8 x float>, <8 x float>* %ptr |
| %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @trunc_v8f64_mask_load(<8 x double>* %ptr, <8 x double> %passthru, <8 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v8f64_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi), %zmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %p = load <8 x double>, <8 x double>* %ptr |
| %t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @trunc_v16f32_mask_load(<16 x float>* %ptr, <16 x float> %passthru, <16 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v16f32_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi), %zmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %p = load <16 x float>, <16 x float>* %ptr |
| %t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru |
| ret <16 x float> %s |
| } |
| |
| ; Zero-masked trunc with the operand loaded from memory: both the load and the zeroing |
| ; select should fold into one vrndscaleps/pd mem-operand instruction with {%k1} {z}. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @trunc_v2f64_maskz_load(<2 x double>* %ptr, <2 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v2f64_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi), %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %p = load <2 x double>, <2 x double>* %ptr |
| %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @trunc_v4f32_maskz_load(<4 x float>* %ptr, <4 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v4f32_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi), %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %p = load <4 x float>, <4 x float>* %ptr |
| %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @trunc_v4f64_maskz_load(<4 x double>* %ptr, <4 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v4f64_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi), %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %p = load <4 x double>, <4 x double>* %ptr |
| %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @trunc_v8f32_maskz_load(<8 x float>* %ptr, <8 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v8f32_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi), %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %p = load <8 x float>, <8 x float>* %ptr |
| %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @trunc_v8f64_maskz_load(<8 x double>* %ptr, <8 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v8f64_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi), %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %p = load <8 x double>, <8 x double>* %ptr |
| %t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @trunc_v16f32_maskz_load(<16 x float>* %ptr, <16 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v16f32_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi), %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %p = load <16 x float>, <16 x float>* %ptr |
| %t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer |
| ret <16 x float> %s |
| } |
| |
| ; Trunc of a splatted scalar load (insertelement + shufflevector splat idiom): the |
| ; broadcast should fold into the vrndscale embedded-broadcast form (%rdi){1toN}. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @trunc_v2f64_broadcast(double* %ptr) { |
| ; CHECK-LABEL: trunc_v2f64_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to2}, %xmm0 |
| ; CHECK-NEXT: retq |
| %ps = load double, double* %ptr |
| %pins = insertelement <2 x double> undef, double %ps, i32 0 |
| %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer |
| %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p) |
| ret <2 x double> %t |
| } |
| |
| define <4 x float> @trunc_v4f32_broadcast(float* %ptr) { |
| ; CHECK-LABEL: trunc_v4f32_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to4}, %xmm0 |
| ; CHECK-NEXT: retq |
| %ps = load float, float* %ptr |
| %pins = insertelement <4 x float> undef, float %ps, i32 0 |
| %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer |
| %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p) |
| ret <4 x float> %t |
| } |
| |
| define <4 x double> @trunc_v4f64_broadcast(double* %ptr){ |
| ; CHECK-LABEL: trunc_v4f64_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to4}, %ymm0 |
| ; CHECK-NEXT: retq |
| %ps = load double, double* %ptr |
| %pins = insertelement <4 x double> undef, double %ps, i32 0 |
| %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer |
| %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p) |
| ret <4 x double> %t |
| } |
| |
| define <8 x float> @trunc_v8f32_broadcast(float* %ptr) { |
| ; CHECK-LABEL: trunc_v8f32_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to8}, %ymm0 |
| ; CHECK-NEXT: retq |
| %ps = load float, float* %ptr |
| %pins = insertelement <8 x float> undef, float %ps, i32 0 |
| %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer |
| %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p) |
| ret <8 x float> %t |
| } |
| |
| define <8 x double> @trunc_v8f64_broadcast(double* %ptr){ |
| ; CHECK-LABEL: trunc_v8f64_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to8}, %zmm0 |
| ; CHECK-NEXT: retq |
| %ps = load double, double* %ptr |
| %pins = insertelement <8 x double> undef, double %ps, i32 0 |
| %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer |
| %t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p) |
| ret <8 x double> %t |
| } |
| |
| define <16 x float> @trunc_v16f32_broadcast(float* %ptr) { |
| ; CHECK-LABEL: trunc_v16f32_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to16}, %zmm0 |
| ; CHECK-NEXT: retq |
| %ps = load float, float* %ptr |
| %pins = insertelement <16 x float> undef, float %ps, i32 0 |
| %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer |
| %t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p) |
| ret <16 x float> %t |
| } |
| |
| ; Merge-masked trunc of a broadcast scalar load: both the embedded broadcast (%rdi){1toN} |
| ; and the {%k1} merge mask should fold into a single vrndscale instruction. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @trunc_v2f64_mask_broadcast(double* %ptr, <2 x double> %passthru, <2 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v2f64_mask_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to2}, %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %ps = load double, double* %ptr |
| %pins = insertelement <2 x double> undef, double %ps, i32 0 |
| %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer |
| %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @trunc_v4f32_mask_broadcast(float* %ptr, <4 x float> %passthru, <4 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v4f32_mask_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to4}, %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %ps = load float, float* %ptr |
| %pins = insertelement <4 x float> undef, float %ps, i32 0 |
| %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer |
| %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @trunc_v4f64_mask_broadcast(double* %ptr, <4 x double> %passthru, <4 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v4f64_mask_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to4}, %ymm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %ps = load double, double* %ptr |
| %pins = insertelement <4 x double> undef, double %ps, i32 0 |
| %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer |
| %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @trunc_v8f32_mask_broadcast(float* %ptr, <8 x float> %passthru, <8 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v8f32_mask_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to8}, %ymm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %ps = load float, float* %ptr |
| %pins = insertelement <8 x float> undef, float %ps, i32 0 |
| %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer |
| %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @trunc_v8f64_mask_broadcast(double* %ptr, <8 x double> %passthru, <8 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v8f64_mask_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to8}, %zmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %ps = load double, double* %ptr |
| %pins = insertelement <8 x double> undef, double %ps, i32 0 |
| %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer |
| %t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @trunc_v16f32_mask_broadcast(float* %ptr, <16 x float> %passthru, <16 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v16f32_mask_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to16}, %zmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %ps = load float, float* %ptr |
| %pins = insertelement <16 x float> undef, float %ps, i32 0 |
| %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer |
| %t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru |
| ret <16 x float> %s |
| } |
| |
| ; Zero-masked trunc of a broadcast scalar load: embedded broadcast (%rdi){1toN} plus |
| ; {%k1} {z} zeroing mask should all fold into a single vrndscale instruction. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @trunc_v2f64_maskz_broadcast(double* %ptr, <2 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v2f64_maskz_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to2}, %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %ps = load double, double* %ptr |
| %pins = insertelement <2 x double> undef, double %ps, i32 0 |
| %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer |
| %t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @trunc_v4f32_maskz_broadcast(float* %ptr, <4 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v4f32_maskz_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to4}, %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %ps = load float, float* %ptr |
| %pins = insertelement <4 x float> undef, float %ps, i32 0 |
| %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer |
| %t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @trunc_v4f64_maskz_broadcast(double* %ptr, <4 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v4f64_maskz_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to4}, %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %ps = load double, double* %ptr |
| %pins = insertelement <4 x double> undef, double %ps, i32 0 |
| %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer |
| %t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @trunc_v8f32_maskz_broadcast(float* %ptr, <8 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v8f32_maskz_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to8}, %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %ps = load float, float* %ptr |
| %pins = insertelement <8 x float> undef, float %ps, i32 0 |
| %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer |
| %t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @trunc_v8f64_maskz_broadcast(double* %ptr, <8 x i64> %cmp) { |
| ; CHECK-LABEL: trunc_v8f64_maskz_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $11, (%rdi){1to8}, %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %ps = load double, double* %ptr |
| %pins = insertelement <8 x double> undef, double %ps, i32 0 |
| %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer |
| %t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @trunc_v16f32_maskz_broadcast(float* %ptr, <16 x i32> %cmp) { |
| ; CHECK-LABEL: trunc_v16f32_maskz_broadcast: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1 |
| ; CHECK-NEXT: vrndscaleps $11, (%rdi){1to16}, %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %ps = load float, float* %ptr |
| %pins = insertelement <16 x float> undef, float %ps, i32 0 |
| %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer |
| %t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer |
| ret <16 x float> %s |
| } |
| |
| ; Unmasked rint: imm $4 = use current MXCSR rounding mode WITHOUT suppressing precision |
| ; exceptions (rint may raise inexact, unlike nearbyint). 128/256-bit forms can use the |
| ; shorter AVX vroundps/pd encoding; 512-bit has no vround, so vrndscale is required. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @rint_v2f64(<2 x double> %p) { |
| ; CHECK-LABEL: rint_v2f64: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundpd $4, %xmm0, %xmm0 |
| ; CHECK-NEXT: retq |
| %t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p) |
| ret <2 x double> %t |
| } |
| |
| define <4 x float> @rint_v4f32(<4 x float> %p) { |
| ; CHECK-LABEL: rint_v4f32: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundps $4, %xmm0, %xmm0 |
| ; CHECK-NEXT: retq |
| %t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p) |
| ret <4 x float> %t |
| } |
| |
| define <4 x double> @rint_v4f64(<4 x double> %p){ |
| ; CHECK-LABEL: rint_v4f64: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundpd $4, %ymm0, %ymm0 |
| ; CHECK-NEXT: retq |
| %t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p) |
| ret <4 x double> %t |
| } |
| |
| define <8 x float> @rint_v8f32(<8 x float> %p) { |
| ; CHECK-LABEL: rint_v8f32: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundps $4, %ymm0, %ymm0 |
| ; CHECK-NEXT: retq |
| %t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p) |
| ret <8 x float> %t |
| } |
| |
| define <8 x double> @rint_v8f64(<8 x double> %p){ |
| ; CHECK-LABEL: rint_v8f64: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscalepd $4, %zmm0, %zmm0 |
| ; CHECK-NEXT: retq |
| %t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p) |
| ret <8 x double> %t |
| } |
| |
| define <16 x float> @rint_v16f32(<16 x float> %p) { |
| ; CHECK-LABEL: rint_v16f32: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscaleps $4, %zmm0, %zmm0 |
| ; CHECK-NEXT: retq |
| %t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p) |
| ret <16 x float> %t |
| } |
| |
| ; Unmasked rint with a memory operand: the load should fold into vroundps/pd (128/256-bit) |
| ; or vrndscaleps/pd (512-bit) with imm $4 (current rounding mode, exceptions not masked). |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @rint_v2f64_load(<2 x double>* %ptr) { |
| ; CHECK-LABEL: rint_v2f64_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundpd $4, (%rdi), %xmm0 |
| ; CHECK-NEXT: retq |
| %p = load <2 x double>, <2 x double>* %ptr |
| %t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p) |
| ret <2 x double> %t |
| } |
| |
| define <4 x float> @rint_v4f32_load(<4 x float>* %ptr) { |
| ; CHECK-LABEL: rint_v4f32_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundps $4, (%rdi), %xmm0 |
| ; CHECK-NEXT: retq |
| %p = load <4 x float>, <4 x float>* %ptr |
| %t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p) |
| ret <4 x float> %t |
| } |
| |
| define <4 x double> @rint_v4f64_load(<4 x double>* %ptr){ |
| ; CHECK-LABEL: rint_v4f64_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundpd $4, (%rdi), %ymm0 |
| ; CHECK-NEXT: retq |
| %p = load <4 x double>, <4 x double>* %ptr |
| %t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p) |
| ret <4 x double> %t |
| } |
| |
| define <8 x float> @rint_v8f32_load(<8 x float>* %ptr) { |
| ; CHECK-LABEL: rint_v8f32_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vroundps $4, (%rdi), %ymm0 |
| ; CHECK-NEXT: retq |
| %p = load <8 x float>, <8 x float>* %ptr |
| %t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p) |
| ret <8 x float> %t |
| } |
| |
| define <8 x double> @rint_v8f64_load(<8 x double>* %ptr){ |
| ; CHECK-LABEL: rint_v8f64_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscalepd $4, (%rdi), %zmm0 |
| ; CHECK-NEXT: retq |
| %p = load <8 x double>, <8 x double>* %ptr |
| %t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p) |
| ret <8 x double> %t |
| } |
| |
| define <16 x float> @rint_v16f32_load(<16 x float>* %ptr) { |
| ; CHECK-LABEL: rint_v16f32_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vrndscaleps $4, (%rdi), %zmm0 |
| ; CHECK-NEXT: retq |
| %p = load <16 x float>, <16 x float>* %ptr |
| %t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p) |
| ret <16 x float> %t |
| } |
| |
| ; Merge-masked rint: the select against %passthru must force the EVEX vrndscale form |
| ; (vround has no mask support) with imm $4 and a {%k1} merge mask. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @rint_v2f64_mask(<2 x double> %p, <2 x double> %passthru, <2 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v2f64_mask: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm2, %xmm2, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vmovapd %xmm1, %xmm0 |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @rint_v4f32_mask(<4 x float> %p, <4 x float> %passthru, <4 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v4f32_mask: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm2, %xmm2, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, %xmm0, %xmm1 {%k1} |
| ; CHECK-NEXT: vmovaps %xmm1, %xmm0 |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @rint_v4f64_mask(<4 x double> %p, <4 x double> %passthru, <4 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v4f64_mask: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm2, %ymm2, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, %ymm0, %ymm1 {%k1} |
| ; CHECK-NEXT: vmovapd %ymm1, %ymm0 |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @rint_v8f32_mask(<8 x float> %p, <8 x float> %passthru, <8 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v8f32_mask: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm2, %ymm2, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, %ymm0, %ymm1 {%k1} |
| ; CHECK-NEXT: vmovaps %ymm1, %ymm0 |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @rint_v8f64_mask(<8 x double> %p, <8 x double> %passthru, <8 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v8f64_mask: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm2, %zmm2, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, %zmm0, %zmm1 {%k1} |
| ; CHECK-NEXT: vmovapd %zmm1, %zmm0 |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @rint_v16f32_mask(<16 x float> %p, <16 x float> %passthru, <16 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v16f32_mask: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm2, %zmm2, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, %zmm0, %zmm1 {%k1} |
| ; CHECK-NEXT: vmovaps %zmm1, %zmm0 |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru |
| ret <16 x float> %s |
| } |
| |
| ; Zero-masked rint: select against zeroinitializer should fold into vrndscaleps/pd |
| ; with imm $4 and a {%k1} {z} zeroing mask. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @rint_v2f64_maskz(<2 x double> %p, <2 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v2f64_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, %xmm0, %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @rint_v4f32_maskz(<4 x float> %p, <4 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v4f32_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, %xmm0, %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @rint_v4f64_maskz(<4 x double> %p, <4 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v4f64_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, %ymm0, %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @rint_v8f32_maskz(<8 x float> %p, <8 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v8f32_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, %ymm0, %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @rint_v8f64_maskz(<8 x double> %p, <8 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v8f64_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, %zmm0, %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @rint_v16f32_maskz(<16 x float> %p, <16 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v16f32_maskz: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, %zmm0, %zmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer |
| ret <16 x float> %s |
| } |
| |
| ; Merge-masked rint with the operand loaded from memory: load and merge mask should |
| ; fold into a single vrndscaleps/pd mem-operand instruction with imm $4 and {%k1}. |
| ; Autogenerated assertions — regenerate with update_llc_test_checks.py. |
| define <2 x double> @rint_v2f64_mask_load(<2 x double>* %ptr, <2 x double> %passthru, <2 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v2f64_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, (%rdi), %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %p = load <2 x double>, <2 x double>* %ptr |
| %t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @rint_v4f32_mask_load(<4 x float>* %ptr, <4 x float> %passthru, <4 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v4f32_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, (%rdi), %xmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %p = load <4 x float>, <4 x float>* %ptr |
| %t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @rint_v4f64_mask_load(<4 x double>* %ptr, <4 x double> %passthru, <4 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v4f64_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, (%rdi), %ymm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %p = load <4 x double>, <4 x double>* %ptr |
| %t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru |
| ret <4 x double> %s |
| } |
| |
| define <8 x float> @rint_v8f32_mask_load(<8 x float>* %ptr, <8 x float> %passthru, <8 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v8f32_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, (%rdi), %ymm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i32> %cmp, zeroinitializer |
| %p = load <8 x float>, <8 x float>* %ptr |
| %t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p) |
| %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru |
| ret <8 x float> %s |
| } |
| |
| define <8 x double> @rint_v8f64_mask_load(<8 x double>* %ptr, <8 x double> %passthru, <8 x i64> %cmp) { |
| ; CHECK-LABEL: rint_v8f64_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscalepd $4, (%rdi), %zmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <8 x i64> %cmp, zeroinitializer |
| %p = load <8 x double>, <8 x double>* %ptr |
| %t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p) |
| %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru |
| ret <8 x double> %s |
| } |
| |
| define <16 x float> @rint_v16f32_mask_load(<16 x float>* %ptr, <16 x float> %passthru, <16 x i32> %cmp) { |
| ; CHECK-LABEL: rint_v16f32_mask_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1 |
| ; CHECK-NEXT: vrndscaleps $4, (%rdi), %zmm0 {%k1} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <16 x i32> %cmp, zeroinitializer |
| %p = load <16 x float>, <16 x float>* %ptr |
| %t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p) |
| %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru |
| ret <16 x float> %s |
| } |
| |
; Zero-masked rint on a loaded vector: the folded-load vrndscale $4 carries
; {%k1} {z} so that lanes with a clear mask bit are zeroed instead of
; merged with a passthru register.
define <2 x double> @rint_v2f64_maskz_load(<2 x double>* %ptr, <2 x i64> %cmp) {
; CHECK-LABEL: rint_v2f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%p = load <2 x double>, <2 x double>* %ptr
%t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
ret <2 x double> %s
}

define <4 x float> @rint_v4f32_maskz_load(<4 x float>* %ptr, <4 x i32> %cmp) {
; CHECK-LABEL: rint_v4f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%p = load <4 x float>, <4 x float>* %ptr
%t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
ret <4 x float> %s
}

define <4 x double> @rint_v4f64_maskz_load(<4 x double>* %ptr, <4 x i64> %cmp) {
; CHECK-LABEL: rint_v4f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%p = load <4 x double>, <4 x double>* %ptr
%t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
ret <4 x double> %s
}

define <8 x float> @rint_v8f32_maskz_load(<8 x float>* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: rint_v8f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%p = load <8 x float>, <8 x float>* %ptr
%t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
ret <8 x float> %s
}

define <8 x double> @rint_v8f64_maskz_load(<8 x double>* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: rint_v8f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%p = load <8 x double>, <8 x double>* %ptr
%t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
ret <8 x double> %s
}

define <16 x float> @rint_v16f32_maskz_load(<16 x float>* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: rint_v16f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%p = load <16 x float>, <16 x float>* %ptr
%t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
ret <16 x float> %s
}
| |
; rint of a scalar splat (insertelement + shufflevector with a zero mask):
; the scalar load should fold into the {1toN} embedded-broadcast memory
; form of vrndscalepd/vrndscaleps $4.
define <2 x double> @rint_v2f64_broadcast(double* %ptr) {
; CHECK-LABEL: rint_v2f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to2}, %xmm0
; CHECK-NEXT: retq
%ps = load double, double* %ptr
%pins = insertelement <2 x double> undef, double %ps, i32 0
%p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
%t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
ret <2 x double> %t
}

define <4 x float> @rint_v4f32_broadcast(float* %ptr) {
; CHECK-LABEL: rint_v4f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to4}, %xmm0
; CHECK-NEXT: retq
%ps = load float, float* %ptr
%pins = insertelement <4 x float> undef, float %ps, i32 0
%p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
%t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
ret <4 x float> %t
}

define <4 x double> @rint_v4f64_broadcast(double* %ptr){
; CHECK-LABEL: rint_v4f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to4}, %ymm0
; CHECK-NEXT: retq
%ps = load double, double* %ptr
%pins = insertelement <4 x double> undef, double %ps, i32 0
%p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
%t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
ret <4 x double> %t
}

define <8 x float> @rint_v8f32_broadcast(float* %ptr) {
; CHECK-LABEL: rint_v8f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to8}, %ymm0
; CHECK-NEXT: retq
%ps = load float, float* %ptr
%pins = insertelement <8 x float> undef, float %ps, i32 0
%p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
%t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
ret <8 x float> %t
}

define <8 x double> @rint_v8f64_broadcast(double* %ptr){
; CHECK-LABEL: rint_v8f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to8}, %zmm0
; CHECK-NEXT: retq
%ps = load double, double* %ptr
%pins = insertelement <8 x double> undef, double %ps, i32 0
%p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
%t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p)
ret <8 x double> %t
}

define <16 x float> @rint_v16f32_broadcast(float* %ptr) {
; CHECK-LABEL: rint_v16f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to16}, %zmm0
; CHECK-NEXT: retq
%ps = load float, float* %ptr
%pins = insertelement <16 x float> undef, float %ps, i32 0
%p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
%t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p)
ret <16 x float> %t
}
| |
; Merge-masked rint of a scalar splat: combines the {1toN} embedded
; broadcast memory operand with the {%k1} merge-mask on a single
; vrndscale $4, leaving masked-off lanes holding %passthru.
define <2 x double> @rint_v2f64_mask_broadcast(double* %ptr, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: rint_v2f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to2}, %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <2 x double> undef, double %ps, i32 0
%p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
%t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
ret <2 x double> %s
}

define <4 x float> @rint_v4f32_mask_broadcast(float* %ptr, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: rint_v4f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to4}, %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <4 x float> undef, float %ps, i32 0
%p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
%t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
ret <4 x float> %s
}

define <4 x double> @rint_v4f64_mask_broadcast(double* %ptr, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: rint_v4f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to4}, %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <4 x double> undef, double %ps, i32 0
%p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
%t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
ret <4 x double> %s
}

define <8 x float> @rint_v8f32_mask_broadcast(float* %ptr, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: rint_v8f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to8}, %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <8 x float> undef, float %ps, i32 0
%p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
%t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
ret <8 x float> %s
}

define <8 x double> @rint_v8f64_mask_broadcast(double* %ptr, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: rint_v8f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to8}, %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <8 x double> undef, double %ps, i32 0
%p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
%t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
ret <8 x double> %s
}

define <16 x float> @rint_v16f32_mask_broadcast(float* %ptr, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: rint_v16f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to16}, %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <16 x float> undef, float %ps, i32 0
%p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
%t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
ret <16 x float> %s
}
| |
; Zero-masked rint of a scalar splat: combines the {1toN} embedded
; broadcast memory operand with {%k1} {z} zero-masking on a single
; vrndscale $4.
define <2 x double> @rint_v2f64_maskz_broadcast(double* %ptr, <2 x i64> %cmp) {
; CHECK-LABEL: rint_v2f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to2}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <2 x double> undef, double %ps, i32 0
%p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
%t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
ret <2 x double> %s
}

define <4 x float> @rint_v4f32_maskz_broadcast(float* %ptr, <4 x i32> %cmp) {
; CHECK-LABEL: rint_v4f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to4}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <4 x float> undef, float %ps, i32 0
%p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
%t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
ret <4 x float> %s
}

define <4 x double> @rint_v4f64_maskz_broadcast(double* %ptr, <4 x i64> %cmp) {
; CHECK-LABEL: rint_v4f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to4}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <4 x double> undef, double %ps, i32 0
%p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
%t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
ret <4 x double> %s
}

define <8 x float> @rint_v8f32_maskz_broadcast(float* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: rint_v8f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to8}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <8 x float> undef, float %ps, i32 0
%p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
%t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
ret <8 x float> %s
}

define <8 x double> @rint_v8f64_maskz_broadcast(double* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: rint_v8f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $4, (%rdi){1to8}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%ps = load double, double* %ptr
%pins = insertelement <8 x double> undef, double %ps, i32 0
%p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
%t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
ret <8 x double> %s
}

define <16 x float> @rint_v16f32_maskz_broadcast(float* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: rint_v16f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $4, (%rdi){1to16}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%ps = load float, float* %ptr
%pins = insertelement <16 x float> undef, float %ps, i32 0
%p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
%t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
ret <16 x float> %s
}
| |
; Plain nearbyint lowering: imm 12 (= 0x8 | 0x4, i.e. suppress precision
; exceptions + round under the current MXCSR mode per the ROUND/RNDSCALE
; spec). 128/256-bit types use the AVX vroundpd/vroundps encodings;
; 512-bit types require the AVX-512 vrndscalepd/vrndscaleps forms.
define <2 x double> @nearbyint_v2f64(<2 x double> %p) {
; CHECK-LABEL: nearbyint_v2f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $12, %xmm0, %xmm0
; CHECK-NEXT: retq
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
ret <2 x double> %t
}

define <4 x float> @nearbyint_v4f32(<4 x float> %p) {
; CHECK-LABEL: nearbyint_v4f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $12, %xmm0, %xmm0
; CHECK-NEXT: retq
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
ret <4 x float> %t
}

define <4 x double> @nearbyint_v4f64(<4 x double> %p){
; CHECK-LABEL: nearbyint_v4f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $12, %ymm0, %ymm0
; CHECK-NEXT: retq
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
ret <4 x double> %t
}

define <8 x float> @nearbyint_v8f32(<8 x float> %p) {
; CHECK-LABEL: nearbyint_v8f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $12, %ymm0, %ymm0
; CHECK-NEXT: retq
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
ret <8 x float> %t
}

define <8 x double> @nearbyint_v8f64(<8 x double> %p){
; CHECK-LABEL: nearbyint_v8f64:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $12, %zmm0, %zmm0
; CHECK-NEXT: retq
%t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
ret <8 x double> %t
}

define <16 x float> @nearbyint_v16f32(<16 x float> %p) {
; CHECK-LABEL: nearbyint_v16f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $12, %zmm0, %zmm0
; CHECK-NEXT: retq
%t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
ret <16 x float> %t
}
| |
; nearbyint with the input loaded from memory: the load should fold into
; vroundpd/vroundps $12 (128/256-bit) or vrndscalepd/vrndscaleps $12
; (512-bit) rather than being emitted as a separate vmov.
define <2 x double> @nearbyint_v2f64_load(<2 x double>* %ptr) {
; CHECK-LABEL: nearbyint_v2f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $12, (%rdi), %xmm0
; CHECK-NEXT: retq
%p = load <2 x double>, <2 x double>* %ptr
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
ret <2 x double> %t
}

define <4 x float> @nearbyint_v4f32_load(<4 x float>* %ptr) {
; CHECK-LABEL: nearbyint_v4f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $12, (%rdi), %xmm0
; CHECK-NEXT: retq
%p = load <4 x float>, <4 x float>* %ptr
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
ret <4 x float> %t
}

define <4 x double> @nearbyint_v4f64_load(<4 x double>* %ptr){
; CHECK-LABEL: nearbyint_v4f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundpd $12, (%rdi), %ymm0
; CHECK-NEXT: retq
%p = load <4 x double>, <4 x double>* %ptr
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
ret <4 x double> %t
}

define <8 x float> @nearbyint_v8f32_load(<8 x float>* %ptr) {
; CHECK-LABEL: nearbyint_v8f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vroundps $12, (%rdi), %ymm0
; CHECK-NEXT: retq
%p = load <8 x float>, <8 x float>* %ptr
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
ret <8 x float> %t
}

define <8 x double> @nearbyint_v8f64_load(<8 x double>* %ptr){
; CHECK-LABEL: nearbyint_v8f64_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $12, (%rdi), %zmm0
; CHECK-NEXT: retq
%p = load <8 x double>, <8 x double>* %ptr
%t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
ret <8 x double> %t
}

define <16 x float> @nearbyint_v16f32_load(<16 x float>* %ptr) {
; CHECK-LABEL: nearbyint_v16f32_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $12, (%rdi), %zmm0
; CHECK-NEXT: retq
%p = load <16 x float>, <16 x float>* %ptr
%t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
ret <16 x float> %t
}
| |
; Merge-masked nearbyint: even for 128/256-bit types the masked form uses
; vrndscale $12 (the AVX vround has no mask encoding), writing into the
; passthru register under {%k1}; a trailing vmov returns the result in
; the ABI register.
define <2 x double> @nearbyint_v2f64_mask(<2 x double> %p, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v2f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscalepd $12, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
ret <2 x double> %s
}

define <4 x float> @nearbyint_v4f32_mask(<4 x float> %p, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v4f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm2, %xmm2, %k1
; CHECK-NEXT: vrndscaleps $12, %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
ret <4 x float> %s
}

define <4 x double> @nearbyint_v4f64_mask(<4 x double> %p, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v4f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscalepd $12, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovapd %ymm1, %ymm0
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
ret <4 x double> %s
}

define <8 x float> @nearbyint_v8f32_mask(<8 x float> %p, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v8f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm2, %ymm2, %k1
; CHECK-NEXT: vrndscaleps $12, %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
ret <8 x float> %s
}

define <8 x double> @nearbyint_v8f64_mask(<8 x double> %p, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v8f64_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscalepd $12, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, %zmm0
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
ret <8 x double> %s
}

define <16 x float> @nearbyint_v16f32_mask(<16 x float> %p, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v16f32_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm2, %zmm2, %k1
; CHECK-NEXT: vrndscaleps $12, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
ret <16 x float> %s
}
| |
; Zero-masked nearbyint: the select against zeroinitializer becomes the
; {%k1} {z} zero-mask on vrndscale $12, so no separate blend or passthru
; register is needed.
define <2 x double> @nearbyint_v2f64_maskz(<2 x double> %p, <2 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v2f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $12, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
ret <2 x double> %s
}

define <4 x float> @nearbyint_v4f32_maskz(<4 x float> %p, <4 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v4f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $12, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
ret <4 x float> %s
}

define <4 x double> @nearbyint_v4f64_maskz(<4 x double> %p, <4 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v4f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $12, %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
ret <4 x double> %s
}

define <8 x float> @nearbyint_v8f32_maskz(<8 x float> %p, <8 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v8f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $12, %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
ret <8 x float> %s
}

define <8 x double> @nearbyint_v8f64_maskz(<8 x double> %p, <8 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v8f64_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $12, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
ret <8 x double> %s
}

define <16 x float> @nearbyint_v16f32_maskz(<16 x float> %p, <16 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v16f32_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $12, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
ret <16 x float> %s
}
| |
; Merge-masked nearbyint on a loaded vector: the load folds into the
; memory operand of vrndscale $12 and the select on %passthru becomes
; the {%k1} merge-mask.
define <2 x double> @nearbyint_v2f64_mask_load(<2 x double>* %ptr, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v2f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <2 x i64> %cmp, zeroinitializer
%p = load <2 x double>, <2 x double>* %ptr
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
%s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
ret <2 x double> %s
}

define <4 x float> @nearbyint_v4f32_mask_load(<4 x float>* %ptr, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v4f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i32> %cmp, zeroinitializer
%p = load <4 x float>, <4 x float>* %ptr
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
%s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
ret <4 x float> %s
}

define <4 x double> @nearbyint_v4f64_mask_load(<4 x double>* %ptr, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v4f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <4 x i64> %cmp, zeroinitializer
%p = load <4 x double>, <4 x double>* %ptr
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
%s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
ret <4 x double> %s
}

define <8 x float> @nearbyint_v8f32_mask_load(<8 x float>* %ptr, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v8f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i32> %cmp, zeroinitializer
%p = load <8 x float>, <8 x float>* %ptr
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
%s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
ret <8 x float> %s
}

define <8 x double> @nearbyint_v8f64_mask_load(<8 x double>* %ptr, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v8f64_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <8 x i64> %cmp, zeroinitializer
%p = load <8 x double>, <8 x double>* %ptr
%t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
%s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
ret <8 x double> %s
}

define <16 x float> @nearbyint_v16f32_mask_load(<16 x float>* %ptr, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v16f32_mask_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%c = icmp eq <16 x i32> %cmp, zeroinitializer
%p = load <16 x float>, <16 x float>* %ptr
%t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
%s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
ret <16 x float> %s
}
| |
| define <2 x double> @nearbyint_v2f64_maskz_load(<2 x double>* %ptr, <2 x i64> %cmp) { |
| ; CHECK-LABEL: nearbyint_v2f64_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $12, (%rdi), %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <2 x i64> %cmp, zeroinitializer |
| %p = load <2 x double>, <2 x double>* %ptr |
| %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p) |
| %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer |
| ret <2 x double> %s |
| } |
| |
| define <4 x float> @nearbyint_v4f32_maskz_load(<4 x float>* %ptr, <4 x i32> %cmp) { |
| ; CHECK-LABEL: nearbyint_v4f32_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1 |
| ; CHECK-NEXT: vrndscaleps $12, (%rdi), %xmm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i32> %cmp, zeroinitializer |
| %p = load <4 x float>, <4 x float>* %ptr |
| %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p) |
| %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer |
| ret <4 x float> %s |
| } |
| |
| define <4 x double> @nearbyint_v4f64_maskz_load(<4 x double>* %ptr, <4 x i64> %cmp) { |
| ; CHECK-LABEL: nearbyint_v4f64_maskz_load: |
| ; CHECK: ## %bb.0: |
| ; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1 |
| ; CHECK-NEXT: vrndscalepd $12, (%rdi), %ymm0 {%k1} {z} |
| ; CHECK-NEXT: retq |
| %c = icmp eq <4 x i64> %cmp, zeroinitializer |
| %p = load <4 x double>, <4 x double>* %ptr |
| %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p) |
| %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer |
| ret <4 x double> %s |
| } |
| |
; Zero-masked nearbyint of a loaded <8 x float> (256-bit): the load must fold
; into vrndscaleps $12 on %ymm with {%k1} {z}.
define <8 x float> @nearbyint_v8f32_maskz_load(<8 x float>* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v8f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %p = load <8 x float>, <8 x float>* %ptr
  %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
  ret <8 x float> %s
}
| |
; Zero-masked nearbyint of a loaded <8 x double> (512-bit): the load must fold
; into vrndscalepd $12 on %zmm with {%k1} {z}.
define <8 x double> @nearbyint_v8f64_maskz_load(<8 x double>* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v8f64_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %p = load <8 x double>, <8 x double>* %ptr
  %t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
  ret <8 x double> %s
}
| |
; Zero-masked nearbyint of a loaded <16 x float> (512-bit): the load must fold
; into vrndscaleps $12 on %zmm with {%k1} {z}.
define <16 x float> @nearbyint_v16f32_maskz_load(<16 x float>* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v16f32_maskz_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %p = load <16 x float>, <16 x float>* %ptr
  %t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
  ret <16 x float> %s
}
| |
; Nearbyint of a scalar double splatted from memory (insertelement +
; shufflevector splat idiom): expect the broadcast load to fold into the
; {1to2} embedded-broadcast memory form of vrndscalepd $12.
define <2 x double> @nearbyint_v2f64_broadcast(double* %ptr) {
; CHECK-LABEL: nearbyint_v2f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to2}, %xmm0
; CHECK-NEXT: retq
  %ps = load double, double* %ptr
  %pins = insertelement <2 x double> undef, double %ps, i32 0
  %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
  %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
  ret <2 x double> %t
}
| |
; Nearbyint of a scalar float splatted from memory: expect the broadcast load
; to fold into vrndscaleps $12 with the {1to4} memory operand.
define <4 x float> @nearbyint_v4f32_broadcast(float* %ptr) {
; CHECK-LABEL: nearbyint_v4f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to4}, %xmm0
; CHECK-NEXT: retq
  %ps = load float, float* %ptr
  %pins = insertelement <4 x float> undef, float %ps, i32 0
  %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
  %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
  ret <4 x float> %t
}
| |
; Nearbyint of a scalar double splatted to <4 x double>: expect the broadcast
; load to fold into vrndscalepd $12 with the {1to4} memory operand on %ymm.
define <4 x double> @nearbyint_v4f64_broadcast(double* %ptr){
; CHECK-LABEL: nearbyint_v4f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to4}, %ymm0
; CHECK-NEXT: retq
  %ps = load double, double* %ptr
  %pins = insertelement <4 x double> undef, double %ps, i32 0
  %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
  %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
  ret <4 x double> %t
}
| |
; Nearbyint of a scalar float splatted to <8 x float>: expect the broadcast
; load to fold into vrndscaleps $12 with the {1to8} memory operand on %ymm.
define <8 x float> @nearbyint_v8f32_broadcast(float* %ptr) {
; CHECK-LABEL: nearbyint_v8f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to8}, %ymm0
; CHECK-NEXT: retq
  %ps = load float, float* %ptr
  %pins = insertelement <8 x float> undef, float %ps, i32 0
  %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
  %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
  ret <8 x float> %t
}
| |
; Nearbyint of a scalar double splatted to <8 x double>: expect the broadcast
; load to fold into vrndscalepd $12 with the {1to8} memory operand on %zmm.
define <8 x double> @nearbyint_v8f64_broadcast(double* %ptr){
; CHECK-LABEL: nearbyint_v8f64_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to8}, %zmm0
; CHECK-NEXT: retq
  %ps = load double, double* %ptr
  %pins = insertelement <8 x double> undef, double %ps, i32 0
  %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
  %t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
  ret <8 x double> %t
}
| |
; Nearbyint of a scalar float splatted to <16 x float>: expect the broadcast
; load to fold into vrndscaleps $12 with the {1to16} memory operand on %zmm.
define <16 x float> @nearbyint_v16f32_broadcast(float* %ptr) {
; CHECK-LABEL: nearbyint_v16f32_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to16}, %zmm0
; CHECK-NEXT: retq
  %ps = load float, float* %ptr
  %pins = insertelement <16 x float> undef, float %ps, i32 0
  %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
  %t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
  ret <16 x float> %t
}
| |
; Merge-masked nearbyint of a memory-broadcast double: select with a passthru
; vector should become a {%k1} merge mask on the {1to2} broadcast form of
; vrndscalepd $12, with %xmm0 (passthru) as the merge destination.
define <2 x double> @nearbyint_v2f64_mask_broadcast(double* %ptr, <2 x double> %passthru, <2 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v2f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to2}, %xmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <2 x double> undef, double %ps, i32 0
  %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
  %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> %passthru
  ret <2 x double> %s
}
| |
; Merge-masked nearbyint of a memory-broadcast float (<4 x float>): expect
; vrndscaleps $12 with a {1to4} memory operand and a {%k1} merge mask.
define <4 x float> @nearbyint_v4f32_mask_broadcast(float* %ptr, <4 x float> %passthru, <4 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v4f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to4}, %xmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <4 x float> undef, float %ps, i32 0
  %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
  %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> %passthru
  ret <4 x float> %s
}
| |
; Merge-masked nearbyint of a memory-broadcast double (<4 x double>): expect
; vrndscalepd $12 with a {1to4} memory operand and a {%k1} merge mask on %ymm.
define <4 x double> @nearbyint_v4f64_mask_broadcast(double* %ptr, <4 x double> %passthru, <4 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v4f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to4}, %ymm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <4 x double> undef, double %ps, i32 0
  %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
  %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> %passthru
  ret <4 x double> %s
}
| |
; Merge-masked nearbyint of a memory-broadcast float (<8 x float>): expect
; vrndscaleps $12 with a {1to8} memory operand and a {%k1} merge mask on %ymm.
define <8 x float> @nearbyint_v8f32_mask_broadcast(float* %ptr, <8 x float> %passthru, <8 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v8f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm1, %ymm1, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to8}, %ymm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <8 x float> undef, float %ps, i32 0
  %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
  %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> %passthru
  ret <8 x float> %s
}
| |
; Merge-masked nearbyint of a memory-broadcast double (<8 x double>): expect
; vrndscalepd $12 with a {1to8} memory operand and a {%k1} merge mask on %zmm.
define <8 x double> @nearbyint_v8f64_mask_broadcast(double* %ptr, <8 x double> %passthru, <8 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v8f64_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to8}, %zmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <8 x double> undef, double %ps, i32 0
  %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
  %t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> %passthru
  ret <8 x double> %s
}
| |
; Merge-masked nearbyint of a memory-broadcast float (<16 x float>): expect
; vrndscaleps $12 with a {1to16} memory operand and a {%k1} merge mask on %zmm.
define <16 x float> @nearbyint_v16f32_mask_broadcast(float* %ptr, <16 x float> %passthru, <16 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v16f32_mask_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm1, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to16}, %zmm0 {%k1}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <16 x float> undef, float %ps, i32 0
  %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
  %t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> %passthru
  ret <16 x float> %s
}
| |
; Zero-masked nearbyint of a memory-broadcast double: select against zero
; should become a {%k1} {z} zeroing mask on the {1to2} broadcast form of
; vrndscalepd $12.
define <2 x double> @nearbyint_v2f64_maskz_broadcast(double* %ptr, <2 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v2f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to2}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <2 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <2 x double> undef, double %ps, i32 0
  %p = shufflevector <2 x double> %pins, <2 x double> undef, <2 x i32> zeroinitializer
  %t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
  %s = select <2 x i1> %c, <2 x double> %t, <2 x double> zeroinitializer
  ret <2 x double> %s
}
| |
; Zero-masked nearbyint of a memory-broadcast float (<4 x float>): expect
; vrndscaleps $12 with a {1to4} memory operand and a {%k1} {z} zeroing mask.
define <4 x float> @nearbyint_v4f32_maskz_broadcast(float* %ptr, <4 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v4f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to4}, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <4 x float> undef, float %ps, i32 0
  %p = shufflevector <4 x float> %pins, <4 x float> undef, <4 x i32> zeroinitializer
  %t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
  %s = select <4 x i1> %c, <4 x float> %t, <4 x float> zeroinitializer
  ret <4 x float> %s
}
| |
; Zero-masked nearbyint of a memory-broadcast double (<4 x double>): expect
; vrndscalepd $12 with a {1to4} memory operand and {%k1} {z} on %ymm.
define <4 x double> @nearbyint_v4f64_maskz_broadcast(double* %ptr, <4 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v4f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to4}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <4 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <4 x double> undef, double %ps, i32 0
  %p = shufflevector <4 x double> %pins, <4 x double> undef, <4 x i32> zeroinitializer
  %t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
  %s = select <4 x i1> %c, <4 x double> %t, <4 x double> zeroinitializer
  ret <4 x double> %s
}
| |
; Zero-masked nearbyint of a memory-broadcast float (<8 x float>): expect
; vrndscaleps $12 with a {1to8} memory operand and {%k1} {z} on %ymm.
define <8 x float> @nearbyint_v8f32_maskz_broadcast(float* %ptr, <8 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v8f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %ymm0, %ymm0, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to8}, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <8 x float> undef, float %ps, i32 0
  %p = shufflevector <8 x float> %pins, <8 x float> undef, <8 x i32> zeroinitializer
  %t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
  %s = select <8 x i1> %c, <8 x float> %t, <8 x float> zeroinitializer
  ret <8 x float> %s
}
| |
; Zero-masked nearbyint of a memory-broadcast double (<8 x double>): expect
; vrndscalepd $12 with a {1to8} memory operand and {%k1} {z} on %zmm.
define <8 x double> @nearbyint_v8f64_maskz_broadcast(double* %ptr, <8 x i64> %cmp) {
; CHECK-LABEL: nearbyint_v8f64_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscalepd $12, (%rdi){1to8}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <8 x i64> %cmp, zeroinitializer
  %ps = load double, double* %ptr
  %pins = insertelement <8 x double> undef, double %ps, i32 0
  %p = shufflevector <8 x double> %pins, <8 x double> undef, <8 x i32> zeroinitializer
  %t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
  %s = select <8 x i1> %c, <8 x double> %t, <8 x double> zeroinitializer
  ret <8 x double> %s
}
| |
; Zero-masked nearbyint of a memory-broadcast float (<16 x float>): expect
; vrndscaleps $12 with a {1to16} memory operand and {%k1} {z} on %zmm.
define <16 x float> @nearbyint_v16f32_maskz_broadcast(float* %ptr, <16 x i32> %cmp) {
; CHECK-LABEL: nearbyint_v16f32_maskz_broadcast:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm0, %zmm0, %k1
; CHECK-NEXT: vrndscaleps $12, (%rdi){1to16}, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %c = icmp eq <16 x i32> %cmp, zeroinitializer
  %ps = load float, float* %ptr
  %pins = insertelement <16 x float> undef, float %ps, i32 0
  %p = shufflevector <16 x float> %pins, <16 x float> undef, <16 x i32> zeroinitializer
  %t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
  %s = select <16 x i1> %c, <16 x float> %t, <16 x float> zeroinitializer
  ret <16 x float> %s
}