| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,NOBW,NOVBMI,AVX512F |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX512,NOVBMI,AVX512BW |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vbmi | FileCheck %s --check-prefixes=AVX512,AVX512BW,VBMI |
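; Variable (run-time indexed) shuffles of 512-bit vectors, expressed as
; scalar extractelement/insertelement chains that the backend should match
; to a single cross-lane VPERM* instruction when the target supports one.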
| |
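; v8i64: a single vpermq, available with plain AVX512F.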
| define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind { |
| ; AVX512-LABEL: var_shuffle_v8i64: |
| ; AVX512: # %bb.0: |
; AVX512-NEXT: vpermq %zmm0, %zmm1, %zmm0
| ; AVX512-NEXT: retq |
| %index0 = extractelement <8 x i64> %indices, i32 0 |
| %index1 = extractelement <8 x i64> %indices, i32 1 |
| %index2 = extractelement <8 x i64> %indices, i32 2 |
| %index3 = extractelement <8 x i64> %indices, i32 3 |
| %index4 = extractelement <8 x i64> %indices, i32 4 |
| %index5 = extractelement <8 x i64> %indices, i32 5 |
| %index6 = extractelement <8 x i64> %indices, i32 6 |
| %index7 = extractelement <8 x i64> %indices, i32 7 |
| %v0 = extractelement <8 x i64> %v, i64 %index0 |
| %v1 = extractelement <8 x i64> %v, i64 %index1 |
| %v2 = extractelement <8 x i64> %v, i64 %index2 |
| %v3 = extractelement <8 x i64> %v, i64 %index3 |
| %v4 = extractelement <8 x i64> %v, i64 %index4 |
| %v5 = extractelement <8 x i64> %v, i64 %index5 |
| %v6 = extractelement <8 x i64> %v, i64 %index6 |
| %v7 = extractelement <8 x i64> %v, i64 %index7 |
| %ret0 = insertelement <8 x i64> undef, i64 %v0, i32 0 |
| %ret1 = insertelement <8 x i64> %ret0, i64 %v1, i32 1 |
| %ret2 = insertelement <8 x i64> %ret1, i64 %v2, i32 2 |
| %ret3 = insertelement <8 x i64> %ret2, i64 %v3, i32 3 |
| %ret4 = insertelement <8 x i64> %ret3, i64 %v4, i32 4 |
| %ret5 = insertelement <8 x i64> %ret4, i64 %v5, i32 5 |
| %ret6 = insertelement <8 x i64> %ret5, i64 %v6, i32 6 |
| %ret7 = insertelement <8 x i64> %ret6, i64 %v7, i32 7 |
| ret <8 x i64> %ret7 |
| } |
| |
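; v16i32: a single vpermd, available with plain AVX512F.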
| define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwind { |
| ; AVX512-LABEL: var_shuffle_v16i32: |
| ; AVX512: # %bb.0: |
; AVX512-NEXT: vpermd %zmm0, %zmm1, %zmm0
| ; AVX512-NEXT: retq |
| %index0 = extractelement <16 x i32> %indices, i32 0 |
| %index1 = extractelement <16 x i32> %indices, i32 1 |
| %index2 = extractelement <16 x i32> %indices, i32 2 |
| %index3 = extractelement <16 x i32> %indices, i32 3 |
| %index4 = extractelement <16 x i32> %indices, i32 4 |
| %index5 = extractelement <16 x i32> %indices, i32 5 |
| %index6 = extractelement <16 x i32> %indices, i32 6 |
| %index7 = extractelement <16 x i32> %indices, i32 7 |
| %index8 = extractelement <16 x i32> %indices, i32 8 |
| %index9 = extractelement <16 x i32> %indices, i32 9 |
| %index10 = extractelement <16 x i32> %indices, i32 10 |
| %index11 = extractelement <16 x i32> %indices, i32 11 |
| %index12 = extractelement <16 x i32> %indices, i32 12 |
| %index13 = extractelement <16 x i32> %indices, i32 13 |
| %index14 = extractelement <16 x i32> %indices, i32 14 |
| %index15 = extractelement <16 x i32> %indices, i32 15 |
| %v0 = extractelement <16 x i32> %v, i32 %index0 |
| %v1 = extractelement <16 x i32> %v, i32 %index1 |
| %v2 = extractelement <16 x i32> %v, i32 %index2 |
| %v3 = extractelement <16 x i32> %v, i32 %index3 |
| %v4 = extractelement <16 x i32> %v, i32 %index4 |
| %v5 = extractelement <16 x i32> %v, i32 %index5 |
| %v6 = extractelement <16 x i32> %v, i32 %index6 |
| %v7 = extractelement <16 x i32> %v, i32 %index7 |
| %v8 = extractelement <16 x i32> %v, i32 %index8 |
| %v9 = extractelement <16 x i32> %v, i32 %index9 |
| %v10 = extractelement <16 x i32> %v, i32 %index10 |
| %v11 = extractelement <16 x i32> %v, i32 %index11 |
| %v12 = extractelement <16 x i32> %v, i32 %index12 |
| %v13 = extractelement <16 x i32> %v, i32 %index13 |
| %v14 = extractelement <16 x i32> %v, i32 %index14 |
| %v15 = extractelement <16 x i32> %v, i32 %index15 |
| %ret0 = insertelement <16 x i32> undef, i32 %v0, i32 0 |
| %ret1 = insertelement <16 x i32> %ret0, i32 %v1, i32 1 |
| %ret2 = insertelement <16 x i32> %ret1, i32 %v2, i32 2 |
| %ret3 = insertelement <16 x i32> %ret2, i32 %v3, i32 3 |
| %ret4 = insertelement <16 x i32> %ret3, i32 %v4, i32 4 |
| %ret5 = insertelement <16 x i32> %ret4, i32 %v5, i32 5 |
| %ret6 = insertelement <16 x i32> %ret5, i32 %v6, i32 6 |
| %ret7 = insertelement <16 x i32> %ret6, i32 %v7, i32 7 |
| %ret8 = insertelement <16 x i32> %ret7, i32 %v8, i32 8 |
| %ret9 = insertelement <16 x i32> %ret8, i32 %v9, i32 9 |
| %ret10 = insertelement <16 x i32> %ret9, i32 %v10, i32 10 |
| %ret11 = insertelement <16 x i32> %ret10, i32 %v11, i32 11 |
| %ret12 = insertelement <16 x i32> %ret11, i32 %v12, i32 12 |
| %ret13 = insertelement <16 x i32> %ret12, i32 %v13, i32 13 |
| %ret14 = insertelement <16 x i32> %ret13, i32 %v14, i32 14 |
| %ret15 = insertelement <16 x i32> %ret14, i32 %v15, i32 15 |
| ret <16 x i32> %ret15 |
| } |
| |
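; v32i16: vpermw requires AVX512BW. Without it the shuffle is scalarized
; through a stack temporary: the vector is spilled once per element and each
; index is masked with 'andl $31' before the element load.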
| define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwind { |
| ; NOBW-LABEL: var_shuffle_v32i16: |
| ; NOBW: # %bb.0: |
| ; NOBW-NEXT: pushq %rbp |
| ; NOBW-NEXT: movq %rsp, %rbp |
| ; NOBW-NEXT: andq $-64, %rsp |
| ; NOBW-NEXT: subq $2112, %rsp # imm = 0x840 |
| ; NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4 |
| ; NOBW-NEXT: vmovd %xmm4, %eax |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, (%rsp) |
| ; NOBW-NEXT: movzwl 1472(%rsp,%rax,2), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm0 |
| ; NOBW-NEXT: vpextrw $1, %xmm4, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $1, 1408(%rsp,%rax,2), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrw $2, %xmm4, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $2, 1344(%rsp,%rax,2), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrw $3, %xmm4, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $3, 1280(%rsp,%rax,2), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrw $4, %xmm4, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $4, 1216(%rsp,%rax,2), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrw $5, %xmm4, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $5, 1152(%rsp,%rax,2), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrw $6, %xmm4, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $6, 1088(%rsp,%rax,2), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrw $7, %xmm4, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $7, 1024(%rsp,%rax,2), %xmm0, %xmm0 |
| ; NOBW-NEXT: vmovd %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: movzwl 1984(%rsp,%rax,2), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm1 |
| ; NOBW-NEXT: vpextrw $1, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $1, 1920(%rsp,%rax,2), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrw $2, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $2, 1856(%rsp,%rax,2), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrw $3, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $3, 1792(%rsp,%rax,2), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrw $4, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $4, 1728(%rsp,%rax,2), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrw $5, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $5, 1664(%rsp,%rax,2), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrw $6, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $6, 1600(%rsp,%rax,2), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrw $7, %xmm2, %eax |
| ; NOBW-NEXT: vextracti128 $1, %ymm3, %xmm2 |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $7, 1536(%rsp,%rax,2), %xmm1, %xmm1 |
| ; NOBW-NEXT: vmovd %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: movzwl 448(%rsp,%rax,2), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm4 |
| ; NOBW-NEXT: vpextrw $1, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $1, 384(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $2, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $2, 320(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $3, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $3, 256(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $4, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $4, 192(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $5, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $5, 128(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $6, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $6, 64(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $7, %xmm2, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm4, %xmm2 |
| ; NOBW-NEXT: vmovd %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: movzwl 960(%rsp,%rax,2), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm4 |
| ; NOBW-NEXT: vpextrw $1, %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $1, 896(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $2, %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $2, 832(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $3, %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $3, 768(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $4, %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $4, 704(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $5, %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $5, 640(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $6, %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $6, 576(%rsp,%rax,2), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrw $7, %xmm3, %eax |
| ; NOBW-NEXT: andl $31, %eax |
| ; NOBW-NEXT: vpinsrw $7, 512(%rsp,%rax,2), %xmm4, %xmm3 |
| ; NOBW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 |
| ; NOBW-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm1 |
| ; NOBW-NEXT: movq %rbp, %rsp |
| ; NOBW-NEXT: popq %rbp |
| ; NOBW-NEXT: retq |
| ; |
| ; AVX512BW-LABEL: var_shuffle_v32i16: |
| ; AVX512BW: # %bb.0: |
| ; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0 |
| ; AVX512BW-NEXT: retq |
| %index0 = extractelement <32 x i16> %indices, i32 0 |
| %index1 = extractelement <32 x i16> %indices, i32 1 |
| %index2 = extractelement <32 x i16> %indices, i32 2 |
| %index3 = extractelement <32 x i16> %indices, i32 3 |
| %index4 = extractelement <32 x i16> %indices, i32 4 |
| %index5 = extractelement <32 x i16> %indices, i32 5 |
| %index6 = extractelement <32 x i16> %indices, i32 6 |
| %index7 = extractelement <32 x i16> %indices, i32 7 |
| %index8 = extractelement <32 x i16> %indices, i32 8 |
| %index9 = extractelement <32 x i16> %indices, i32 9 |
| %index10 = extractelement <32 x i16> %indices, i32 10 |
| %index11 = extractelement <32 x i16> %indices, i32 11 |
| %index12 = extractelement <32 x i16> %indices, i32 12 |
| %index13 = extractelement <32 x i16> %indices, i32 13 |
| %index14 = extractelement <32 x i16> %indices, i32 14 |
| %index15 = extractelement <32 x i16> %indices, i32 15 |
| %index16 = extractelement <32 x i16> %indices, i32 16 |
| %index17 = extractelement <32 x i16> %indices, i32 17 |
| %index18 = extractelement <32 x i16> %indices, i32 18 |
| %index19 = extractelement <32 x i16> %indices, i32 19 |
| %index20 = extractelement <32 x i16> %indices, i32 20 |
| %index21 = extractelement <32 x i16> %indices, i32 21 |
| %index22 = extractelement <32 x i16> %indices, i32 22 |
| %index23 = extractelement <32 x i16> %indices, i32 23 |
| %index24 = extractelement <32 x i16> %indices, i32 24 |
| %index25 = extractelement <32 x i16> %indices, i32 25 |
| %index26 = extractelement <32 x i16> %indices, i32 26 |
| %index27 = extractelement <32 x i16> %indices, i32 27 |
| %index28 = extractelement <32 x i16> %indices, i32 28 |
| %index29 = extractelement <32 x i16> %indices, i32 29 |
| %index30 = extractelement <32 x i16> %indices, i32 30 |
| %index31 = extractelement <32 x i16> %indices, i32 31 |
| %v0 = extractelement <32 x i16> %v, i16 %index0 |
| %v1 = extractelement <32 x i16> %v, i16 %index1 |
| %v2 = extractelement <32 x i16> %v, i16 %index2 |
| %v3 = extractelement <32 x i16> %v, i16 %index3 |
| %v4 = extractelement <32 x i16> %v, i16 %index4 |
| %v5 = extractelement <32 x i16> %v, i16 %index5 |
| %v6 = extractelement <32 x i16> %v, i16 %index6 |
| %v7 = extractelement <32 x i16> %v, i16 %index7 |
| %v8 = extractelement <32 x i16> %v, i16 %index8 |
| %v9 = extractelement <32 x i16> %v, i16 %index9 |
| %v10 = extractelement <32 x i16> %v, i16 %index10 |
| %v11 = extractelement <32 x i16> %v, i16 %index11 |
| %v12 = extractelement <32 x i16> %v, i16 %index12 |
| %v13 = extractelement <32 x i16> %v, i16 %index13 |
| %v14 = extractelement <32 x i16> %v, i16 %index14 |
| %v15 = extractelement <32 x i16> %v, i16 %index15 |
| %v16 = extractelement <32 x i16> %v, i16 %index16 |
| %v17 = extractelement <32 x i16> %v, i16 %index17 |
| %v18 = extractelement <32 x i16> %v, i16 %index18 |
| %v19 = extractelement <32 x i16> %v, i16 %index19 |
| %v20 = extractelement <32 x i16> %v, i16 %index20 |
| %v21 = extractelement <32 x i16> %v, i16 %index21 |
| %v22 = extractelement <32 x i16> %v, i16 %index22 |
| %v23 = extractelement <32 x i16> %v, i16 %index23 |
| %v24 = extractelement <32 x i16> %v, i16 %index24 |
| %v25 = extractelement <32 x i16> %v, i16 %index25 |
| %v26 = extractelement <32 x i16> %v, i16 %index26 |
| %v27 = extractelement <32 x i16> %v, i16 %index27 |
| %v28 = extractelement <32 x i16> %v, i16 %index28 |
| %v29 = extractelement <32 x i16> %v, i16 %index29 |
| %v30 = extractelement <32 x i16> %v, i16 %index30 |
| %v31 = extractelement <32 x i16> %v, i16 %index31 |
| %ret0 = insertelement <32 x i16> undef, i16 %v0, i32 0 |
| %ret1 = insertelement <32 x i16> %ret0, i16 %v1, i32 1 |
| %ret2 = insertelement <32 x i16> %ret1, i16 %v2, i32 2 |
| %ret3 = insertelement <32 x i16> %ret2, i16 %v3, i32 3 |
| %ret4 = insertelement <32 x i16> %ret3, i16 %v4, i32 4 |
| %ret5 = insertelement <32 x i16> %ret4, i16 %v5, i32 5 |
| %ret6 = insertelement <32 x i16> %ret5, i16 %v6, i32 6 |
| %ret7 = insertelement <32 x i16> %ret6, i16 %v7, i32 7 |
| %ret8 = insertelement <32 x i16> %ret7, i16 %v8, i32 8 |
| %ret9 = insertelement <32 x i16> %ret8, i16 %v9, i32 9 |
| %ret10 = insertelement <32 x i16> %ret9, i16 %v10, i32 10 |
| %ret11 = insertelement <32 x i16> %ret10, i16 %v11, i32 11 |
| %ret12 = insertelement <32 x i16> %ret11, i16 %v12, i32 12 |
| %ret13 = insertelement <32 x i16> %ret12, i16 %v13, i32 13 |
| %ret14 = insertelement <32 x i16> %ret13, i16 %v14, i32 14 |
| %ret15 = insertelement <32 x i16> %ret14, i16 %v15, i32 15 |
| %ret16 = insertelement <32 x i16> %ret15, i16 %v16, i32 16 |
| %ret17 = insertelement <32 x i16> %ret16, i16 %v17, i32 17 |
| %ret18 = insertelement <32 x i16> %ret17, i16 %v18, i32 18 |
| %ret19 = insertelement <32 x i16> %ret18, i16 %v19, i32 19 |
| %ret20 = insertelement <32 x i16> %ret19, i16 %v20, i32 20 |
| %ret21 = insertelement <32 x i16> %ret20, i16 %v21, i32 21 |
| %ret22 = insertelement <32 x i16> %ret21, i16 %v22, i32 22 |
| %ret23 = insertelement <32 x i16> %ret22, i16 %v23, i32 23 |
| %ret24 = insertelement <32 x i16> %ret23, i16 %v24, i32 24 |
| %ret25 = insertelement <32 x i16> %ret24, i16 %v25, i32 25 |
| %ret26 = insertelement <32 x i16> %ret25, i16 %v26, i32 26 |
| %ret27 = insertelement <32 x i16> %ret26, i16 %v27, i32 27 |
| %ret28 = insertelement <32 x i16> %ret27, i16 %v28, i32 28 |
| %ret29 = insertelement <32 x i16> %ret28, i16 %v29, i32 29 |
| %ret30 = insertelement <32 x i16> %ret29, i16 %v30, i32 30 |
| %ret31 = insertelement <32 x i16> %ret30, i16 %v31, i32 31 |
| ret <32 x i16> %ret31 |
| } |
| |
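; v64i8: vpermb requires AVX512VBMI. Without it each byte is gathered from
; one of 64 stack copies of the source vector, with the index masked by
; 'andl $63'.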
| define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind { |
| ; NOBW-LABEL: var_shuffle_v64i8: |
| ; NOBW: # %bb.0: |
| ; NOBW-NEXT: pushq %rbp |
| ; NOBW-NEXT: movq %rsp, %rbp |
| ; NOBW-NEXT: andq $-64, %rsp |
| ; NOBW-NEXT: subq $4160, %rsp # imm = 0x1040 |
| ; NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4 |
| ; NOBW-NEXT: vpextrb $0, %xmm4, %eax |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) |
| ; NOBW-NEXT: vmovaps %ymm0, (%rsp) |
| ; NOBW-NEXT: movzbl 3008(%rsp,%rax), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm0 |
| ; NOBW-NEXT: vpextrb $1, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $1, 2944(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $2, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $2, 2880(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $3, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $3, 2816(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $4, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $4, 2752(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $5, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $5, 2688(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $6, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $6, 2624(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $7, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $7, 2560(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $8, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $8, 2496(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $9, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $9, 2432(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $10, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $10, 2368(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $11, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $11, 2304(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $12, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $12, 2240(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $13, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $13, 2176(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $14, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $14, 2112(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $15, %xmm4, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $15, 2048(%rsp,%rax), %xmm0, %xmm0 |
| ; NOBW-NEXT: vpextrb $0, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: movzbl 4032(%rsp,%rax), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm1 |
| ; NOBW-NEXT: vpextrb $1, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $1, 3968(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $2, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $2, 3904(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $3, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $3, 3840(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $4, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $4, 3776(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $5, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $5, 3712(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $6, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $6, 3648(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $7, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $7, 3584(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $8, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $8, 3520(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $9, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $9, 3456(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $10, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $10, 3392(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $11, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $11, 3328(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $12, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $12, 3264(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $13, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $13, 3200(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $14, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $14, 3136(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $15, %xmm2, %eax |
| ; NOBW-NEXT: vextracti128 $1, %ymm3, %xmm2 |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $15, 3072(%rsp,%rax), %xmm1, %xmm1 |
| ; NOBW-NEXT: vpextrb $0, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: movzbl 960(%rsp,%rax), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm4 |
| ; NOBW-NEXT: vpextrb $1, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $1, 896(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $2, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $2, 832(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $3, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $3, 768(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $4, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $4, 704(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $5, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $5, 640(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $6, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $6, 576(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $7, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $7, 512(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $8, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $8, 448(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $9, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $9, 384(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $10, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $10, 320(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $11, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $11, 256(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $12, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $12, 192(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $13, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $13, 128(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $14, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $14, 64(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $15, %xmm2, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $15, (%rsp,%rax), %xmm4, %xmm2 |
| ; NOBW-NEXT: vpextrb $0, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: movzbl 1984(%rsp,%rax), %eax |
| ; NOBW-NEXT: vmovd %eax, %xmm4 |
| ; NOBW-NEXT: vpextrb $1, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $1, 1920(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $2, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $2, 1856(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $3, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $3, 1792(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $4, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $4, 1728(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $5, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $5, 1664(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $6, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $6, 1600(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $7, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $7, 1536(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $8, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $8, 1472(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $9, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $9, 1408(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $10, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $10, 1344(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $11, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $11, 1280(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $12, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $12, 1216(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $13, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $13, 1152(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $14, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $14, 1088(%rsp,%rax), %xmm4, %xmm4 |
| ; NOBW-NEXT: vpextrb $15, %xmm3, %eax |
| ; NOBW-NEXT: andl $63, %eax |
| ; NOBW-NEXT: vpinsrb $15, 1024(%rsp,%rax), %xmm4, %xmm3 |
| ; NOBW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 |
| ; NOBW-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm1 |
| ; NOBW-NEXT: movq %rbp, %rsp |
| ; NOBW-NEXT: popq %rbp |
| ; NOBW-NEXT: retq |
| ; |
| ; VBMI-LABEL: var_shuffle_v64i8: |
| ; VBMI: # %bb.0: |
| ; VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm0 |
| ; VBMI-NEXT: retq |
| %index0 = extractelement <64 x i8> %indices, i32 0 |
| %index1 = extractelement <64 x i8> %indices, i32 1 |
| %index2 = extractelement <64 x i8> %indices, i32 2 |
| %index3 = extractelement <64 x i8> %indices, i32 3 |
| %index4 = extractelement <64 x i8> %indices, i32 4 |
| %index5 = extractelement <64 x i8> %indices, i32 5 |
| %index6 = extractelement <64 x i8> %indices, i32 6 |
| %index7 = extractelement <64 x i8> %indices, i32 7 |
| %index8 = extractelement <64 x i8> %indices, i32 8 |
| %index9 = extractelement <64 x i8> %indices, i32 9 |
| %index10 = extractelement <64 x i8> %indices, i32 10 |
| %index11 = extractelement <64 x i8> %indices, i32 11 |
| %index12 = extractelement <64 x i8> %indices, i32 12 |
| %index13 = extractelement <64 x i8> %indices, i32 13 |
| %index14 = extractelement <64 x i8> %indices, i32 14 |
| %index15 = extractelement <64 x i8> %indices, i32 15 |
| %index16 = extractelement <64 x i8> %indices, i32 16 |
| %index17 = extractelement <64 x i8> %indices, i32 17 |
| %index18 = extractelement <64 x i8> %indices, i32 18 |
| %index19 = extractelement <64 x i8> %indices, i32 19 |
| %index20 = extractelement <64 x i8> %indices, i32 20 |
| %index21 = extractelement <64 x i8> %indices, i32 21 |
| %index22 = extractelement <64 x i8> %indices, i32 22 |
| %index23 = extractelement <64 x i8> %indices, i32 23 |
| %index24 = extractelement <64 x i8> %indices, i32 24 |
| %index25 = extractelement <64 x i8> %indices, i32 25 |
| %index26 = extractelement <64 x i8> %indices, i32 26 |
| %index27 = extractelement <64 x i8> %indices, i32 27 |
| %index28 = extractelement <64 x i8> %indices, i32 28 |
| %index29 = extractelement <64 x i8> %indices, i32 29 |
| %index30 = extractelement <64 x i8> %indices, i32 30 |
| %index31 = extractelement <64 x i8> %indices, i32 31 |
| %index32 = extractelement <64 x i8> %indices, i32 32 |
| %index33 = extractelement <64 x i8> %indices, i32 33 |
| %index34 = extractelement <64 x i8> %indices, i32 34 |
| %index35 = extractelement <64 x i8> %indices, i32 35 |
| %index36 = extractelement <64 x i8> %indices, i32 36 |
| %index37 = extractelement <64 x i8> %indices, i32 37 |
| %index38 = extractelement <64 x i8> %indices, i32 38 |
| %index39 = extractelement <64 x i8> %indices, i32 39 |
| %index40 = extractelement <64 x i8> %indices, i32 40 |
| %index41 = extractelement <64 x i8> %indices, i32 41 |
| %index42 = extractelement <64 x i8> %indices, i32 42 |
| %index43 = extractelement <64 x i8> %indices, i32 43 |
| %index44 = extractelement <64 x i8> %indices, i32 44 |
| %index45 = extractelement <64 x i8> %indices, i32 45 |
| %index46 = extractelement <64 x i8> %indices, i32 46 |
| %index47 = extractelement <64 x i8> %indices, i32 47 |
| %index48 = extractelement <64 x i8> %indices, i32 48 |
| %index49 = extractelement <64 x i8> %indices, i32 49 |
| %index50 = extractelement <64 x i8> %indices, i32 50 |
| %index51 = extractelement <64 x i8> %indices, i32 51 |
| %index52 = extractelement <64 x i8> %indices, i32 52 |
| %index53 = extractelement <64 x i8> %indices, i32 53 |
| %index54 = extractelement <64 x i8> %indices, i32 54 |
| %index55 = extractelement <64 x i8> %indices, i32 55 |
| %index56 = extractelement <64 x i8> %indices, i32 56 |
| %index57 = extractelement <64 x i8> %indices, i32 57 |
| %index58 = extractelement <64 x i8> %indices, i32 58 |
| %index59 = extractelement <64 x i8> %indices, i32 59 |
| %index60 = extractelement <64 x i8> %indices, i32 60 |
| %index61 = extractelement <64 x i8> %indices, i32 61 |
| %index62 = extractelement <64 x i8> %indices, i32 62 |
| %index63 = extractelement <64 x i8> %indices, i32 63 |
| %v0 = extractelement <64 x i8> %v, i8 %index0 |
| %v1 = extractelement <64 x i8> %v, i8 %index1 |
| %v2 = extractelement <64 x i8> %v, i8 %index2 |
| %v3 = extractelement <64 x i8> %v, i8 %index3 |
| %v4 = extractelement <64 x i8> %v, i8 %index4 |
| %v5 = extractelement <64 x i8> %v, i8 %index5 |
| %v6 = extractelement <64 x i8> %v, i8 %index6 |
| %v7 = extractelement <64 x i8> %v, i8 %index7 |
| %v8 = extractelement <64 x i8> %v, i8 %index8 |
| %v9 = extractelement <64 x i8> %v, i8 %index9 |
| %v10 = extractelement <64 x i8> %v, i8 %index10 |
| %v11 = extractelement <64 x i8> %v, i8 %index11 |
| %v12 = extractelement <64 x i8> %v, i8 %index12 |
| %v13 = extractelement <64 x i8> %v, i8 %index13 |
| %v14 = extractelement <64 x i8> %v, i8 %index14 |
| %v15 = extractelement <64 x i8> %v, i8 %index15 |
| %v16 = extractelement <64 x i8> %v, i8 %index16 |
| %v17 = extractelement <64 x i8> %v, i8 %index17 |
| %v18 = extractelement <64 x i8> %v, i8 %index18 |
| %v19 = extractelement <64 x i8> %v, i8 %index19 |
| %v20 = extractelement <64 x i8> %v, i8 %index20 |
| %v21 = extractelement <64 x i8> %v, i8 %index21 |
| %v22 = extractelement <64 x i8> %v, i8 %index22 |
| %v23 = extractelement <64 x i8> %v, i8 %index23 |
| %v24 = extractelement <64 x i8> %v, i8 %index24 |
| %v25 = extractelement <64 x i8> %v, i8 %index25 |
| %v26 = extractelement <64 x i8> %v, i8 %index26 |
| %v27 = extractelement <64 x i8> %v, i8 %index27 |
| %v28 = extractelement <64 x i8> %v, i8 %index28 |
| %v29 = extractelement <64 x i8> %v, i8 %index29 |
| %v30 = extractelement <64 x i8> %v, i8 %index30 |
| %v31 = extractelement <64 x i8> %v, i8 %index31 |
| %v32 = extractelement <64 x i8> %v, i8 %index32 |
| %v33 = extractelement <64 x i8> %v, i8 %index33 |
| %v34 = extractelement <64 x i8> %v, i8 %index34 |
| %v35 = extractelement <64 x i8> %v, i8 %index35 |
| %v36 = extractelement <64 x i8> %v, i8 %index36 |
| %v37 = extractelement <64 x i8> %v, i8 %index37 |
| %v38 = extractelement <64 x i8> %v, i8 %index38 |
| %v39 = extractelement <64 x i8> %v, i8 %index39 |
| %v40 = extractelement <64 x i8> %v, i8 %index40 |
| %v41 = extractelement <64 x i8> %v, i8 %index41 |
| %v42 = extractelement <64 x i8> %v, i8 %index42 |
| %v43 = extractelement <64 x i8> %v, i8 %index43 |
| %v44 = extractelement <64 x i8> %v, i8 %index44 |
| %v45 = extractelement <64 x i8> %v, i8 %index45 |
| %v46 = extractelement <64 x i8> %v, i8 %index46 |
| %v47 = extractelement <64 x i8> %v, i8 %index47 |
| %v48 = extractelement <64 x i8> %v, i8 %index48 |
| %v49 = extractelement <64 x i8> %v, i8 %index49 |
| %v50 = extractelement <64 x i8> %v, i8 %index50 |
| %v51 = extractelement <64 x i8> %v, i8 %index51 |
| %v52 = extractelement <64 x i8> %v, i8 %index52 |
| %v53 = extractelement <64 x i8> %v, i8 %index53 |
| %v54 = extractelement <64 x i8> %v, i8 %index54 |
| %v55 = extractelement <64 x i8> %v, i8 %index55 |
| %v56 = extractelement <64 x i8> %v, i8 %index56 |
| %v57 = extractelement <64 x i8> %v, i8 %index57 |
| %v58 = extractelement <64 x i8> %v, i8 %index58 |
| %v59 = extractelement <64 x i8> %v, i8 %index59 |
| %v60 = extractelement <64 x i8> %v, i8 %index60 |
| %v61 = extractelement <64 x i8> %v, i8 %index61 |
| %v62 = extractelement <64 x i8> %v, i8 %index62 |
| %v63 = extractelement <64 x i8> %v, i8 %index63 |
| %ret0 = insertelement <64 x i8> undef, i8 %v0, i32 0 |
| %ret1 = insertelement <64 x i8> %ret0, i8 %v1, i32 1 |
| %ret2 = insertelement <64 x i8> %ret1, i8 %v2, i32 2 |
| %ret3 = insertelement <64 x i8> %ret2, i8 %v3, i32 3 |
| %ret4 = insertelement <64 x i8> %ret3, i8 %v4, i32 4 |
| %ret5 = insertelement <64 x i8> %ret4, i8 %v5, i32 5 |
| %ret6 = insertelement <64 x i8> %ret5, i8 %v6, i32 6 |
| %ret7 = insertelement <64 x i8> %ret6, i8 %v7, i32 7 |
| %ret8 = insertelement <64 x i8> %ret7, i8 %v8, i32 8 |
| %ret9 = insertelement <64 x i8> %ret8, i8 %v9, i32 9 |
| %ret10 = insertelement <64 x i8> %ret9, i8 %v10, i32 10 |
| %ret11 = insertelement <64 x i8> %ret10, i8 %v11, i32 11 |
| %ret12 = insertelement <64 x i8> %ret11, i8 %v12, i32 12 |
| %ret13 = insertelement <64 x i8> %ret12, i8 %v13, i32 13 |
| %ret14 = insertelement <64 x i8> %ret13, i8 %v14, i32 14 |
| %ret15 = insertelement <64 x i8> %ret14, i8 %v15, i32 15 |
| %ret16 = insertelement <64 x i8> %ret15, i8 %v16, i32 16 |
| %ret17 = insertelement <64 x i8> %ret16, i8 %v17, i32 17 |
| %ret18 = insertelement <64 x i8> %ret17, i8 %v18, i32 18 |
| %ret19 = insertelement <64 x i8> %ret18, i8 %v19, i32 19 |
| %ret20 = insertelement <64 x i8> %ret19, i8 %v20, i32 20 |
| %ret21 = insertelement <64 x i8> %ret20, i8 %v21, i32 21 |
| %ret22 = insertelement <64 x i8> %ret21, i8 %v22, i32 22 |
| %ret23 = insertelement <64 x i8> %ret22, i8 %v23, i32 23 |
| %ret24 = insertelement <64 x i8> %ret23, i8 %v24, i32 24 |
| %ret25 = insertelement <64 x i8> %ret24, i8 %v25, i32 25 |
| %ret26 = insertelement <64 x i8> %ret25, i8 %v26, i32 26 |
| %ret27 = insertelement <64 x i8> %ret26, i8 %v27, i32 27 |
| %ret28 = insertelement <64 x i8> %ret27, i8 %v28, i32 28 |
| %ret29 = insertelement <64 x i8> %ret28, i8 %v29, i32 29 |
| %ret30 = insertelement <64 x i8> %ret29, i8 %v30, i32 30 |
| %ret31 = insertelement <64 x i8> %ret30, i8 %v31, i32 31 |
| %ret32 = insertelement <64 x i8> %ret31, i8 %v32, i32 32 |
| %ret33 = insertelement <64 x i8> %ret32, i8 %v33, i32 33 |
| %ret34 = insertelement <64 x i8> %ret33, i8 %v34, i32 34 |
| %ret35 = insertelement <64 x i8> %ret34, i8 %v35, i32 35 |
| %ret36 = insertelement <64 x i8> %ret35, i8 %v36, i32 36 |
| %ret37 = insertelement <64 x i8> %ret36, i8 %v37, i32 37 |
| %ret38 = insertelement <64 x i8> %ret37, i8 %v38, i32 38 |
| %ret39 = insertelement <64 x i8> %ret38, i8 %v39, i32 39 |
| %ret40 = insertelement <64 x i8> %ret39, i8 %v40, i32 40 |
| %ret41 = insertelement <64 x i8> %ret40, i8 %v41, i32 41 |
| %ret42 = insertelement <64 x i8> %ret41, i8 %v42, i32 42 |
| %ret43 = insertelement <64 x i8> %ret42, i8 %v43, i32 43 |
| %ret44 = insertelement <64 x i8> %ret43, i8 %v44, i32 44 |
| %ret45 = insertelement <64 x i8> %ret44, i8 %v45, i32 45 |
| %ret46 = insertelement <64 x i8> %ret45, i8 %v46, i32 46 |
| %ret47 = insertelement <64 x i8> %ret46, i8 %v47, i32 47 |
| %ret48 = insertelement <64 x i8> %ret47, i8 %v48, i32 48 |
| %ret49 = insertelement <64 x i8> %ret48, i8 %v49, i32 49 |
| %ret50 = insertelement <64 x i8> %ret49, i8 %v50, i32 50 |
| %ret51 = insertelement <64 x i8> %ret50, i8 %v51, i32 51 |
| %ret52 = insertelement <64 x i8> %ret51, i8 %v52, i32 52 |
| %ret53 = insertelement <64 x i8> %ret52, i8 %v53, i32 53 |
| %ret54 = insertelement <64 x i8> %ret53, i8 %v54, i32 54 |
| %ret55 = insertelement <64 x i8> %ret54, i8 %v55, i32 55 |
| %ret56 = insertelement <64 x i8> %ret55, i8 %v56, i32 56 |
| %ret57 = insertelement <64 x i8> %ret56, i8 %v57, i32 57 |
| %ret58 = insertelement <64 x i8> %ret57, i8 %v58, i32 58 |
| %ret59 = insertelement <64 x i8> %ret58, i8 %v59, i32 59 |
| %ret60 = insertelement <64 x i8> %ret59, i8 %v60, i32 60 |
| %ret61 = insertelement <64 x i8> %ret60, i8 %v61, i32 61 |
| %ret62 = insertelement <64 x i8> %ret61, i8 %v62, i32 62 |
| %ret63 = insertelement <64 x i8> %ret62, i8 %v63, i32 63 |
| ret <64 x i8> %ret63 |
| } |
| |
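; v8f64: vpermpd, the FP-domain counterpart of the v8i64 case.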
| define <8 x double> @var_shuffle_v8f64(<8 x double> %v, <8 x i64> %indices) nounwind { |
| ; AVX512-LABEL: var_shuffle_v8f64: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0 |
| ; AVX512-NEXT: retq |
| %index0 = extractelement <8 x i64> %indices, i32 0 |
| %index1 = extractelement <8 x i64> %indices, i32 1 |
| %index2 = extractelement <8 x i64> %indices, i32 2 |
| %index3 = extractelement <8 x i64> %indices, i32 3 |
| %index4 = extractelement <8 x i64> %indices, i32 4 |
| %index5 = extractelement <8 x i64> %indices, i32 5 |
| %index6 = extractelement <8 x i64> %indices, i32 6 |
| %index7 = extractelement <8 x i64> %indices, i32 7 |
| %v0 = extractelement <8 x double> %v, i64 %index0 |
| %v1 = extractelement <8 x double> %v, i64 %index1 |
| %v2 = extractelement <8 x double> %v, i64 %index2 |
| %v3 = extractelement <8 x double> %v, i64 %index3 |
| %v4 = extractelement <8 x double> %v, i64 %index4 |
| %v5 = extractelement <8 x double> %v, i64 %index5 |
| %v6 = extractelement <8 x double> %v, i64 %index6 |
| %v7 = extractelement <8 x double> %v, i64 %index7 |
| %ret0 = insertelement <8 x double> undef, double %v0, i32 0 |
| %ret1 = insertelement <8 x double> %ret0, double %v1, i32 1 |
| %ret2 = insertelement <8 x double> %ret1, double %v2, i32 2 |
| %ret3 = insertelement <8 x double> %ret2, double %v3, i32 3 |
| %ret4 = insertelement <8 x double> %ret3, double %v4, i32 4 |
| %ret5 = insertelement <8 x double> %ret4, double %v5, i32 5 |
| %ret6 = insertelement <8 x double> %ret5, double %v6, i32 6 |
| %ret7 = insertelement <8 x double> %ret6, double %v7, i32 7 |
| ret <8 x double> %ret7 |
| } |
| |
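; v16f32: vpermps, the FP-domain counterpart of the v16i32 case.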
| define <16 x float> @var_shuffle_v16f32(<16 x float> %v, <16 x i32> %indices) nounwind { |
| ; AVX512-LABEL: var_shuffle_v16f32: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0 |
| ; AVX512-NEXT: retq |
| %index0 = extractelement <16 x i32> %indices, i32 0 |
| %index1 = extractelement <16 x i32> %indices, i32 1 |
| %index2 = extractelement <16 x i32> %indices, i32 2 |
| %index3 = extractelement <16 x i32> %indices, i32 3 |
| %index4 = extractelement <16 x i32> %indices, i32 4 |
| %index5 = extractelement <16 x i32> %indices, i32 5 |
| %index6 = extractelement <16 x i32> %indices, i32 6 |
| %index7 = extractelement <16 x i32> %indices, i32 7 |
| %index8 = extractelement <16 x i32> %indices, i32 8 |
| %index9 = extractelement <16 x i32> %indices, i32 9 |
| %index10 = extractelement <16 x i32> %indices, i32 10 |
| %index11 = extractelement <16 x i32> %indices, i32 11 |
| %index12 = extractelement <16 x i32> %indices, i32 12 |
| %index13 = extractelement <16 x i32> %indices, i32 13 |
| %index14 = extractelement <16 x i32> %indices, i32 14 |
| %index15 = extractelement <16 x i32> %indices, i32 15 |
| %v0 = extractelement <16 x float> %v, i32 %index0 |
| %v1 = extractelement <16 x float> %v, i32 %index1 |
| %v2 = extractelement <16 x float> %v, i32 %index2 |
| %v3 = extractelement <16 x float> %v, i32 %index3 |
| %v4 = extractelement <16 x float> %v, i32 %index4 |
| %v5 = extractelement <16 x float> %v, i32 %index5 |
| %v6 = extractelement <16 x float> %v, i32 %index6 |
| %v7 = extractelement <16 x float> %v, i32 %index7 |
| %v8 = extractelement <16 x float> %v, i32 %index8 |
| %v9 = extractelement <16 x float> %v, i32 %index9 |
| %v10 = extractelement <16 x float> %v, i32 %index10 |
| %v11 = extractelement <16 x float> %v, i32 %index11 |
| %v12 = extractelement <16 x float> %v, i32 %index12 |
| %v13 = extractelement <16 x float> %v, i32 %index13 |
| %v14 = extractelement <16 x float> %v, i32 %index14 |
| %v15 = extractelement <16 x float> %v, i32 %index15 |
| %ret0 = insertelement <16 x float> undef, float %v0, i32 0 |
| %ret1 = insertelement <16 x float> %ret0, float %v1, i32 1 |
| %ret2 = insertelement <16 x float> %ret1, float %v2, i32 2 |
| %ret3 = insertelement <16 x float> %ret2, float %v3, i32 3 |
| %ret4 = insertelement <16 x float> %ret3, float %v4, i32 4 |
| %ret5 = insertelement <16 x float> %ret4, float %v5, i32 5 |
| %ret6 = insertelement <16 x float> %ret5, float %v6, i32 6 |
| %ret7 = insertelement <16 x float> %ret6, float %v7, i32 7 |
| %ret8 = insertelement <16 x float> %ret7, float %v8, i32 8 |
| %ret9 = insertelement <16 x float> %ret8, float %v9, i32 9 |
| %ret10 = insertelement <16 x float> %ret9, float %v10, i32 10 |
| %ret11 = insertelement <16 x float> %ret10, float %v11, i32 11 |
| %ret12 = insertelement <16 x float> %ret11, float %v12, i32 12 |
| %ret13 = insertelement <16 x float> %ret12, float %v13, i32 13 |
| %ret14 = insertelement <16 x float> %ret13, float %v14, i32 14 |
| %ret15 = insertelement <16 x float> %ret14, float %v15, i32 15 |
| ret <16 x float> %ret15 |
| } |