| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=i686-- -mattr=sse2 | FileCheck %s --check-prefixes=ANY,X32-SSE2 |
| ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=ANY,X64-AVX2 |
| |
| declare i8 @llvm.fshl.i8(i8, i8, i8) |
| declare i16 @llvm.fshl.i16(i16, i16, i16) |
| declare i32 @llvm.fshl.i32(i32, i32, i32) |
| declare i64 @llvm.fshl.i64(i64, i64, i64) |
| declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) |
| |
| declare i8 @llvm.fshr.i8(i8, i8, i8) |
| declare i16 @llvm.fshr.i16(i16, i16, i16) |
| declare i32 @llvm.fshr.i32(i32, i32, i32) |
| declare i64 @llvm.fshr.i64(i64, i64, i64) |
| declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) |
| |
; General case - all operands can be variables. x86 has 'shld', but it is not matched here.
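; The checks below show the generic expansion, roughly:
;   %k = %z & 31; %r = (%x << %k) | (%y >> (32 - %z)); select %x when %k == 0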
| |
| define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) nounwind { |
| ; X32-SSE2-LABEL: fshl_i32: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: pushl %edi |
| ; X32-SSE2-NEXT: pushl %esi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X32-SSE2-NEXT: movl $32, %ecx |
| ; X32-SSE2-NEXT: subl %edx, %ecx |
| ; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx |
| ; X32-SSE2-NEXT: shrl %cl, %edi |
| ; X32-SSE2-NEXT: andl $31, %edx |
| ; X32-SSE2-NEXT: movl %esi, %eax |
| ; X32-SSE2-NEXT: movl %edx, %ecx |
| ; X32-SSE2-NEXT: shll %cl, %eax |
| ; X32-SSE2-NEXT: orl %edi, %eax |
| ; X32-SSE2-NEXT: testl %edx, %edx |
| ; X32-SSE2-NEXT: cmovel %esi, %eax |
| ; X32-SSE2-NEXT: popl %esi |
| ; X32-SSE2-NEXT: popl %edi |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshl_i32: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: movl $32, %ecx |
| ; X64-AVX2-NEXT: subl %edx, %ecx |
| ; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx |
| ; X64-AVX2-NEXT: shrl %cl, %esi |
| ; X64-AVX2-NEXT: andl $31, %edx |
| ; X64-AVX2-NEXT: movl %edi, %eax |
| ; X64-AVX2-NEXT: movl %edx, %ecx |
| ; X64-AVX2-NEXT: shll %cl, %eax |
| ; X64-AVX2-NEXT: orl %esi, %eax |
| ; X64-AVX2-NEXT: testl %edx, %edx |
| ; X64-AVX2-NEXT: cmovel %edi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %z) |
| ret i32 %f |
| } |
| |
| ; Verify that weird types are minimally supported. |
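; The i37 shift amount has to be reduced with urem by 37 (not a power-of-2 mask), so
; the 32-bit code calls __umoddi3 and the 64-bit code uses a multiply-by-reciprocal
; remainder sequence.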
| declare i37 @llvm.fshl.i37(i37, i37, i37) |
| define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) nounwind { |
| ; X32-SSE2-LABEL: fshl_i37: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: pushl %ebp |
| ; X32-SSE2-NEXT: pushl %ebx |
| ; X32-SSE2-NEXT: pushl %edi |
| ; X32-SSE2-NEXT: pushl %esi |
| ; X32-SSE2-NEXT: subl $8, %esp |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X32-SSE2-NEXT: andl $31, %esi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: movl %eax, %ebp |
| ; X32-SSE2-NEXT: andl $31, %ebp |
| ; X32-SSE2-NEXT: movl $37, %ecx |
| ; X32-SSE2-NEXT: subl %ebx, %ecx |
| ; X32-SSE2-NEXT: movl $0, %edx |
| ; X32-SSE2-NEXT: sbbl %eax, %edx |
| ; X32-SSE2-NEXT: andl $31, %edx |
| ; X32-SSE2-NEXT: pushl $0 |
| ; X32-SSE2-NEXT: pushl $37 |
| ; X32-SSE2-NEXT: pushl %edx |
| ; X32-SSE2-NEXT: pushl %ecx |
| ; X32-SSE2-NEXT: calll __umoddi3 |
| ; X32-SSE2-NEXT: addl $16, %esp |
| ; X32-SSE2-NEXT: movl %eax, (%esp) # 4-byte Spill |
| ; X32-SSE2-NEXT: movl %eax, %ecx |
| ; X32-SSE2-NEXT: shrdl %cl, %esi, %edi |
| ; X32-SSE2-NEXT: pushl $0 |
| ; X32-SSE2-NEXT: pushl $37 |
| ; X32-SSE2-NEXT: pushl %ebp |
| ; X32-SSE2-NEXT: pushl %ebx |
| ; X32-SSE2-NEXT: calll __umoddi3 |
| ; X32-SSE2-NEXT: addl $16, %esp |
| ; X32-SSE2-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X32-SSE2-NEXT: movl %edx, %ebp |
| ; X32-SSE2-NEXT: movl %eax, %ecx |
| ; X32-SSE2-NEXT: shll %cl, %ebp |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X32-SSE2-NEXT: shldl %cl, %edx, %ebx |
| ; X32-SSE2-NEXT: testb $32, %al |
| ; X32-SSE2-NEXT: cmovnel %ebp, %ebx |
| ; X32-SSE2-NEXT: movl $0, %edx |
| ; X32-SSE2-NEXT: cmovnel %edx, %ebp |
| ; X32-SSE2-NEXT: movl (%esp), %ecx # 4-byte Reload |
| ; X32-SSE2-NEXT: shrl %cl, %esi |
| ; X32-SSE2-NEXT: testb $32, %cl |
| ; X32-SSE2-NEXT: cmovnel %esi, %edi |
| ; X32-SSE2-NEXT: cmovnel %edx, %esi |
| ; X32-SSE2-NEXT: orl %ebx, %esi |
| ; X32-SSE2-NEXT: orl %ebp, %edi |
| ; X32-SSE2-NEXT: orl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill |
| ; X32-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %edi |
| ; X32-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %esi |
| ; X32-SSE2-NEXT: movl %edi, %eax |
| ; X32-SSE2-NEXT: movl %esi, %edx |
| ; X32-SSE2-NEXT: addl $8, %esp |
| ; X32-SSE2-NEXT: popl %esi |
| ; X32-SSE2-NEXT: popl %edi |
| ; X32-SSE2-NEXT: popl %ebx |
| ; X32-SSE2-NEXT: popl %ebp |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshl_i37: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: pushq %rbx |
| ; X64-AVX2-NEXT: movq %rdx, %r10 |
| ; X64-AVX2-NEXT: movabsq $137438953471, %r8 # imm = 0x1FFFFFFFFF |
| ; X64-AVX2-NEXT: andq %r8, %rsi |
| ; X64-AVX2-NEXT: movl $37, %r9d |
| ; X64-AVX2-NEXT: subq %rdx, %r9 |
| ; X64-AVX2-NEXT: andq %r8, %r10 |
| ; X64-AVX2-NEXT: movabsq $-2492803253203993461, %r11 # imm = 0xDD67C8A60DD67C8B |
| ; X64-AVX2-NEXT: movq %r10, %rax |
| ; X64-AVX2-NEXT: mulq %r11 |
| ; X64-AVX2-NEXT: shrq $5, %rdx |
| ; X64-AVX2-NEXT: leaq (%rdx,%rdx,8), %rax |
| ; X64-AVX2-NEXT: leaq (%rdx,%rax,4), %rax |
| ; X64-AVX2-NEXT: subq %rax, %r10 |
| ; X64-AVX2-NEXT: movq %rdi, %rbx |
| ; X64-AVX2-NEXT: movl %r10d, %ecx |
| ; X64-AVX2-NEXT: shlq %cl, %rbx |
| ; X64-AVX2-NEXT: andq %r9, %r8 |
| ; X64-AVX2-NEXT: movq %r8, %rax |
| ; X64-AVX2-NEXT: mulq %r11 |
| ; X64-AVX2-NEXT: shrq $5, %rdx |
| ; X64-AVX2-NEXT: leaq (%rdx,%rdx,8), %rax |
| ; X64-AVX2-NEXT: leal (%rdx,%rax,4), %eax |
| ; X64-AVX2-NEXT: subl %eax, %r9d |
| ; X64-AVX2-NEXT: movl %r9d, %ecx |
| ; X64-AVX2-NEXT: shrq %cl, %rsi |
| ; X64-AVX2-NEXT: orq %rbx, %rsi |
| ; X64-AVX2-NEXT: testq %r10, %r10 |
| ; X64-AVX2-NEXT: cmoveq %rdi, %rsi |
| ; X64-AVX2-NEXT: movq %rsi, %rax |
| ; X64-AVX2-NEXT: popq %rbx |
| ; X64-AVX2-NEXT: retq |
| %f = call i37 @llvm.fshl.i37(i37 %x, i37 %y, i37 %z) |
| ret i37 %f |
| } |
| |
| ; extract(concat(0b1110000, 0b1111111) << 2) = 0b1000011 |
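; i.e. fshl(i7 112, i7 127, i7 2): 112 = 0b1110000, 127 = 0b1111111; the 14-bit
; concatenation shifted left by 2 is 0b10000111111100, whose top 7 bits are
; 0b1000011 = 67.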
| |
| declare i7 @llvm.fshl.i7(i7, i7, i7) |
define i7 @fshl_i7_const_fold() nounwind {
| ; ANY-LABEL: fshl_i7_const_fold: |
| ; ANY: # %bb.0: |
| ; ANY-NEXT: movb $67, %al |
| ; ANY-NEXT: ret{{[l|q]}} |
| %f = call i7 @llvm.fshl.i7(i7 112, i7 127, i7 2) |
| ret i7 %f |
| } |
| |
; With a constant shift amount, this is a 'shld' with a constant operand.
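; fshl(i32 %x, i32 %y, i32 9) == (%x << 9) | (%y >> 23), which maps directly to shld $9.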
| |
| define i32 @fshl_i32_const_shift(i32 %x, i32 %y) nounwind { |
| ; X32-SSE2-LABEL: fshl_i32_const_shift: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: shldl $9, %ecx, %eax |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshl_i32_const_shift: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: shldl $9, %esi, %edi |
| ; X64-AVX2-NEXT: movl %edi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 9) |
| ret i32 %f |
| } |
| |
; Check modulo math on the shift amount.
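; 41 & 31 == 9, so this should lower exactly like fshl_i32_const_shift above.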
| |
| define i32 @fshl_i32_const_overshift(i32 %x, i32 %y) nounwind { |
| ; X32-SSE2-LABEL: fshl_i32_const_overshift: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: shldl $9, %ecx, %eax |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshl_i32_const_overshift: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: shldl $9, %esi, %edi |
| ; X64-AVX2-NEXT: movl %edi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 41) |
| ret i32 %f |
| } |
| |
| ; 64-bit should also work. |
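; 105 & 63 == 41: x86-64 emits shldq $41, and x86-32 splits the i64 into a shldl/shrdl pair.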
| |
| define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) nounwind { |
| ; X32-SSE2-LABEL: fshl_i64_const_overshift: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X32-SSE2-NEXT: shldl $9, %ecx, %edx |
| ; X32-SSE2-NEXT: shrdl $23, %ecx, %eax |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshl_i64_const_overshift: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: shldq $41, %rsi, %rdi |
| ; X64-AVX2-NEXT: movq %rdi, %rax |
| ; X64-AVX2-NEXT: retq |
| %f = call i64 @llvm.fshl.i64(i64 %x, i64 %y, i64 105) |
| ret i64 %f |
| } |
| |
| ; This should work without any node-specific logic. |
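; fshl(i8 255, i8 0, i8 7) = extract(concat(0b11111111, 0b00000000) << 7) = 0b10000000 = -128.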
| |
| define i8 @fshl_i8_const_fold() nounwind { |
| ; ANY-LABEL: fshl_i8_const_fold: |
| ; ANY: # %bb.0: |
| ; ANY-NEXT: movb $-128, %al |
| ; ANY-NEXT: ret{{[l|q]}} |
| %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 7) |
| ret i8 %f |
| } |
| |
| ; Repeat everything for funnel shift right. |
| |
; General case - all operands can be variables. x86 has 'shrd', but it is not matched here.
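; The checks below show the generic expansion, roughly:
;   %k = %z & 31; %r = (%y >> %k) | (%x << (32 - %z)); select %y when %k == 0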
| |
| define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) nounwind { |
| ; X32-SSE2-LABEL: fshr_i32: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: pushl %ebx |
| ; X32-SSE2-NEXT: pushl %edi |
| ; X32-SSE2-NEXT: pushl %esi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X32-SSE2-NEXT: movl $32, %ebx |
| ; X32-SSE2-NEXT: subl %edx, %ebx |
| ; X32-SSE2-NEXT: andl $31, %edx |
| ; X32-SSE2-NEXT: movl %esi, %edi |
| ; X32-SSE2-NEXT: movl %edx, %ecx |
| ; X32-SSE2-NEXT: shrl %cl, %edi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: movl %ebx, %ecx |
| ; X32-SSE2-NEXT: shll %cl, %eax |
| ; X32-SSE2-NEXT: orl %edi, %eax |
| ; X32-SSE2-NEXT: testl %edx, %edx |
| ; X32-SSE2-NEXT: cmovel %esi, %eax |
| ; X32-SSE2-NEXT: popl %esi |
| ; X32-SSE2-NEXT: popl %edi |
| ; X32-SSE2-NEXT: popl %ebx |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshr_i32: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: movl $32, %r8d |
| ; X64-AVX2-NEXT: subl %edx, %r8d |
| ; X64-AVX2-NEXT: andl $31, %edx |
| ; X64-AVX2-NEXT: movl %esi, %eax |
| ; X64-AVX2-NEXT: movl %edx, %ecx |
| ; X64-AVX2-NEXT: shrl %cl, %eax |
| ; X64-AVX2-NEXT: movl %r8d, %ecx |
| ; X64-AVX2-NEXT: shll %cl, %edi |
| ; X64-AVX2-NEXT: orl %eax, %edi |
| ; X64-AVX2-NEXT: testl %edx, %edx |
| ; X64-AVX2-NEXT: cmovel %esi, %edi |
| ; X64-AVX2-NEXT: movl %edi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z) |
| ret i32 %f |
| } |
| |
| ; Verify that weird types are minimally supported. |
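; As in fshl_i37, the shift amount is reduced with urem by 37: __umoddi3 calls on
; 32-bit, a multiply-by-reciprocal remainder sequence on 64-bit.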
| declare i37 @llvm.fshr.i37(i37, i37, i37) |
| define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) nounwind { |
| ; X32-SSE2-LABEL: fshr_i37: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: pushl %ebp |
| ; X32-SSE2-NEXT: pushl %ebx |
| ; X32-SSE2-NEXT: pushl %edi |
| ; X32-SSE2-NEXT: pushl %esi |
| ; X32-SSE2-NEXT: pushl %eax |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X32-SSE2-NEXT: andl $31, %esi |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: andl $31, %eax |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X32-SSE2-NEXT: pushl $0 |
| ; X32-SSE2-NEXT: pushl $37 |
| ; X32-SSE2-NEXT: pushl %eax |
| ; X32-SSE2-NEXT: pushl %ebp |
| ; X32-SSE2-NEXT: calll __umoddi3 |
| ; X32-SSE2-NEXT: addl $16, %esp |
| ; X32-SSE2-NEXT: movl %eax, %ebx |
| ; X32-SSE2-NEXT: movl %edx, (%esp) # 4-byte Spill |
| ; X32-SSE2-NEXT: movl $37, %eax |
| ; X32-SSE2-NEXT: subl %ebp, %eax |
| ; X32-SSE2-NEXT: movl $0, %edx |
| ; X32-SSE2-NEXT: sbbl {{[0-9]+}}(%esp), %edx |
| ; X32-SSE2-NEXT: andl $31, %edx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X32-SSE2-NEXT: movl %ebx, %ecx |
| ; X32-SSE2-NEXT: shrdl %cl, %esi, %ebp |
| ; X32-SSE2-NEXT: pushl $0 |
| ; X32-SSE2-NEXT: pushl $37 |
| ; X32-SSE2-NEXT: pushl %edx |
| ; X32-SSE2-NEXT: pushl %eax |
| ; X32-SSE2-NEXT: calll __umoddi3 |
| ; X32-SSE2-NEXT: addl $16, %esp |
| ; X32-SSE2-NEXT: movl %eax, %ecx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X32-SSE2-NEXT: movl %edx, %eax |
| ; X32-SSE2-NEXT: shll %cl, %eax |
| ; X32-SSE2-NEXT: shldl %cl, %edx, %edi |
| ; X32-SSE2-NEXT: testb $32, %cl |
| ; X32-SSE2-NEXT: cmovnel %eax, %edi |
| ; X32-SSE2-NEXT: movl $0, %edx |
| ; X32-SSE2-NEXT: cmovnel %edx, %eax |
| ; X32-SSE2-NEXT: movl %ebx, %ecx |
| ; X32-SSE2-NEXT: shrl %cl, %esi |
| ; X32-SSE2-NEXT: testb $32, %bl |
| ; X32-SSE2-NEXT: cmovnel %esi, %ebp |
| ; X32-SSE2-NEXT: cmovnel %edx, %esi |
| ; X32-SSE2-NEXT: orl %edi, %esi |
| ; X32-SSE2-NEXT: orl %eax, %ebp |
| ; X32-SSE2-NEXT: orl %ebx, (%esp) # 4-byte Folded Spill |
| ; X32-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %ebp |
| ; X32-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %esi |
| ; X32-SSE2-NEXT: movl %ebp, %eax |
| ; X32-SSE2-NEXT: movl %esi, %edx |
| ; X32-SSE2-NEXT: addl $4, %esp |
| ; X32-SSE2-NEXT: popl %esi |
| ; X32-SSE2-NEXT: popl %edi |
| ; X32-SSE2-NEXT: popl %ebx |
| ; X32-SSE2-NEXT: popl %ebp |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshr_i37: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: pushq %rbx |
| ; X64-AVX2-NEXT: movq %rdx, %r10 |
| ; X64-AVX2-NEXT: movabsq $137438953471, %r8 # imm = 0x1FFFFFFFFF |
| ; X64-AVX2-NEXT: movq %rsi, %r11 |
| ; X64-AVX2-NEXT: andq %r8, %r11 |
| ; X64-AVX2-NEXT: movl $37, %r9d |
| ; X64-AVX2-NEXT: subq %rdx, %r9 |
| ; X64-AVX2-NEXT: andq %r8, %r10 |
| ; X64-AVX2-NEXT: movabsq $-2492803253203993461, %rbx # imm = 0xDD67C8A60DD67C8B |
| ; X64-AVX2-NEXT: movq %r10, %rax |
| ; X64-AVX2-NEXT: mulq %rbx |
| ; X64-AVX2-NEXT: shrq $5, %rdx |
| ; X64-AVX2-NEXT: leaq (%rdx,%rdx,8), %rax |
| ; X64-AVX2-NEXT: leaq (%rdx,%rax,4), %rax |
| ; X64-AVX2-NEXT: subq %rax, %r10 |
| ; X64-AVX2-NEXT: movl %r10d, %ecx |
| ; X64-AVX2-NEXT: shrq %cl, %r11 |
| ; X64-AVX2-NEXT: andq %r9, %r8 |
| ; X64-AVX2-NEXT: movq %r8, %rax |
| ; X64-AVX2-NEXT: mulq %rbx |
| ; X64-AVX2-NEXT: shrq $5, %rdx |
| ; X64-AVX2-NEXT: leaq (%rdx,%rdx,8), %rax |
| ; X64-AVX2-NEXT: leal (%rdx,%rax,4), %eax |
| ; X64-AVX2-NEXT: subl %eax, %r9d |
| ; X64-AVX2-NEXT: movl %r9d, %ecx |
| ; X64-AVX2-NEXT: shlq %cl, %rdi |
| ; X64-AVX2-NEXT: orq %r11, %rdi |
| ; X64-AVX2-NEXT: testq %r10, %r10 |
| ; X64-AVX2-NEXT: cmoveq %rsi, %rdi |
| ; X64-AVX2-NEXT: movq %rdi, %rax |
| ; X64-AVX2-NEXT: popq %rbx |
| ; X64-AVX2-NEXT: retq |
| %f = call i37 @llvm.fshr.i37(i37 %x, i37 %y, i37 %z) |
| ret i37 %f |
| } |
| |
| ; extract(concat(0b1110000, 0b1111111) >> 2) = 0b0011111 |
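; i.e. fshr(i7 112, i7 127, i7 2): the 14-bit concatenation shifted right by 2 is
; 0b111000011111, whose low 7 bits are 0b0011111 = 31.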
| |
| declare i7 @llvm.fshr.i7(i7, i7, i7) |
| define i7 @fshr_i7_const_fold() nounwind { |
| ; ANY-LABEL: fshr_i7_const_fold: |
| ; ANY: # %bb.0: |
| ; ANY-NEXT: movb $31, %al |
| ; ANY-NEXT: ret{{[l|q]}} |
| %f = call i7 @llvm.fshr.i7(i7 112, i7 127, i7 2) |
| ret i7 %f |
| } |
| |
; With a constant shift amount, this is a 'shrd' or 'shld' with a constant operand.
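; fshr(i32 %x, i32 %y, i32 9) == (%y >> 9) | (%x << 23), i.e. shrd $9, which the
; backend prints as the equivalent shld $23.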
| |
| define i32 @fshr_i32_const_shift(i32 %x, i32 %y) nounwind { |
| ; X32-SSE2-LABEL: fshr_i32_const_shift: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: shldl $23, %ecx, %eax |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshr_i32_const_shift: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: shldl $23, %esi, %edi |
| ; X64-AVX2-NEXT: movl %edi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 9) |
| ret i32 %f |
| } |
| |
; Check modulo math on the shift amount. 41 - 32 = 9, but the right-shift is emitted as a left 'shld', so the amount becomes 32 - 9 = 23.
| |
| define i32 @fshr_i32_const_overshift(i32 %x, i32 %y) nounwind { |
| ; X32-SSE2-LABEL: fshr_i32_const_overshift: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: shldl $23, %ecx, %eax |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshr_i32_const_overshift: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: shldl $23, %esi, %edi |
| ; X64-AVX2-NEXT: movl %edi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 41) |
| ret i32 %f |
| } |
| |
; 64-bit should also work. 105 - 64 = 41, but the right-shift is emitted as a left 'shld', so the amount becomes 64 - 41 = 23.
| |
| define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) nounwind { |
| ; X32-SSE2-LABEL: fshr_i64_const_overshift: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X32-SSE2-NEXT: shrdl $9, %ecx, %eax |
| ; X32-SSE2-NEXT: shldl $23, %ecx, %edx |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshr_i64_const_overshift: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: shldq $23, %rsi, %rdi |
| ; X64-AVX2-NEXT: movq %rdi, %rax |
| ; X64-AVX2-NEXT: retq |
| %f = call i64 @llvm.fshr.i64(i64 %x, i64 %y, i64 105) |
| ret i64 %f |
| } |
| |
| ; This should work without any node-specific logic. |
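; fshr(i8 255, i8 0, i8 7) = extract(concat(0b11111111, 0b00000000) >> 7) = 0b11111110 = -2.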
| |
| define i8 @fshr_i8_const_fold() nounwind { |
| ; ANY-LABEL: fshr_i8_const_fold: |
| ; ANY: # %bb.0: |
| ; ANY-NEXT: movb $-2, %al |
| ; ANY-NEXT: ret{{[l|q]}} |
| %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 7) |
| ret i8 %f |
| } |
| |
| define i32 @fshl_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind { |
| ; X32-SSE2-LABEL: fshl_i32_shift_by_bitwidth: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshl_i32_shift_by_bitwidth: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: movl %edi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 32) |
| ret i32 %f |
| } |
| |
| define i32 @fshr_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind { |
| ; X32-SSE2-LABEL: fshr_i32_shift_by_bitwidth: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshr_i32_shift_by_bitwidth: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: movl %esi, %eax |
| ; X64-AVX2-NEXT: retq |
| %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 32) |
| ret i32 %f |
| } |
| |
| define <4 x i32> @fshl_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) nounwind { |
| ; ANY-LABEL: fshl_v4i32_shift_by_bitwidth: |
| ; ANY: # %bb.0: |
| ; ANY-NEXT: ret{{[l|q]}} |
| %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>) |
| ret <4 x i32> %f |
| } |
| |
| define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) nounwind { |
| ; X32-SSE2-LABEL: fshr_v4i32_shift_by_bitwidth: |
| ; X32-SSE2: # %bb.0: |
| ; X32-SSE2-NEXT: movaps %xmm1, %xmm0 |
| ; X32-SSE2-NEXT: retl |
| ; |
| ; X64-AVX2-LABEL: fshr_v4i32_shift_by_bitwidth: |
| ; X64-AVX2: # %bb.0: |
| ; X64-AVX2-NEXT: vmovaps %xmm1, %xmm0 |
| ; X64-AVX2-NEXT: retq |
| %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>) |
| ret <4 x i32> %f |
| } |
| |