; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections -mattr=+retpoline | FileCheck %s --check-prefix=X64-RETPOLINE
;
; FIXME: Add support for 32-bit.

@global_fnptr = external global i32 ()*

@global_blockaddrs = constant [4 x i8*] [
  i8* blockaddress(@test_indirectbr_global, %bb0),
  i8* blockaddress(@test_indirectbr_global, %bb1),
  i8* blockaddress(@test_indirectbr_global, %bb2),
  i8* blockaddress(@test_indirectbr_global, %bb3)
]

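; Check that the call target loaded from %ptr is hardened with the predicate
; state before the indirect call, and that the retpoline thunk is used when
; retpolines are enabled.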
define i32 @test_indirect_call(i32 ()** %ptr) nounwind {
; X64-LABEL: test_indirect_call:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: callq *%rcx
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: popq %rcx
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirect_call:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: pushq %rax
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq (%rdi), %r11
; X64-RETPOLINE-NEXT: orq %rax, %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: callq __llvm_retpoline_r11
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: popq %rcx
; X64-RETPOLINE-NEXT: retq
entry:
  %fp = load i32 ()*, i32 ()** %ptr
  %v = call i32 %fp()
  ret i32 %v
}

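; Same as above, but for an indirect tail call: the hardened target feeds the
; tail jump (or the retpoline tail-call thunk).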
define i32 @test_indirect_tail_call(i32 ()** %ptr) nounwind {
; X64-LABEL: test_indirect_tail_call:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *%rcx # TAILCALL
;
; X64-RETPOLINE-LABEL: test_indirect_tail_call:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq (%rdi), %r11
; X64-RETPOLINE-NEXT: orq %rax, %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: jmp __llvm_retpoline_r11 # TAILCALL
entry:
  %fp = load i32 ()*, i32 ()** %ptr
  %v = tail call i32 %fp()
  ret i32 %v
}

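; Check an indirect call through a function pointer loaded from a global.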
define i32 @test_indirect_call_global() nounwind {
; X64-LABEL: test_indirect_call_global:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq {{.*}}(%rip), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: callq *%rcx
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: popq %rcx
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirect_call_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: pushq %rax
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq {{.*}}(%rip), %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: callq __llvm_retpoline_r11
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: popq %rcx
; X64-RETPOLINE-NEXT: retq
entry:
  %fp = load i32 ()*, i32 ()** @global_fnptr
  %v = call i32 %fp()
  ret i32 %v
}

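; Check an indirect tail call through a function pointer loaded from a global.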
define i32 @test_indirect_tail_call_global() nounwind {
; X64-LABEL: test_indirect_tail_call_global:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq {{.*}}(%rip), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *%rcx # TAILCALL
;
; X64-RETPOLINE-LABEL: test_indirect_tail_call_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rax
; X64-RETPOLINE-NEXT: movq $-1, %rcx
; X64-RETPOLINE-NEXT: sarq $63, %rax
; X64-RETPOLINE-NEXT: movq {{.*}}(%rip), %r11
; X64-RETPOLINE-NEXT: shlq $47, %rax
; X64-RETPOLINE-NEXT: orq %rax, %rsp
; X64-RETPOLINE-NEXT: jmp __llvm_retpoline_r11 # TAILCALL
entry:
  %fp = load i32 ()*, i32 ()** @global_fnptr
  %v = tail call i32 %fp()
  ret i32 %v
}

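; Check that the target of an indirectbr loaded from memory is hardened with
; the predicate state before the indirect jump.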
define i32 @test_indirectbr(i8** %ptr) nounwind {
; X64-LABEL: test_indirectbr:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: jmpq *%rax
; X64-NEXT: .LBB4_1: # %bb0
; X64-NEXT: movl $2, %eax
; X64-NEXT: jmp .LBB4_2
; X64-NEXT: .LBB4_4: # %bb2
; X64-NEXT: movl $13, %eax
; X64-NEXT: jmp .LBB4_2
; X64-NEXT: .LBB4_5: # %bb3
; X64-NEXT: movl $42, %eax
; X64-NEXT: jmp .LBB4_2
; X64-NEXT: .LBB4_3: # %bb1
; X64-NEXT: movl $7, %eax
; X64-NEXT: .LBB4_2: # %bb0
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirectbr:
; X64-RETPOLINE: # %bb.0: # %entry
entry:
  %a = load i8*, i8** %ptr
  indirectbr i8* %a, [ label %bb0, label %bb1, label %bb2, label %bb3 ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42
}

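; Check an indirectbr whose targets come from a constant array of block
; addresses; with retpolines enabled the indirect jump is expanded into a
; compare-and-branch chain.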
define i32 @test_indirectbr_global(i32 %idx) nounwind {
; X64-LABEL: test_indirectbr_global:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: movslq %edi, %rax
; X64-NEXT: movq global_blockaddrs(,%rax,8), %rax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: jmpq *%rax
; X64-NEXT: .Ltmp0: # Block address taken
; X64-NEXT: .LBB5_1: # %bb0
; X64-NEXT: movl $2, %eax
; X64-NEXT: jmp .LBB5_2
; X64-NEXT: .Ltmp1: # Block address taken
; X64-NEXT: .LBB5_4: # %bb2
; X64-NEXT: movl $13, %eax
; X64-NEXT: jmp .LBB5_2
; X64-NEXT: .Ltmp2: # Block address taken
; X64-NEXT: .LBB5_5: # %bb3
; X64-NEXT: movl $42, %eax
; X64-NEXT: jmp .LBB5_2
; X64-NEXT: .Ltmp3: # Block address taken
; X64-NEXT: .LBB5_3: # %bb1
; X64-NEXT: movl $7, %eax
; X64-NEXT: .LBB5_2: # %bb0
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_indirectbr_global:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq $-1, %rax
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: movslq %edi, %rdx
; X64-RETPOLINE-NEXT: movq global_blockaddrs(,%rdx,8), %rdx
; X64-RETPOLINE-NEXT: orq %rcx, %rdx
; X64-RETPOLINE-NEXT: cmpq $2, %rdx
; X64-RETPOLINE-NEXT: je .LBB6_5
; X64-RETPOLINE-NEXT: # %bb.1: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpq $3, %rdx
; X64-RETPOLINE-NEXT: je .LBB6_6
; X64-RETPOLINE-NEXT: # %bb.2: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpq $4, %rdx
; X64-RETPOLINE-NEXT: jne .LBB6_3
; X64-RETPOLINE-NEXT: .Ltmp0: # Block address taken
; X64-RETPOLINE-NEXT: # %bb.7: # %bb3
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $42, %eax
; X64-RETPOLINE-NEXT: jmp .LBB6_4
; X64-RETPOLINE-NEXT: .Ltmp1: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_5: # %bb1
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $7, %eax
; X64-RETPOLINE-NEXT: jmp .LBB6_4
; X64-RETPOLINE-NEXT: .Ltmp2: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_6: # %bb2
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $13, %eax
; X64-RETPOLINE-NEXT: jmp .LBB6_4
; X64-RETPOLINE-NEXT: .Ltmp3: # Block address taken
; X64-RETPOLINE-NEXT: .LBB6_3: # %bb0
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $2, %eax
; X64-RETPOLINE-NEXT: .LBB6_4: # %bb0
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
entry:
  %ptr = getelementptr [4 x i8*], [4 x i8*]* @global_blockaddrs, i32 0, i32 %idx
  %a = load i8*, i8** %ptr
  indirectbr i8* %a, [ label %bb0, label %bb1, label %bb2, label %bb3 ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42
}

; This function's switch is crafted to trigger jump-table lowering in the x86
; backend so that we can test exactly how that jump-table lowering behaves.
define i32 @test_switch_jumptable(i32 %idx) nounwind {
; X64-LABEL: test_switch_jumptable:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: cmpl $3, %edi
; X64-NEXT: ja .LBB6_2
; X64-NEXT: # %bb.1: # %entry
; X64-NEXT: cmovaq %rax, %rcx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movq .LJTI6_0(,%rax,8), %rax
; X64-NEXT: orq %rcx, %rax
; X64-NEXT: jmpq *%rax
; X64-NEXT: .LBB6_3: # %bb1
; X64-NEXT: movl $7, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_2: # %bb0
; X64-NEXT: cmovbeq %rax, %rcx
; X64-NEXT: movl $2, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_5: # %bb2
; X64-NEXT: movl $13, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_6: # %bb3
; X64-NEXT: movl $42, %eax
; X64-NEXT: jmp .LBB6_4
; X64-NEXT: .LBB6_7: # %bb5
; X64-NEXT: movl $11, %eax
; X64-NEXT: .LBB6_4: # %bb1
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: retq
;
; X64-RETPOLINE-LABEL: test_switch_jumptable:
; X64-RETPOLINE: # %bb.0: # %entry
; X64-RETPOLINE-NEXT: movq %rsp, %rcx
; X64-RETPOLINE-NEXT: movq $-1, %rax
; X64-RETPOLINE-NEXT: sarq $63, %rcx
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jg .LBB7_4
; X64-RETPOLINE-NEXT: # %bb.1: # %entry
; X64-RETPOLINE-NEXT: cmovgq %rax, %rcx
; X64-RETPOLINE-NEXT: testl %edi, %edi
; X64-RETPOLINE-NEXT: je .LBB7_8
; X64-RETPOLINE-NEXT: # %bb.2: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $1, %edi
; X64-RETPOLINE-NEXT: jne .LBB7_6
; X64-RETPOLINE-NEXT: # %bb.3: # %bb2
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $13, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_4: # %entry
; X64-RETPOLINE-NEXT: cmovleq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $2, %edi
; X64-RETPOLINE-NEXT: je .LBB7_9
; X64-RETPOLINE-NEXT: # %bb.5: # %entry
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: cmpl $3, %edi
; X64-RETPOLINE-NEXT: jne .LBB7_6
; X64-RETPOLINE-NEXT: # %bb.10: # %bb5
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $11, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_6:
; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $2, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_8: # %bb1
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $7, %eax
; X64-RETPOLINE-NEXT: jmp .LBB7_7
; X64-RETPOLINE-NEXT: .LBB7_9: # %bb3
; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx
; X64-RETPOLINE-NEXT: movl $42, %eax
; X64-RETPOLINE-NEXT: .LBB7_7: # %bb0
; X64-RETPOLINE-NEXT: shlq $47, %rcx
; X64-RETPOLINE-NEXT: orq %rcx, %rsp
; X64-RETPOLINE-NEXT: retq
entry:
  switch i32 %idx, label %bb0 [
    i32 0, label %bb1
    i32 1, label %bb2
    i32 2, label %bb3
    i32 3, label %bb5
  ]

bb0:
  ret i32 2

bb1:
  ret i32 7

bb2:
  ret i32 13

bb3:
  ret i32 42

bb5:
  ret i32 11
}