|
| 1 | +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| 2 | +; RUN: llc -mtriple=riscv64 -mattr=+m,+c,+v < %s | FileCheck --check-prefix=RV64V %s |
| 3 | + |
| 4 | +declare void @llvm.va_copy.p0(ptr, ptr) |
| 5 | +declare void @llvm.va_end.p0(ptr) |
| 6 | + |
; Vararg codegen test: a function taking (ptr, ...) with "frame-pointer"="all"
; must spill the integer argument registers a1-a7 into the varargs save area
; laid out above the frame pointer s0 (the `sd aN, K(s0)` stores below), and
; va_start must materialize a pointer to that area (addi a0, s0, 8 / sd).
; The 8000-byte alloca makes the frame too large for a single 12-bit
; immediate sp adjustment, hence the lui/addiw/sub sequence.
; NOTE(review): the RV64V-NEXT check lines were autogenerated by
; utils/update_llc_test_checks.py (see header) -- regenerate them with that
; script rather than editing by hand.
| 7 | +define dso_local void @_Z3fooPKcz(ptr noundef %0, ...) "frame-pointer"="all" {
| 8 | +; RV64V-LABEL: _Z3fooPKcz:
| 9 | +; RV64V: # %bb.0:
| 10 | +; RV64V-NEXT: addi sp, sp, -496
| 11 | +; RV64V-NEXT: .cfi_def_cfa_offset 496
| 12 | +; RV64V-NEXT: sd ra, 424(sp) # 8-byte Folded Spill
| 13 | +; RV64V-NEXT: sd s0, 416(sp) # 8-byte Folded Spill
| 14 | +; RV64V-NEXT: .cfi_offset ra, -72
| 15 | +; RV64V-NEXT: .cfi_offset s0, -80
| 16 | +; RV64V-NEXT: addi s0, sp, 432
| 17 | +; RV64V-NEXT: .cfi_def_cfa s0, 64
| 18 | +; RV64V-NEXT: lui t0, 2
| 19 | +; RV64V-NEXT: addiw t0, t0, -576
| 20 | +; RV64V-NEXT: sub sp, sp, t0
| 21 | +; RV64V-NEXT: sd a5, 40(s0)
| 22 | +; RV64V-NEXT: sd a6, 48(s0)
| 23 | +; RV64V-NEXT: sd a7, 56(s0)
| 24 | +; RV64V-NEXT: sd a1, 8(s0)
| 25 | +; RV64V-NEXT: sd a2, 16(s0)
| 26 | +; RV64V-NEXT: sd a3, 24(s0)
| 27 | +; RV64V-NEXT: sd a4, 32(s0)
| 28 | +; RV64V-NEXT: sd a0, -32(s0)
| 29 | +; RV64V-NEXT: addi a0, s0, 8
| 30 | +; RV64V-NEXT: sd a0, -40(s0)
| 31 | +; RV64V-NEXT: addi sp, s0, -496
| 32 | +; RV64V-NEXT: ld ra, 424(sp) # 8-byte Folded Reload
| 33 | +; RV64V-NEXT: ld s0, 416(sp) # 8-byte Folded Reload
| 34 | +; RV64V-NEXT: addi sp, sp, 496
| 35 | +; RV64V-NEXT: ret
; %2 holds the fixed ptr argument, %3 is the va_list slot, %4 is the large
; local buffer that forces the oversized frame. The loads %6/%7 keep the
; slots live; their values are unused by design.
; NOTE(review): llvm.va_start.p0 is called without an explicit declare while
; the declared llvm.va_copy.p0 is unused -- the IR parser auto-declares
; intrinsics, so this parses fine; presumably leftover from the reducing of
; the original C input. Confirm before "cleaning up" the declares.
| 36 | + %2 = alloca ptr, align 8
| 37 | + %3 = alloca ptr, align 8
| 38 | + %4 = alloca [8000 x i8], align 1
| 39 | + store ptr %0, ptr %2, align 8
| 40 | + call void @llvm.va_start.p0(ptr %3)
| 41 | + %5 = getelementptr inbounds [8000 x i8], ptr %4, i64 0, i64 0
| 42 | + %6 = load ptr, ptr %2, align 8
| 43 | + %7 = load ptr, ptr %3, align 8
| 44 | + call void @llvm.va_end.p0(ptr %3)
| 45 | + ret void
| 46 | +}
0 commit comments