Commit 5ff6c8e

[RISCV] Add Zvfh scalable vector vp.splice tests. NFC
1 parent b8ea650

File tree

1 file changed

llvm/test/CodeGen/RISCV/rvv/vp-splice.ll

Lines changed: 40 additions & 13 deletions
@@ -1,19 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v -verify-machineinstrs \
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zvfh -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
 
-declare <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, <vscale x 2 x i1>, i32, i32)
-
-declare <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, <vscale x 1 x i1>, i32, i32)
-declare <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32, i32)
-declare <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, <vscale x 4 x i1>, i32, i32)
-declare <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, <vscale x 8 x i1>, i32, i32)
-
-declare <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i32, <vscale x 1 x i1>, i32, i32)
-declare <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i32, <vscale x 2 x i1>, i32, i32)
-
-declare <vscale x 16 x i64> @llvm.experimental.vp.splice.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, i32, <vscale x 16 x i1>, i32, i32)
-
 define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2i64:
 ; CHECK:       # %bb.0:
@@ -437,3 +425,42 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16
   %v = call <vscale x 16 x i64> @llvm.experimental.vp.splice.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i64> %vb, i32 -1, <vscale x 16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 16 x i64> %v
 }
+
+define <vscale x 2 x half> @test_vp_splice_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @test_vp_splice_nxv2f16_negative_offset(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2f16_negative_offset:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetivli zero, 5, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x half> @test_vp_splice_nxv2f16_masked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_nxv2f16_masked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+  ret <vscale x 2 x half> %v
+}
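For anyone who wants to inspect the generated code outside of lit, the sketch below lifts the updated RUN invocation and the first new nxv2f16 test body out of the diff into a standalone .ll file. The comments and the FileCheck-free RUN line are illustrative additions, not part of the commit; the lowering they describe is read off the CHECK lines above (slide the first operand down by the offset 5 under an effective length of evla - 5, then slide the second operand up to that position under evlb).

; Standalone sketch (illustrative; the actual test lives in
; llvm/test/CodeGen/RISCV/rvv/vp-splice.ll). Running llc on it prints the
; vslidedown/vslideup sequence the new CHECK lines expect.
; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zvfh -verify-machineinstrs < %s

define <vscale x 2 x half> @test_vp_splice_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
  ; Splice %va and %vb at offset 5; splat (i1 1) is an all-true mask, and
  ; %evla/%evlb are the explicit vector lengths of the two operands.
  %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
  ret <vscale x 2 x half> %v
}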
