     store <vscale x 1 x i8> %b, ptr %pa, align 1
     ret void
   }
-
+
+  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+
+  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+
+  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
 ...
 ---
 name: vstore_nx1i8
@@ -28,3 +48,51 @@ body: |
     PseudoRET

 ...
+---
+name: vload_nx2i8
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i8
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8i8
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name: vload_nx16i8
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...