@@ -880,3 +880,69 @@ define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) {
   ret <vscale x 2 x i64> %va
 }
 
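+; Loads of scalable pointer vectors. A p0 lane is 32 bits wide on RV32 and
+; 64 bits wide on RV64; <vscale x 1 x p0> fits in a single vector register
+; ($v8) on both targets.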
+define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx1ptr
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT: liveins: $x10
+  ; RV32-NEXT: {{ $}}
+  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+  ; RV32-NEXT: PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx1ptr
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT: liveins: $x10
+  ; RV64-NEXT: {{ $}}
+  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+  ; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+  ; RV64-NEXT: PseudoRET implicit $v8
+  %va = load <vscale x 1 x ptr>, ptr %pa
+  ret <vscale x 1 x ptr> %va
+}
+
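+; With two p0 lanes, RV32 still returns the value in a single register ($v8),
+; while RV64's 64-bit pointers need an LMUL=2 register group ($v8m2).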
+define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx2ptr
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT: liveins: $x10
+  ; RV32-NEXT: {{ $}}
+  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+  ; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+  ; RV32-NEXT: PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: vload_nx2ptr
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT: liveins: $x10
+  ; RV64-NEXT: {{ $}}
+  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+  ; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x p0>)
+  ; RV64-NEXT: PseudoRET implicit $v8m2
+  %va = load <vscale x 2 x ptr>, ptr %pa
+  ret <vscale x 2 x ptr> %va
+}
+
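+; Eight p0 lanes take an LMUL=4 register group ($v8m4) on RV32 and the full
+; LMUL=8 group ($v8m8) on RV64.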
+define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) {
+  ; RV32-LABEL: name: vload_nx8ptr
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT: liveins: $x10
+  ; RV32-NEXT: {{ $}}
+  ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+  ; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+  ; RV32-NEXT: PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: vload_nx8ptr
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT: liveins: $x10
+  ; RV64-NEXT: {{ $}}
+  ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+  ; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x p0>)
+  ; RV64-NEXT: PseudoRET implicit $v8m8
+  %va = load <vscale x 8 x ptr>, ptr %pa
+  ret <vscale x 8 x ptr> %va
+}
+