; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> [[TMP1]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[CAST_SCALABLE]]
;
entry:
  %retval = alloca { <16 x float> }
  %0 = fadd <vscale x 4 x float> %.coerce, %.coerce
  store <vscale x 4 x float> %0, ptr %retval
  %1 = load <16 x float>, ptr %retval
  %cast.scalable = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> %1, i64 0)
  ret <vscale x 4 x float> %cast.scalable
}

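; The fixed-width load below reads at an offset of 8 bytes into the alloca
; (via the inbounds GEP), rather than at the start of the scalable store.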
define <vscale x 4 x float> @scalable_store_to_fixed_load_with_offset(<vscale x 4 x float> %a) #1 {
; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> [[TMP0]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[CAST_SCALABLE]]
;
entry:
  %ptr = alloca { <32 x float> }
  store <vscale x 4 x float> %a, ptr %ptr
  %gep = getelementptr inbounds i8, ptr %ptr, i64 8
  %1 = load <16 x float>, ptr %gep
  %cast.scalable = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> %1, i64 0)
  ret <vscale x 4 x float> %cast.scalable
}

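; As above, but this function does not carry attribute #1, so vscale is not
; known to be a fixed value here.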
define <vscale x 4 x float> @scalable_store_to_fixed_load_unknown_vscale(<vscale x 4 x float> %.coerce) {
; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> [[TMP1]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[CAST_SCALABLE]]
;
entry:
  %retval = alloca { <16 x float> }
  %0 = fadd <vscale x 4 x float> %.coerce, %.coerce
  store <vscale x 4 x float> %0, ptr %retval
  %1 = load <16 x float>, ptr %retval
  %cast.scalable = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> poison, <16 x float> %1, i64 0)
  ret <vscale x 4 x float> %cast.scalable
}

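; The <32 x float> load is a different size from the stored scalable vector.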
define <vscale x 4 x float> @scalable_store_to_fixed_load_size_missmatch(<vscale x 4 x float> %.coerce) #1 {
; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v32f32(<vscale x 4 x float> poison, <32 x float> [[TMP1]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[CAST_SCALABLE]]
;
entry:
  %retval = alloca { <32 x float> }
  %0 = fadd <vscale x 4 x float> %.coerce, %.coerce
  store <vscale x 4 x float> %0, ptr %retval
  %1 = load <32 x float>, ptr %retval
  %cast.scalable = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v32f32(<vscale x 4 x float> poison, <32 x float> %1, i64 0)
  ret <vscale x 4 x float> %cast.scalable
}

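; The load type (<16 x i32>) differs from the stored element type (float).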
define <vscale x 4 x i32> @scalable_store_to_fixed_load_different_types(<vscale x 4 x float> %a) #1 {
; CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP0]], i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[CAST_SCALABLE]]
;
entry:
  %ptr = alloca { <16 x float> }
  store <vscale x 4 x float> %a, ptr %ptr
  %1 = load <16 x i32>, ptr %ptr
  %cast.scalable = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> %1, i64 0)
  ret <vscale x 4 x i32> %cast.scalable
}

; This function does not have a fixed vscale, but the loaded vector is still
; known to be smaller than or equal in size to the stored vector.
define <4 x float> @scalable_store_to_small_fixed_load(<vscale x 4 x float> %a) {