@@ -631,3 +631,105 @@ func.func @shape_cast_size1_vector(%arg0 : vector<f32>) -> vector<1xf32> {
  %1 = vector.shape_cast %arg0 : vector<f32> to vector<1xf32>
  return %1 : vector<1xf32>
}
+
+// -----
+
+module attributes {
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
+  } {
+
+// CHECK-LABEL: @vector_load
+// CHECK-SAME: (%[[ARG0:.*]]: memref<4xf32, #spirv.storage_class<StorageBuffer>>)
+// CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<4xf32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[S1:.+]] = builtin.unrealized_conversion_cast %[[C0]] : index to i32
+// CHECK: %[[CST1:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST2:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST3:.+]] = spirv.Constant 1 : i32
+// CHECK: %[[S2:.+]] = spirv.IMul %[[CST3]], %[[S1]] : i32
+// CHECK: %[[S3:.+]] = spirv.IAdd %[[CST2]], %[[S2]] : i32
+// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S3]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+// CHECK: %[[S5:.+]] = spirv.Bitcast %[[S4]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
+// CHECK: %[[R0:.+]] = spirv.Load "StorageBuffer" %[[S5]] : vector<4xf32>
+// CHECK: return %[[R0]] : vector<4xf32>
+func.func @vector_load(%arg0 : memref<4xf32, #spirv.storage_class<StorageBuffer>>) -> vector<4xf32> {
+  %idx = arith.constant 0 : index
+  %cst_0 = arith.constant 0.000000e+00 : f32
+  %0 = vector.load %arg0[%idx] : memref<4xf32, #spirv.storage_class<StorageBuffer>>, vector<4xf32>
+  return %0: vector<4xf32>
+}
+
+// CHECK-LABEL: @vector_load_2d
+// CHECK-SAME: (%[[ARG0:.*]]: memref<4x4xf32, #spirv.storage_class<StorageBuffer>>) -> vector<4xf32> {
+// CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<4x4xf32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<16 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[S1:.+]] = builtin.unrealized_conversion_cast %[[C0]] : index to i32
+// CHECK: %[[C1:.+]] = arith.constant 1 : index
+// CHECK: %[[S2:.+]] = builtin.unrealized_conversion_cast %[[C1]] : index to i32
+// CHECK: %[[CST0_1:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST0_2:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST4:.+]] = spirv.Constant 4 : i32
+// CHECK: %[[S3:.+]] = spirv.IMul %[[CST4]], %[[S1]] : i32
+// CHECK: %[[S4:.+]] = spirv.IAdd %[[CST0_2]], %[[S3]] : i32
+// CHECK: %[[CST1:.+]] = spirv.Constant 1 : i32
+// CHECK: %[[S5:.+]] = spirv.IMul %[[CST1]], %[[S2]] : i32
+// CHECK: %[[S6:.+]] = spirv.IAdd %[[S4]], %[[S5]] : i32
+// CHECK: %[[S7:.+]] = spirv.AccessChain %[[S0]][%[[CST0_1]], %[[S6]]] : !spirv.ptr<!spirv.struct<(!spirv.array<16 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+// CHECK: %[[S8:.+]] = spirv.Bitcast %[[S7]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
+// CHECK: %[[R0:.+]] = spirv.Load "StorageBuffer" %[[S8]] : vector<4xf32>
+// CHECK: return %[[R0]] : vector<4xf32>
+func.func @vector_load_2d(%arg0 : memref<4x4xf32, #spirv.storage_class<StorageBuffer>>) -> vector<4xf32> {
+  %idx_0 = arith.constant 0 : index
+  %idx_1 = arith.constant 1 : index
+  %0 = vector.load %arg0[%idx_0, %idx_1] : memref<4x4xf32, #spirv.storage_class<StorageBuffer>>, vector<4xf32>
+  return %0: vector<4xf32>
+}
+
+// CHECK-LABEL: @vector_store
+// CHECK-SAME: (%[[ARG0:.*]]: memref<4xf32, #spirv.storage_class<StorageBuffer>>
+// CHECK-SAME: %[[ARG1:.*]]: vector<4xf32>
+// CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<4xf32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[S1:.+]] = builtin.unrealized_conversion_cast %[[C0]] : index to i32
+// CHECK: %[[CST1:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST2:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST3:.+]] = spirv.Constant 1 : i32
+// CHECK: %[[S2:.+]] = spirv.IMul %[[CST3]], %[[S1]] : i32
+// CHECK: %[[S3:.+]] = spirv.IAdd %[[CST2]], %[[S2]] : i32
+// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S3]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+// CHECK: %[[S5:.+]] = spirv.Bitcast %[[S4]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
+// CHECK: spirv.Store "StorageBuffer" %[[S5]], %[[ARG1]] : vector<4xf32>
+func.func @vector_store(%arg0 : memref<4xf32, #spirv.storage_class<StorageBuffer>>, %arg1 : vector<4xf32>) {
+  %idx = arith.constant 0 : index
+  vector.store %arg1, %arg0[%idx] : memref<4xf32, #spirv.storage_class<StorageBuffer>>, vector<4xf32>
+  return
+}
+
+// CHECK-LABEL: @vector_store_2d
+// CHECK-SAME: (%[[ARG0:.*]]: memref<4x4xf32, #spirv.storage_class<StorageBuffer>>
+// CHECK-SAME: %[[ARG1:.*]]: vector<4xf32>
+// CHECK: %[[S0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<4x4xf32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<16 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[S1:.+]] = builtin.unrealized_conversion_cast %[[C0]] : index to i32
+// CHECK: %[[C1:.+]] = arith.constant 1 : index
+// CHECK: %[[S2:.+]] = builtin.unrealized_conversion_cast %[[C1]] : index to i32
+// CHECK: %[[CST0_1:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST0_2:.+]] = spirv.Constant 0 : i32
+// CHECK: %[[CST4:.+]] = spirv.Constant 4 : i32
+// CHECK: %[[S3:.+]] = spirv.IMul %[[CST4]], %[[S1]] : i32
+// CHECK: %[[S4:.+]] = spirv.IAdd %[[CST0_2]], %[[S3]] : i32
+// CHECK: %[[CST1:.+]] = spirv.Constant 1 : i32
+// CHECK: %[[S5:.+]] = spirv.IMul %[[CST1]], %[[S2]] : i32
+// CHECK: %[[S6:.+]] = spirv.IAdd %[[S4]], %[[S5]] : i32
+// CHECK: %[[S7:.+]] = spirv.AccessChain %[[S0]][%[[CST0_1]], %[[S6]]] : !spirv.ptr<!spirv.struct<(!spirv.array<16 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+// CHECK: %[[S8:.+]] = spirv.Bitcast %[[S7]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
+// CHECK: spirv.Store "StorageBuffer" %[[S8]], %[[ARG1]] : vector<4xf32>
+func.func @vector_store_2d(%arg0 : memref<4x4xf32, #spirv.storage_class<StorageBuffer>>, %arg1 : vector<4xf32>) {
+  %idx_0 = arith.constant 0 : index
+  %idx_1 = arith.constant 1 : index
+  vector.store %arg1, %arg0[%idx_0, %idx_1] : memref<4x4xf32, #spirv.storage_class<StorageBuffer>>, vector<4xf32>
+  return
+}
+
+} // end module