@@ -18,9 +18,9 @@ define <256 x i32> @combine_store_2user(ptr%p) {
; CHECK-NEXT: [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT: [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[TMP1]], i64 64, x86_amx [[T1]])
- ; CHECK-NEXT: [[TMP3:%.*]] = load <256 x i32>, ptr [[TMP1]], align 1024
+ ; CHECK-NEXT: [[TMP2:%.*]] = load <256 x i32>, ptr [[TMP1]], align 1024
; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64, x86_amx [[T1]])
- ; CHECK-NEXT: ret <256 x i32> [[TMP3]]
+ ; CHECK-NEXT: ret <256 x i32> [[TMP2]]
;
%t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
%t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
@@ -30,8 +30,8 @@ define <256 x i32> @combine_store_2user(ptr%p) {

define void @combine_load(ptr%p, ptr%p2) {
; CHECK-LABEL: @combine_load(
- ; CHECK-NEXT: [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64)
- ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP2]])
+ ; CHECK-NEXT: [[TMP1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64)
+ ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP1]])
; CHECK-NEXT: ret void
;
%t1 = load <256 x i32>, ptr %p, align 64
@@ -42,9 +42,9 @@ define void @combine_load(ptr%p, ptr%p2) {

define void @combine_cast_across_store(ptr%p, ptr%p2) {
; CHECK-LABEL: @combine_cast_across_store(
- ; CHECK-NEXT: [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64)
+ ; CHECK-NEXT: [[TMP1:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[P:%.*]], i64 64)
; CHECK-NEXT: store <256 x i32> zeroinitializer, ptr [[P]], align 64
- ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP2]])
+ ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP1]])
; CHECK-NEXT: ret void
;
%t1 = load <256 x i32>, ptr %p, align 64
@@ -59,8 +59,8 @@ define <256 x i32> @combine_load_2user(ptr%p, ptr%p2) {
; CHECK-NEXT: [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT: [[T1:%.*]] = load <256 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT: store <256 x i32> [[T1]], ptr [[TMP1]], align 1024
- ; CHECK-NEXT: [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[TMP1]], i64 64)
- ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP3]])
+ ; CHECK-NEXT: [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, ptr [[TMP1]], i64 64)
+ ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP2]])
; CHECK-NEXT: ret <256 x i32> [[T1]]
;
%t1 = load <256 x i32>, ptr %p, align 64
@@ -75,9 +75,9 @@ define <256 x i32> @combine_load_3user(ptr%p, ptr%p2) {
; CHECK-NEXT: [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT: [[T1:%.*]] = load <256 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT: store <256 x i32> [[T1]], ptr [[TMP1]], align 1024
- ; CHECK-NEXT: [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, ptr [[TMP1]], i64 16)
- ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP3]])
- ; CHECK-NEXT: [[TMP4:%.*]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 16, i16 64, x86_amx [[TMP3]], x86_amx [[TMP3]], x86_amx [[TMP3]])
+ ; CHECK-NEXT: [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, ptr [[TMP1]], i64 16)
+ ; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 16, i16 64, ptr [[P2:%.*]], i64 64, x86_amx [[TMP2]])
+ ; CHECK-NEXT: [[TMP3:%.*]] = call x86_amx @llvm.x86.tdpbssd.internal(i16 16, i16 16, i16 64, x86_amx [[TMP2]], x86_amx [[TMP2]], x86_amx [[TMP2]])
; CHECK-NEXT: ret <256 x i32> [[T1]]
;
%t1 = load <256 x i32>, ptr %p, align 64
@@ -88,6 +88,48 @@ define <256 x i32> @combine_load_3user(ptr%p, ptr%p2) {
ret <256 x i32> %t3
}

+ ; The shape is loaded after the tile.
+ %struct.__tile1024i_str = type <{ i16, i16, [60 x i8], <256 x i32> }>
+ define void @test_tile_dpbssd(ptr byval(%struct.__tile1024i_str) align 64 %a, ptr byval(%struct.__tile1024i_str) align 64 %b, ptr byval(%struct.__tile1024i_str) align 64 %c) {
+ ; CHECK-LABEL: @test_tile_dpbssd(
+ ; CHECK-NEXT: entry:
+ ; CHECK-NEXT: [[TMP0:%.*]] = alloca <256 x i32>, align 64
+ ; CHECK-NEXT: [[B_ROW_PTR:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 2
+ ; CHECK-NEXT: [[B_ROW:%.*]] = load i16, ptr [[B_ROW_PTR]], align 2
+ ; CHECK-NEXT: [[B_TILE_PTR:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64
+ ; CHECK-NEXT: [[B_TILE:%.*]] = load <256 x i32>, ptr [[B_TILE_PTR]], align 64
+ ; CHECK-NEXT: store <256 x i32> [[B_TILE]], ptr [[TMP0]], align 1024
+ ; CHECK-NEXT: [[A_ROW:%.*]] = load i16, ptr [[A:%.*]], align 64
+ ; CHECK-NEXT: [[A_COL_PTR:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2
+ ; CHECK-NEXT: [[A_COL:%.*]] = load i16, ptr [[A_COL_PTR]], align 2
+ ; CHECK-NEXT: [[TMP1:%.*]] = udiv i16 [[A_COL]], 4
+ ; CHECK-NEXT: [[A_TILE_PTR:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64
+ ; CHECK-NEXT: [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[A_ROW]], i16 [[A_COL]], ptr [[A_TILE_PTR]], i64 64)
+ ; CHECK-NEXT: [[C_TILE_PTR:%.*]] = getelementptr inbounds [[STRUCT___TILE1024I_STR:%.*]], ptr [[C:%.*]], i64 0, i32 3
+ ; CHECK-NEXT: [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[A_ROW]], i16 [[B_ROW]], ptr [[C_TILE_PTR]], i64 64)
+ ; CHECK-NEXT: [[TMP4:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[TMP1]], i16 [[B_ROW]], ptr [[TMP0]], i64 64)
+ ; CHECK-NEXT: [[RES:%.*]] = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 [[A_ROW]], i16 [[B_ROW]], i16 [[A_COL]], x86_amx [[TMP3]], x86_amx [[TMP2]], x86_amx [[TMP4]])
+ ; CHECK-NEXT: ret void
+ ;
+ entry:
+ %b.row.ptr = getelementptr inbounds i8, ptr %b, i64 2
+ %b.row = load i16, ptr %b.row.ptr, align 2
+ %b.tile.ptr = getelementptr inbounds i8, ptr %b, i64 64
+ %b.tile = load <256 x i32>, ptr %b.tile.ptr, align 64
+ %a.row = load i16, ptr %a, align 64
+ %a.col.ptr = getelementptr inbounds i8, ptr %a, i64 2
+ %a.col = load i16, ptr %a.col.ptr, align 2
+ %a.tile.ptr = getelementptr inbounds i8, ptr %a, i64 64
+ %a.tile = load <256 x i32>, ptr %a.tile.ptr, align 64
+ %c.tile.ptr = getelementptr inbounds %struct.__tile1024i_str, ptr %c, i64 0, i32 3
+ %c.tile = load <256 x i32>, ptr %c.tile.ptr, align 64
+ %c.amx = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %c.tile)
+ %a.amx = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %a.tile)
+ %b.amx = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %b.tile)
+ %res = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %a.row, i16 %b.row, i16 %a.col, x86_amx %c.amx, x86_amx %a.amx, x86_amx %b.amx)
+ ret void
+ }
+
declare x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32>)
declare <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx)
declare x86_amx @llvm.x86.tilezero.internal(i16, i16)