This repository was archived by the owner on Mar 28, 2023. It is now read-only.

Commit 85caea5

[SYCL] Add new tests for int8 and bfloat16 automatic transpose and VNNI transform (#1415)
- Add new tests for automatic transpose and VNNI transform
- Fix a bug in bfloat16's testcase
- Add testcases for subB (int8, colmajor/rowmajor) and subA (int8, colmajor/rowmajor)
1 parent 43a41ea commit 85caea5

6 files changed: 512 additions, 0 deletions
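
Before the diffs, a minimal sketch of what the "VNNI transform" in the commit title does to a bf16 matrix. This is illustrative only and not part of the test code; the function name and shapes are assumptions for the sketch. For 16-bit elements, VNNI interleaves pairs of rows so that each column of an output row holds two consecutive K-elements in adjacent memory, which is the layout the AMX dot-product hardware consumes.

#include <cstddef>
#include <cstdint>
#include <vector>

// Repack a row-major K x N matrix of bf16 bit patterns into VNNI layout:
// logically (K/2) x N x 2, with packed[k/2][n][k%2] = b[k][n]. K is assumed
// even, matching the TK = 16 tile size used by these tests.
std::vector<uint16_t> vnni_pack_bf16(const std::vector<uint16_t> &b, size_t K,
                                     size_t N) {
  std::vector<uint16_t> packed(K * N);
  for (size_t k = 0; k < K; ++k)
    for (size_t n = 0; n < N; ++n)
      packed[(k / 2) * (N * 2) + n * 2 + (k % 2)] = b[k * N + n];
  return packed;
}

The col-major variants below additionally transpose B before this repacking; per the test comments, both steps now happen automatically inside joint_matrix_load.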
joint_matrix_bfloat16_colmajorA_colmajorB.cpp (29 additions, 0 deletions)

//==-- joint_matrix_bfloat16_colmajorA_colmajorB.cpp - DPC++ joint_matrix--==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// REQUIRES: matrix

// RUN: %clangxx -fsycl %s -o %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// CHECK: passed

// This tests support of the col-major layout for matrix B, which does a
// transpose and then a VNNI transform. This is currently only available on
// AMX.

// XFAIL: gpu

#include <iostream>
#include <sycl/sycl.hpp>

using namespace sycl;
using namespace sycl::ext::oneapi::experimental::matrix;
using bfloat16 = sycl::ext::oneapi::experimental::bfloat16;

#define SG_SZ 16

#include "joint_matrix_bfloat16_colmajorA_colmajorB_impl.hpp"
joint_matrix_bfloat16_colmajorA_colmajorB_impl.hpp (143 additions, 0 deletions)

#define TM 8
#define TN SG_SZ
#define TK 16
#define BF16_EPSILON 0.00781250

template <typename T, size_t NUM_ROWS, size_t NUM_COLS> struct big_matrix {
private:
  T *mat;

public:
  T *get_data() { return mat; }
  void set_data(T *data) { mat = data; }
  big_matrix(T *data) : mat(data) {}
};

template <typename T1, typename T2, size_t M, size_t N, size_t K>
void matrix_multiply(big_matrix<T1, M, N> &C, big_matrix<T2, M, K> &A,
                     big_matrix<T2, K, N> &B) {
  size_t NDRangeM = M / TM;
  size_t NDRangeN = N / TN;
  buffer<bfloat16, 2> bufA(A.get_data(), range<2>(M, K));
  buffer<bfloat16, 2> bufB(B.get_data(), range<2>(K, N));
  buffer<float, 2> bufC((float *)C.get_data(), range<2>(M, N));

  queue q;
  q.submit([&](handler &cgh) {
     auto accC = bufC.get_access<access::mode::read_write>(cgh);
     auto accA = bufA.get_access<access::mode::read_write>(cgh);
     auto accB = bufB.get_access<access::mode::read_write>(cgh);

     cgh.parallel_for<class imatrix>(
         nd_range<2>({NDRangeM, NDRangeN * SG_SZ}, {1, 1 * SG_SZ}),
         [=](nd_item<2> spmd_item) [[intel::reqd_sub_group_size(SG_SZ)]] {
           // The submatrix API has to be accessed by all the work-items in a
           // sub-group; these functions are called once per sub-group, with
           // no code divergence between the work-items.
           const auto global_idx = spmd_item.get_global_id(0);
           const auto global_idy = spmd_item.get_global_id(1);
           const auto sg_startx = global_idx - spmd_item.get_local_id(0);
           const auto sg_starty = global_idy - spmd_item.get_local_id(1);

           ext::oneapi::sub_group sg = spmd_item.get_sub_group();
           joint_matrix<bfloat16, TM, TK> sub_a(sg);
           joint_matrix<bfloat16, TK, TN, matrix_layout::packed_b> sub_b(sg);
           joint_matrix<float, TM, TN> sub_c(sg);

           joint_matrix_load(sg, sub_c,
                             accC.get_pointer() + (sg_startx * TM) * N +
                                 sg_starty / SG_SZ * TN,
                             N, matrix_layout::row_major);
           for (int k = 0; k < K / TK; k += 1) {
             joint_matrix_load(
                 sg, sub_a, accA.get_pointer() + (k * TK) * M + sg_startx * TM,
                 M, matrix_layout::col_major);
             joint_matrix_load(sg, sub_b,
                               accB.get_pointer() +
                                   (sg_starty / SG_SZ * TN) * K + k * TK,
                               K, matrix_layout::col_major);
             sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c);
           }
           joint_matrix_store(sg, sub_c,
                              accC.get_pointer() + (sg_startx * TM) * N +
                                  sg_starty / SG_SZ * TN,
                              N, matrix_layout::row_major);
         }); // parallel for
   }).wait();
}

static constexpr size_t MATRIX_M = TM * 2;
static constexpr size_t MATRIX_N = TN * 2;
static constexpr size_t MATRIX_K = TK * 2;
bfloat16 A[MATRIX_K][MATRIX_M]; // col-major backing store for A
bfloat16 B[MATRIX_N][MATRIX_K]; // col-major backing store for B
unsigned short Aref[MATRIX_K][MATRIX_M];
unsigned short Bref[MATRIX_N][MATRIX_K];
float C[MATRIX_M][MATRIX_N];
float D[MATRIX_M][MATRIX_N];

float make_fp32(short x) {
  unsigned int y = x;
  y = y << 16;
  float *res = reinterpret_cast<float *>(&y);
  return *res;
}

unsigned short make_bf16(float x) {
  int *res = reinterpret_cast<int *>(&x);
  *res = *res >> 16;
  return (unsigned short)*res;
}

void matrix_multiply_ref(int M, int N, int K) {
  for (int m = 0; m < M; m++)
    for (int n = 0; n < N; n++) {
      for (int k = 0; k < K; k++) {
        D[m][n] += make_fp32(Aref[k][m]) * make_fp32(Bref[n][k]);
      }
    }
}

int main() {
  for (int i = 0; i < MATRIX_K; i++) {
    for (int j = 0; j < MATRIX_M; j++) {
      // bfloat16 is created from bits via unsigned short, since conversion
      // from float to bfloat16 is not supported on the host side yet.
      A[i][j] = bfloat16::from_bits(make_bf16(1.0f * (i + j)));
      Aref[i][j] = make_bf16(1.0f * (i + j));
    }
  }
  for (int i = 0; i < MATRIX_N; i++) {
    for (int j = 0; j < MATRIX_K; j++) {
      B[i][j] = bfloat16::from_bits(make_bf16(2.0f * i + 3.0f * j));
      Bref[i][j] = make_bf16(2.0f * i + 3.0f * j);
    }
  }
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      C[i][j] = 1.0;
      D[i][j] = 1.0;
    }
  }

  big_matrix<float, MATRIX_M, MATRIX_N> MC((float *)&C);
  big_matrix<float, MATRIX_M, MATRIX_N> MD((float *)&D);
  big_matrix<bfloat16, MATRIX_M, MATRIX_K> MA((bfloat16 *)&A);
  big_matrix<bfloat16, MATRIX_K, MATRIX_N> MB((bfloat16 *)&B);
  matrix_multiply(MC, MA, MB);
  matrix_multiply_ref(MATRIX_M, MATRIX_N, MATRIX_K);

  bool res = true;
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      if ((fabs(C[i][j]) - fabs(D[i][j])) > BF16_EPSILON)
        res = false;
    }
  }
  if (res)
    std::cout << "passed\n";
  else
    std::cout << "failed\n";
}
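
The make_bf16/make_fp32 helpers above rely on bfloat16 being the upper 16 bits of an IEEE-754 binary32 value, and BF16_EPSILON (0.0078125 = 2^-7) matches bf16's seven explicit mantissa bits. A standalone sketch of the same round trip, using memcpy instead of reinterpret_cast to stay clear of strict-aliasing issues (illustrative only, not a proposed change to the test):

#include <cstdio>
#include <cstring>

// Truncate a float to bf16 bits: keep sign, exponent, and top 7 mantissa bits.
unsigned short to_bf16_bits(float x) {
  unsigned int u;
  std::memcpy(&u, &x, sizeof(u));
  return (unsigned short)(u >> 16);
}

// Widen bf16 bits back to float: the low 16 mantissa bits come back as zero.
float from_bf16_bits(unsigned short b) {
  unsigned int u = (unsigned int)b << 16;
  float x;
  std::memcpy(&x, &u, sizeof(x));
  return x;
}

int main() {
  // 1.0f is 0x3F800000, so its round trip is exact: prints 1.000000.
  std::printf("%f\n", from_bf16_bits(to_bf16_bits(1.0f)));
}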
joint_matrix_bfloat16_rowmajorA_rowmajorB.cpp (29 additions, 0 deletions)

//==--joint_matrix_bfloat16_rowmajorA_rowmajorB.cpp - DPC++ joint_matrix---==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// REQUIRES: matrix

// RUN: %clangxx -fsycl %s -o %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// CHECK: passed

// This tests support of the row-major layout for matrix B, which does an
// automatic VNNI transform. This is currently only available on AMX.

// XFAIL: gpu

#include <iostream>
#include <sycl/sycl.hpp>

using namespace sycl;
using namespace sycl::ext::oneapi::experimental::matrix;
using bfloat16 = sycl::ext::oneapi::experimental::bfloat16;

#define SG_SZ 16

#include "joint_matrix_bfloat16_rowmajorA_rowmajorB_impl.hpp"
joint_matrix_bfloat16_rowmajorA_rowmajorB_impl.hpp (143 additions, 0 deletions)

#define TM 8
#define TN SG_SZ
#define TK 16
#define BF16_EPSILON 0.00781250

template <typename T, size_t NUM_ROWS, size_t NUM_COLS> struct big_matrix {
private:
  T *mat;

public:
  T *get_data() { return mat; }
  void set_data(T *data) { mat = data; }
  big_matrix(T *data) : mat(data) {}
};

template <typename T1, typename T2, size_t M, size_t N, size_t K>
void matrix_multiply(big_matrix<T1, M, N> &C, big_matrix<T2, M, K> &A,
                     big_matrix<T2, K, N> &B) {
  size_t NDRangeM = M / TM;
  size_t NDRangeN = N / TN;
  buffer<bfloat16, 2> bufA(A.get_data(), range<2>(M, K));
  buffer<bfloat16, 2> bufB(B.get_data(), range<2>(K, N));
  buffer<float, 2> bufC((float *)C.get_data(), range<2>(M, N));

  queue q;
  q.submit([&](handler &cgh) {
     auto accC = bufC.get_access<access::mode::read_write>(cgh);
     auto accA = bufA.get_access<access::mode::read_write>(cgh);
     auto accB = bufB.get_access<access::mode::read_write>(cgh);

     cgh.parallel_for<class imatrix>(
         nd_range<2>({NDRangeM, NDRangeN * SG_SZ}, {1, 1 * SG_SZ}),
         [=](nd_item<2> spmd_item) [[intel::reqd_sub_group_size(SG_SZ)]] {
           // The submatrix API has to be accessed by all the work-items in a
           // sub-group; these functions are called once per sub-group, with
           // no code divergence between the work-items.
           const auto global_idx = spmd_item.get_global_id(0);
           const auto global_idy = spmd_item.get_global_id(1);
           const auto sg_startx = global_idx - spmd_item.get_local_id(0);
           const auto sg_starty = global_idy - spmd_item.get_local_id(1);

           ext::oneapi::sub_group sg = spmd_item.get_sub_group();
           joint_matrix<bfloat16, TM, TK> sub_a(sg);
           joint_matrix<bfloat16, TK, TN, matrix_layout::packed_b> sub_b(sg);
           joint_matrix<float, TM, TN> sub_c(sg);

           joint_matrix_load(sg, sub_c,
                             accC.get_pointer() + (sg_startx * TM) * N +
                                 sg_starty / SG_SZ * TN,
                             N, matrix_layout::row_major);
           for (int k = 0; k < K / TK; k += 1) {
             joint_matrix_load(
                 sg, sub_a, accA.get_pointer() + (sg_startx * TM) * K + k * TK,
                 K, matrix_layout::row_major);
             joint_matrix_load(sg, sub_b,
                               accB.get_pointer() + (k * TK) * N +
                                   sg_starty / SG_SZ * TN,
                               N, matrix_layout::row_major);
             sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c);
           }
           joint_matrix_store(sg, sub_c,
                              accC.get_pointer() + (sg_startx * TM) * N +
                                  sg_starty / SG_SZ * TN,
                              N, matrix_layout::row_major);
         }); // parallel for
   }).wait();
}

static constexpr size_t MATRIX_M = TM * 2;
static constexpr size_t MATRIX_N = TN * 2;
static constexpr size_t MATRIX_K = TK * 2;
bfloat16 A[MATRIX_M][MATRIX_K]; // row-major backing store for A
bfloat16 B[MATRIX_K][MATRIX_N]; // row-major backing store for B
unsigned short Aref[MATRIX_M][MATRIX_K];
unsigned short Bref[MATRIX_K][MATRIX_N];
float C[MATRIX_M][MATRIX_N];
float D[MATRIX_M][MATRIX_N];

float make_fp32(short x) {
  unsigned int y = x;
  y = y << 16;
  float *res = reinterpret_cast<float *>(&y);
  return *res;
}

unsigned short make_bf16(float x) {
  int *res = reinterpret_cast<int *>(&x);
  *res = *res >> 16;
  return (unsigned short)*res;
}

void matrix_multiply_ref(int M, int N, int K) {
  for (int m = 0; m < M; m++)
    for (int n = 0; n < N; n++) {
      for (int k = 0; k < K; k++) {
        D[m][n] += make_fp32(Aref[m][k]) * make_fp32(Bref[k][n]);
      }
    }
}

int main() {
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_K; j++) {
      // bfloat16 is created from bits via unsigned short, since conversion
      // from float to bfloat16 is not supported on the host side yet.
      A[i][j] = bfloat16::from_bits(make_bf16(1.0f * (i + j)));
      Aref[i][j] = make_bf16(1.0f * (i + j));
    }
  }
  for (int i = 0; i < MATRIX_K; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      B[i][j] = bfloat16::from_bits(make_bf16(2.0f * i + 3.0f * j));
      Bref[i][j] = make_bf16(2.0f * i + 3.0f * j);
    }
  }
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      C[i][j] = 1.0;
      D[i][j] = 1.0;
    }
  }

  big_matrix<float, MATRIX_M, MATRIX_N> MC((float *)&C);
  big_matrix<float, MATRIX_M, MATRIX_N> MD((float *)&D);
  big_matrix<bfloat16, MATRIX_M, MATRIX_K> MA((bfloat16 *)&A);
  big_matrix<bfloat16, MATRIX_K, MATRIX_N> MB((bfloat16 *)&B);
  matrix_multiply(MC, MA, MB);
  matrix_multiply_ref(MATRIX_M, MATRIX_N, MATRIX_K);

  bool res = true;
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      if ((fabs(C[i][j]) - fabs(D[i][j])) > BF16_EPSILON)
        res = false;
    }
  }
  if (res)
    std::cout << "passed\n";
  else
    std::cout << "failed\n";
}
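
The two bf16 impl files compute the same product and differ only in how the A and B tiles are addressed: for a row-major matrix with leading dimension ld, element (r, c) lives at offset r * ld + c; for col-major it lives at c * ld + r, with ld equal to the number of rows. A short sketch of the two tile-base computations used above, with hypothetical helper and parameter names (lowercase to avoid colliding with the TM/TK macros):

#include <cstddef>

// Base offset of the tile_m x tile_k tile of A at tile coordinates
// (sg_startx, k), when A is stored row major with leading dimension ld_k,
// as in the rowmajorA_rowmajorB impl.
size_t a_tile_rowmajor(size_t sg_startx, size_t k, size_t tile_m,
                       size_t tile_k, size_t ld_k) {
  return (sg_startx * tile_m) * ld_k + k * tile_k;
}

// The same tile when A is stored col major with leading dimension ld_m,
// as in the colmajorA_colmajorB impl.
size_t a_tile_colmajor(size_t sg_startx, size_t k, size_t tile_m,
                       size_t tile_k, size_t ld_m) {
  return (k * tile_k) * ld_m + sg_startx * tile_m;
}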
joint_matrix_int8_colmajorA_colmajorB.cpp (28 additions, 0 deletions)

//==----- joint_matrix_int8_colmajorA_colmajorB.cpp - DPC++ joint_matrix---==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// REQUIRES: matrix

// RUN: %clangxx -fsycl %s -o %t.out
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out
// CHECK: passed

// This tests support of the col-major layout for matrix B, which does a
// transpose and then a VNNI transform. This is currently only available on
// AMX.

// XFAIL: gpu

#include <iostream>
#include <sycl/sycl.hpp>

using namespace sycl;
using namespace sycl::ext::oneapi::experimental::matrix;

#define SG_SZ 16

#include "joint_matrix_int8_colmajorA_colmajorB_impl.hpp"
