[SYCL] Add oneDNN primitive support #9091
Merged
Commits (9):
- 4cffe91 add onednn (luoyu-intel)
- 3d0a64f add sycl_f16 (luoyu-intel)
- 4dc5515 add dnnl stream (luoyu-intel)
- c751e65 add engine map (luoyu-intel)
- b830685 fix (luoyu-intel)
- 267af4e format (luoyu-intel)
- af1b276 use dnnl for intel only (luoyu-intel)
- 2ee02b2 use fp16fp16fp16 (luoyu-intel)
- 3f5eaea update doc (luoyu-intel)
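The "use dnnl for intel only" commit indicates the oneDNN path is gated to Intel hardware, but that check is not part of the diff shown below. The following is only a hypothetical sketch of one way such a gate could look; the helper name and the vendor-string test are illustrative assumptions, not the PR's code.

```cpp
// Hypothetical sketch only: restrict the oneDNN fast path to Intel devices.
#include <string>
#include <sycl/sycl.hpp>

// Illustrative helper (not from the PR): report whether a SYCL device
// looks like an Intel device, based on its reported vendor string.
static bool device_is_intel(const sycl::device & dev) {
    const std::string vendor = dev.get_info<sycl::info::device::vendor>();
    return vendor.find("Intel") != std::string::npos;
}
```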
New file (101 lines added):

```cpp
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#ifndef GGML_SYCL_GEMM_HPP
#define GGML_SYCL_GEMM_HPP

#include <fstream>
#include <iostream>
#include <unordered_map>

#include "ggml-sycl.h"

#if GGML_SYCL_DNNL

#include "dnnl.hpp"
#include "dnnl_sycl.hpp"

class DnnlGemmWrapper {
public:
    using dt = dnnl::memory::data_type;
    using tag = dnnl::memory::format_tag;

    // Map an element type to the matching oneDNN data type.
    template<typename T>
    static constexpr dt to_dt() {
        if constexpr (std::is_same_v<T, float>) return dt::f32;
        else if constexpr (std::is_same_v<T, sycl::half>) return dt::f16;
        else static_assert(sizeof(T) == 0, "unsupported data type");
    }

    // Row-major GEMM, C(m x n) = A(m x k) * B(k x n), creating the oneDNN
    // engine and stream from the SYCL queue on each call.
    static inline void row_gemm(sycl::queue& q, bool a_trans,
                                bool b_trans, int m, int n, int k,
                                const void* a, dt at, const void* b, dt bt, void* c, dt ct)
    {
        // Get the device and context associated with the queue.
        sycl::device dev = q.get_device();
        sycl::context ctx = q.get_context();
        const dnnl::engine eng = dnnl::sycl_interop::make_engine(dev, ctx);
        const dnnl::stream stream = dnnl::sycl_interop::make_stream(eng, q);
        dnnl::memory::dims a_dims = { m, k };
        dnnl::memory::dims b_dims = { k, n };
        dnnl::memory::dims c_dims = { m, n };
        const auto a_in_md = dnnl::memory::desc(a_dims, at, a_trans ? tag::ba : tag::ab);
        const auto b_in_md = dnnl::memory::desc(b_dims, bt, b_trans ? tag::ba : tag::ab);
        const auto c_md = dnnl::memory::desc(c_dims, ct, tag::ab);
        auto a_mem = dnnl::memory(a_in_md, eng, (void*)a);
        auto b_mem = dnnl::memory(b_in_md, eng, (void*)b);
        auto matmul_pd = dnnl::matmul::primitive_desc(eng, a_in_md, b_in_md, c_md);
        auto c_mem = dnnl::memory(matmul_pd.dst_desc(), eng, c);

        // Create the primitive.
        auto matmul_prim = dnnl::matmul(matmul_pd);
        // Primitive arguments.
        std::unordered_map<int, dnnl::memory> matmul_args;
        matmul_args.insert({ DNNL_ARG_SRC, a_mem });
        matmul_args.insert({ DNNL_ARG_WEIGHTS, b_mem });
        matmul_args.insert({ DNNL_ARG_DST, c_mem });

        matmul_prim.execute(stream, matmul_args);
    }

    // Overload that reuses an existing oneDNN stream (and its engine)
    // instead of rebuilding them from a queue on every call.
    static inline void row_gemm(const dnnl::stream& stream, bool a_trans,
                                bool b_trans, int m, int n, int k,
                                const void* a, dt at, const void* b, dt bt, void* c, dt ct)
    {
        auto const eng = stream.get_engine();
        dnnl::memory::dims a_dims = { m, k };
        dnnl::memory::dims b_dims = { k, n };
        dnnl::memory::dims c_dims = { m, n };
        const auto a_in_md = dnnl::memory::desc(a_dims, at, a_trans ? tag::ba : tag::ab);
        const auto b_in_md = dnnl::memory::desc(b_dims, bt, b_trans ? tag::ba : tag::ab);
        const auto c_md = dnnl::memory::desc(c_dims, ct, tag::ab);
        auto a_mem = dnnl::memory(a_in_md, eng, (void*)a);
        auto b_mem = dnnl::memory(b_in_md, eng, (void*)b);
        auto matmul_pd = dnnl::matmul::primitive_desc(eng, a_in_md, b_in_md, c_md);
        auto c_mem = dnnl::memory(matmul_pd.dst_desc(), eng, c);

        // Create the primitive.
        auto matmul_prim = dnnl::matmul(matmul_pd);
        // Primitive arguments.
        std::unordered_map<int, dnnl::memory> matmul_args;
        matmul_args.insert({ DNNL_ARG_SRC, a_mem });
        matmul_args.insert({ DNNL_ARG_WEIGHTS, b_mem });
        matmul_args.insert({ DNNL_ARG_DST, c_mem });

        matmul_prim.execute(stream, matmul_args);
    }
};

#endif

#endif // GGML_SYCL_GEMM_HPP
```
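The PR itself ships no standalone example, so here is a minimal hypothetical usage sketch of the queue-based overload. The header path "gemm.hpp", the assumption that the build defines GGML_SYCL_DNNL=1, and the GPU selection are all illustrative assumptions, not part of the PR.

```cpp
// Hypothetical usage sketch (not from the PR): row-major C = A * B on the
// queue's device, with A and B filled with ones so every C element equals k.
// Assumes the header above is reachable as "gemm.hpp" and GGML_SYCL_DNNL=1.
#include <cstdio>
#include <vector>
#include <sycl/sycl.hpp>
#include "gemm.hpp"

int main() {
    sycl::queue q{ sycl::gpu_selector_v };
    const int m = 4, n = 16, k = 8;

    // Device buffers for row-major A (m x k), B (k x n), C (m x n).
    float * a = sycl::malloc_device<float>(m * k, q);
    float * b = sycl::malloc_device<float>(k * n, q);
    float * c = sycl::malloc_device<float>(m * n, q);
    q.fill(a, 1.0f, m * k).wait();
    q.fill(b, 1.0f, k * n).wait();

    using W = DnnlGemmWrapper;
    W::row_gemm(q, /*a_trans=*/false, /*b_trans=*/false, m, n, k,
                a, W::to_dt<float>(), b, W::to_dt<float>(), c, W::to_dt<float>());
    q.wait();

    // Copy the result back and spot-check one element (expected value: k).
    std::vector<float> host(m * n);
    q.memcpy(host.data(), c, host.size() * sizeof(float)).wait();
    std::printf("c[0] = %f (expected %d)\n", host[0], k);

    sycl::free(a, q);
    sycl::free(b, q);
    sycl::free(c, q);
    return 0;
}
```

For repeated calls, the stream-based overload avoids rebuilding the engine and stream each time; the "add engine map" commit suggests the PR caches engines per device for the same reason.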