Commit 7c293e2

[SYCL] Add a leaf limit to the execution graph (#1070)
This patch adds a per-memory-object leaf limit to the command execution graph in order to avoid leaf bloat in applications that have an overwhelming number of command groups that can be executed in parallel. Limiting the number of leaves is necessary to reduce the performance overhead of regularly cleaning up finished command nodes. Whenever the limit is exceeded, the oldest leaf is added as a dependency of the new one instead.

Signed-off-by: Sergey Semenov <[email protected]>
1 parent 0bac4da commit 7c293e2
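
To illustrate the idea in isolation (this is not the patch's code; the actual implementation is in Scheduler::GraphBuilder::AddNodeToLeaves in graph_builder.cpp below): once a memory object's leaf list is at its limit, the oldest leaf is evicted from the list and wired in as a dependency of the incoming command, so the graph stays connected while the leaf count stays bounded. A minimal standalone C++ sketch with hypothetical Node/addNodeToLeaves names:

#include <cassert>
#include <cstddef>
#include <deque>
#include <vector>

// Hypothetical, simplified command node; real SYCL commands also carry
// requirements, queues, allocations, etc.
struct Node {
  std::vector<Node *> Deps;  // commands this node depends on
  std::vector<Node *> Users; // commands that depend on this node
};

// Sketch of the leaf-limit policy: keep at most Limit leaves per memory
// object. Once the list is full, the oldest leaf becomes a dependency of the
// new command instead of remaining a leaf.
void addNodeToLeaves(std::deque<Node *> &Leaves, Node *Cmd, std::size_t Limit) {
  if (Leaves.size() == Limit) {
    Node *Oldest = Leaves.front();
    Leaves.pop_front();
    Cmd->Deps.push_back(Oldest);  // new command depends on the evicted leaf
    Oldest->Users.push_back(Cmd); // evicted leaf records its new user
  }
  Leaves.push_back(Cmd);
}

int main() {
  const std::size_t Limit = 2;
  std::deque<Node *> Leaves;
  Node A, B, C;
  addNodeToLeaves(Leaves, &A, Limit);
  addNodeToLeaves(Leaves, &B, Limit);
  addNodeToLeaves(Leaves, &C, Limit); // A is evicted and becomes a dep of C
  assert(Leaves.size() == Limit);
  assert(C.Deps.size() == 1 && C.Deps[0] == &A);
  assert(A.Users.size() == 1 && A.Users[0] == &C);
}

The patch implements the same policy with CircularBuffer<Command *> leaf lists and DepDesc dependency descriptors, as shown in the diffs below.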

File tree

6 files changed: +283 -16 lines changed
Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
+//==---------------- circular_buffer.hpp - Circular buffer -----------------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#pragma once
+
+#include <CL/sycl/detail/defines.hpp>
+
+#include <deque>
+#include <utility>
+
+__SYCL_INLINE namespace cl {
+namespace sycl {
+namespace detail {
+
+// A partial implementation of a circular buffer: once its capacity is full,
+// new data overwrites the old.
+template <typename T> class CircularBuffer {
+public:
+  explicit CircularBuffer(size_t Capacity) : MCapacity{Capacity} {};
+
+  using value_type = T;
+  using pointer = T *;
+  using const_pointer = const T *;
+  using reference = T &;
+  using const_reference = const T &;
+
+  using iterator = typename std::deque<T>::iterator;
+  using const_iterator = typename std::deque<T>::const_iterator;
+
+  iterator begin() { return MValues.begin(); }
+
+  const_iterator begin() const { return MValues.begin(); }
+
+  iterator end() { return MValues.end(); }
+
+  const_iterator end() const { return MValues.end(); }
+
+  reference front() { return MValues.front(); }
+
+  const_reference front() const { return MValues.front(); }
+
+  reference back() { return MValues.back(); }
+
+  const_reference back() const { return MValues.back(); }
+
+  reference operator[](size_t Idx) { return MValues[Idx]; }
+
+  const_reference operator[](size_t Idx) const { return MValues[Idx]; }
+
+  size_t size() const { return MValues.size(); }
+
+  size_t capacity() const { return MCapacity; }
+
+  bool empty() const { return MValues.empty(); };
+
+  bool full() const { return MValues.size() == MCapacity; };
+
+  void push_back(T Val) {
+    if (MValues.size() == MCapacity)
+      MValues.pop_front();
+    MValues.push_back(std::move(Val));
+  }
+
+  void push_front(T Val) {
+    if (MValues.size() == MCapacity)
+      MValues.pop_back();
+    MValues.push_front(std::move(Val));
+  }
+
+  void pop_back() { MValues.pop_back(); }
+
+  void pop_front() { MValues.pop_front(); }
+
+  void erase(const_iterator Pos) { MValues.erase(Pos); }
+
+  void erase(const_iterator First, const_iterator Last) {
+    MValues.erase(First, Last);
+  }
+
+  void clear() { MValues.clear(); }
+
+private:
+  // Deque is used as the underlying container for double-ended push/pop
+  // operations and built-in iterator support. Frequent memory allocations
+  // and deallocations are a concern, switching to an array/vector might be a
+  // worthwhile optimization.
+  std::deque<T> MValues;
+  const size_t MCapacity;
+};
+
+} // namespace detail
+} // namespace sycl
+} // namespace cl
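
For reference, a minimal usage sketch of the class added above (the dedicated test later in this commit exercises it more thoroughly); it assumes the header is on the include path:

#include <CL/sycl/detail/circular_buffer.hpp>

#include <cassert>

int main() {
  cl::sycl::detail::CircularBuffer<int> CB{3};
  CB.push_back(1);
  CB.push_back(2);
  CB.push_back(3);
  assert(CB.full());
  CB.push_back(4); // capacity reached: the oldest element (1) is dropped
  assert(CB.size() == 3);
  assert(CB.front() == 2 && CB.back() == 4);
}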

sycl/include/CL/sycl/detail/scheduler/scheduler.hpp

Lines changed: 11 additions & 3 deletions
@@ -9,6 +9,7 @@
#pragma once

#include <CL/sycl/detail/cg.hpp>
+#include <CL/sycl/detail/circular_buffer.hpp>
#include <CL/sycl/detail/scheduler/commands.hpp>
#include <CL/sycl/detail/sycl_mem_obj_i.hpp>

@@ -32,21 +33,25 @@ using ContextImplPtr = std::shared_ptr<detail::context_impl>;
// The MemObjRecord is created for each memory object used in command
// groups. There should be only one MemObjRecord for SYCL memory object.
struct MemObjRecord {
+  MemObjRecord(ContextImplPtr CurContext, size_t LeafLimit)
+      : MReadLeaves{LeafLimit}, MWriteLeaves{LeafLimit}, MCurContext{
+                                                             CurContext} {}
+
  // Contains all allocation commands for the memory object.
  std::vector<AllocaCommandBase *> MAllocaCommands;

  // Contains latest read only commands working with memory object.
-  std::vector<Command *> MReadLeaves;
+  CircularBuffer<Command *> MReadLeaves;

  // Contains latest write commands working with memory object.
-  std::vector<Command *> MWriteLeaves;
+  CircularBuffer<Command *> MWriteLeaves;

  // The context which has the latest state of the memory object.
  ContextImplPtr MCurContext;

  // The flag indicates that the content of the memory object was/will be
  // modified. Used while deciding if copy back needed.
-  bool MMemModified;
+  bool MMemModified = false;
};

class Scheduler {
@@ -165,6 +170,9 @@ class Scheduler {
  std::set<Command *> findDepsForReq(MemObjRecord *Record, Requirement *Req,
                                     const ContextImplPtr &Context);

+  // Finds a command dependency corresponding to the record
+  DepDesc findDepForRecord(Command *Cmd, MemObjRecord *Record);
+
  // Searches for suitable alloca in memory record.
  AllocaCommandBase *findAllocaForReq(MemObjRecord *Record, Requirement *Req,
                                      const ContextImplPtr &Context);

sycl/source/detail/scheduler/graph_builder.cpp

Lines changed: 34 additions & 12 deletions
@@ -120,11 +120,9 @@ Scheduler::GraphBuilder::getOrInsertMemObjRecord(const QueueImplPtr &Queue,
  if (nullptr != Record)
    return Record;

-  MemObject->MRecord.reset(new MemObjRecord{/*MAllocaCommands*/ {},
-                                            /*MReadLeaves*/ {},
-                                            /*MWriteLeaves*/ {},
-                                            Queue->getContextImplPtr(),
-                                            /*MMemModified*/ false});
+  const size_t LeafLimit = 8;
+  MemObject->MRecord.reset(
+      new MemObjRecord{Queue->getContextImplPtr(), LeafLimit});

  MMemObjs.push_back(MemObject);
  return MemObject->MRecord.get();
@@ -153,10 +151,22 @@ void Scheduler::GraphBuilder::UpdateLeaves(const std::set<Command *> &Cmds,
void Scheduler::GraphBuilder::AddNodeToLeaves(MemObjRecord *Record,
                                              Command *Cmd,
                                              access::mode AccessMode) {
-  if (AccessMode == access::mode::read)
-    Record->MReadLeaves.push_back(Cmd);
-  else
-    Record->MWriteLeaves.push_back(Cmd);
+  CircularBuffer<Command *> &Leaves{AccessMode == access::mode::read
+                                        ? Record->MReadLeaves
+                                        : Record->MWriteLeaves};
+  if (Leaves.full()) {
+    Command *OldLeaf = Leaves.front();
+    // TODO this is a workaround for duplicate leaves, remove once fixed
+    if (OldLeaf == Cmd)
+      return;
+    // Add the old leaf as a dependency for the new one by duplicating one of
+    // the requirements for the current record
+    DepDesc Dep = findDepForRecord(Cmd, Record);
+    Dep.MDepCommand = OldLeaf;
+    Cmd->addDep(Dep);
+    OldLeaf->addUser(Cmd);
+  }
+  Leaves.push_back(Cmd);
}

UpdateHostRequirementCommand *Scheduler::GraphBuilder::insertUpdateHostReqCmd(
@@ -389,9 +399,8 @@ Scheduler::GraphBuilder::findDepsForReq(MemObjRecord *Record, Requirement *Req,
  std::set<Command *> Visited;
  const bool ReadOnlyReq = Req->MAccessMode == access::mode::read;

-  std::vector<Command *> ToAnalyze;
-
-  ToAnalyze = Record->MWriteLeaves;
+  std::vector<Command *> ToAnalyze{Record->MWriteLeaves.begin(),
+                                   Record->MWriteLeaves.end()};

  if (!ReadOnlyReq)
    ToAnalyze.insert(ToAnalyze.begin(), Record->MReadLeaves.begin(),
@@ -436,6 +445,19 @@ Scheduler::GraphBuilder::findDepsForReq(MemObjRecord *Record, Requirement *Req,
  return RetDeps;
}

+// A helper function for finding a command dependency on a specific memory
+// object
+DepDesc Scheduler::GraphBuilder::findDepForRecord(Command *Cmd,
+                                                  MemObjRecord *Record) {
+  for (const DepDesc &DD : Cmd->MDeps) {
+    if (getMemObjRecord(DD.MDepRequirement->MSYCLMemObj) == Record) {
+      return DD;
+    }
+  }
+  assert(false && "No dependency found for a leaf of the record");
+  return {nullptr, nullptr, nullptr};
+}
+
// The function searches for the alloca command matching context and
// requirement.
AllocaCommandBase *Scheduler::GraphBuilder::findAllocaForReq(

sycl/source/detail/scheduler/scheduler.cpp

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ EventImplPtr Scheduler::addHostAccessor(Requirement *Req) {
void Scheduler::releaseHostAccessor(Requirement *Req) {
  Req->MBlockedCmd->MCanEnqueue = true;
  MemObjRecord* Record = Req->MSYCLMemObj->MRecord.get();
-  auto EnqueueLeaves = [](std::vector<Command *> &Leaves) {
+  auto EnqueueLeaves = [](CircularBuffer<Command *> &Leaves) {
    for (Command *Cmd : Leaves) {
      EnqueueResultT Res;
      bool Enqueued = GraphProcessor::enqueueCommand(Cmd, Res);

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
+// RUN: %clangxx -fsycl %s -o %t.out
+// RUN: %t.out
+
+#include <CL/sycl/detail/circular_buffer.hpp>
+
+#include <algorithm>
+#include <cassert>
+#include <vector>
+
+// This test contains basic checks for cl::sycl::detail::CircularBuffer
+void checkEquality(const cl::sycl::detail::CircularBuffer<int> &CB,
+                   const std::vector<int> &V) {
+  assert(std::equal(CB.begin(), CB.end(), V.begin()));
+}
+
+int main() {
+  const size_t Capacity = 6;
+  cl::sycl::detail::CircularBuffer<int> CB{Capacity};
+  assert(CB.capacity() == Capacity);
+  assert(CB.empty());
+
+  int nextValue = 0;
+  for (; nextValue < Capacity; ++nextValue) {
+    assert(CB.size() == nextValue);
+    CB.push_back(nextValue);
+  }
+  assert(CB.full() && CB.size() == CB.capacity());
+  checkEquality(CB, {0, 1, 2, 3, 4, 5});
+
+  CB.push_back(nextValue++);
+  checkEquality(CB, {1, 2, 3, 4, 5, 6});
+  CB.push_front(nextValue++);
+  checkEquality(CB, {7, 1, 2, 3, 4, 5});
+
+  assert(CB.front() == 7);
+  assert(CB.back() == 5);
+
+  CB.erase(CB.begin() + 2);
+  checkEquality(CB, {7, 1, 3, 4, 5});
+  CB.erase(CB.begin(), CB.begin() + 2);
+  checkEquality(CB, {3, 4, 5});
+
+  CB.pop_back();
+  checkEquality(CB, {3, 4});
+  CB.pop_front();
+  checkEquality(CB, {4});
+}

sycl/test/scheduler/LeafLimit.cpp

Lines changed: 92 additions & 0 deletions
@@ -0,0 +1,92 @@
+// RUN: %clangxx -fsycl %s -o %t.out
+// RUN: %t.out
+#include <CL/sycl.hpp>
+
+#include <memory>
+#include <vector>
+
+// This test checks the leaf limit imposed on the execution graph
+
+using namespace cl::sycl;
+
+class FakeCommand : public detail::Command {
+public:
+  FakeCommand(detail::QueueImplPtr Queue, detail::Requirement Req)
+      : Command{detail::Command::ALLOCA, Queue}, MRequirement{std::move(Req)} {}
+
+  void printDot(std::ostream &Stream) const override {}
+
+  const detail::Requirement *getRequirement() const final {
+    return &MRequirement;
+  };
+
+  cl_int enqueueImp() override { return MRetVal; }
+
+  cl_int MRetVal = CL_SUCCESS;
+
+protected:
+  detail::Requirement MRequirement;
+};
+
+class TestScheduler : public detail::Scheduler {
+public:
+  void AddNodeToLeaves(detail::MemObjRecord *Rec, detail::Command *Cmd,
+                       access::mode Mode) {
+    return MGraphBuilder.AddNodeToLeaves(Rec, Cmd, Mode);
+  }
+
+  detail::MemObjRecord *
+  getOrInsertMemObjRecord(const detail::QueueImplPtr &Queue,
+                          detail::Requirement *Req) {
+    return MGraphBuilder.getOrInsertMemObjRecord(Queue, Req);
+  }
+};
+
+int main() {
+  TestScheduler TS;
+  queue Queue;
+  buffer<int, 1> Buf(range<1>(1));
+  detail::Requirement FakeReq{{0, 0, 0},
+                              {0, 0, 0},
+                              {0, 0, 0},
+                              access::mode::read_write,
+                              detail::getSyclObjImpl(Buf).get(),
+                              0,
+                              0,
+                              0};
+  FakeCommand *FakeDepCmd =
+      new FakeCommand(detail::getSyclObjImpl(Queue), FakeReq);
+  detail::MemObjRecord *Rec =
+      TS.getOrInsertMemObjRecord(detail::getSyclObjImpl(Queue), &FakeReq);
+
+  // Create commands that will be added as leaves exceeding the limit by 1
+  std::vector<FakeCommand *> LeavesToAdd;
+  for (size_t i = 0; i < Rec->MWriteLeaves.capacity() + 1; ++i) {
+    LeavesToAdd.push_back(
+        new FakeCommand(detail::getSyclObjImpl(Queue), FakeReq));
+  }
+  // Create edges: all soon-to-be leaves are direct users of FakeDep
+  for (auto Leaf : LeavesToAdd) {
+    FakeDepCmd->addUser(Leaf);
+    Leaf->addDep(detail::DepDesc{FakeDepCmd, Leaf->getRequirement(), nullptr});
+  }
+  // Add edges as leaves and exceed the leaf limit
+  for (auto LeafPtr : LeavesToAdd) {
+    TS.AddNodeToLeaves(Rec, LeafPtr, access::mode::read_write);
+  }
+  // Check that the oldest leaf has been removed from the leaf list
+  // and added as a dependency of the newest one instead
+  const detail::CircularBuffer<detail::Command *> &Leaves = Rec->MWriteLeaves;
+  assert(std::find(Leaves.begin(), Leaves.end(), LeavesToAdd.front()) ==
+         Leaves.end());
+  for (size_t i = 1; i < LeavesToAdd.size(); ++i) {
+    assert(std::find(Leaves.begin(), Leaves.end(), LeavesToAdd[i]) !=
+           Leaves.end());
+  }
+  FakeCommand *OldestLeaf = LeavesToAdd.front();
+  FakeCommand *NewestLeaf = LeavesToAdd.back();
+  assert(OldestLeaf->MUsers.size() == 1);
+  assert(OldestLeaf->MUsers[0] == NewestLeaf);
+  assert(NewestLeaf->MDeps.size() == 2);
+  assert(NewestLeaf->MDeps[1].MDepCommand == OldestLeaf);
+}
