@@ -20,12 +20,6 @@ __SYCL_INLINE_NAMESPACE(cl) {
 namespace sycl {
 namespace detail {
 
-EventImplPtr addHostAccessorToSchedulerInstance(Requirement *Req,
-                                                const bool destructor) {
-  return cl::sycl::detail::Scheduler::getInstance().
-      addHostAccessor(Req, destructor);
-}
-
 void Scheduler::waitForRecordToFinish(MemObjRecord *Record) {
 #ifdef XPTI_ENABLE_INSTRUMENTATION
   // Will contain the list of dependencies for the Release Command
@@ -72,7 +66,7 @@ EventImplPtr Scheduler::addCG(std::unique_ptr<detail::CG> CommandGroup,
   Command *NewCmd = nullptr;
   const bool IsKernel = CommandGroup->getType() == CG::KERNEL;
   {
-    std::lock_guard<std::mutex> Lock(MGraphLock);
+    std::lock_guard<std::shared_timed_mutex> Lock(MGraphLock);
 
     switch (CommandGroup->getType()) {
     case CG::UPDATE_HOST:
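This hunk swaps MGraphLock from std::mutex to std::shared_timed_mutex so that, in the hunks below, read-only queries can take shared locks while graph mutations keep exclusive ones. A minimal sketch of that reader/writer split, assuming a hypothetical GraphSketch class in place of the real scheduler:

```cpp
// Sketch only, not the SYCL sources: GraphSketch and Nodes are made-up names
// illustrating the locking discipline this patch introduces on MGraphLock.
#include <cstddef>
#include <mutex>
#include <shared_mutex>
#include <vector>

class GraphSketch {
  std::shared_timed_mutex MGraphLock; // same type the patch switches to
  std::vector<int> Nodes;             // stand-in for the command graph

public:
  void addNode(int N) {
    // Exclusive (writer) lock for mutation, as in Scheduler::addCG above.
    std::lock_guard<std::shared_timed_mutex> Lock(MGraphLock);
    Nodes.push_back(N);
  }

  std::size_t nodeCount() {
    // Shared (reader) lock for queries, as in Scheduler::getWaitList below.
    std::shared_lock<std::shared_timed_mutex> Lock(MGraphLock);
    return Nodes.size();
  }
};
```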
@@ -97,7 +91,7 @@ EventImplPtr Scheduler::addCG(std::unique_ptr<detail::CG> CommandGroup,
 }
 
 EventImplPtr Scheduler::addCopyBack(Requirement *Req) {
-  std::lock_guard<std::mutex> lock(MGraphLock);
+  std::lock_guard<std::shared_timed_mutex> Lock(MGraphLock);
   Command *NewCmd = MGraphBuilder.addCopyBack(Req);
   // Command was not created because there were no operations with
   // the buffer.
@@ -121,35 +115,39 @@ EventImplPtr Scheduler::addCopyBack(Requirement *Req) {
 // else that has no priority set, or has a priority higher than 2000).
 Scheduler Scheduler::instance __attribute__((init_priority(2000)));
 #else
-#pragma warning(disable: 4073)
+#pragma warning(disable : 4073)
 #pragma init_seg(lib)
 Scheduler Scheduler::instance;
 #endif
 
-Scheduler &Scheduler::getInstance() {
-  return instance;
-}
+Scheduler &Scheduler::getInstance() { return instance; }
 
 std::vector<EventImplPtr> Scheduler::getWaitList(EventImplPtr Event) {
-  std::lock_guard<std::mutex> lock(MGraphLock);
+  std::shared_lock<std::shared_timed_mutex> Lock(MGraphLock);
   return GraphProcessor::getWaitList(std::move(Event));
 }
 
 void Scheduler::waitForEvent(EventImplPtr Event) {
+  std::shared_lock<std::shared_timed_mutex> Lock(MGraphLock);
   GraphProcessor::waitForEvent(std::move(Event));
 }
 
 void Scheduler::cleanupFinishedCommands(EventImplPtr FinishedEvent) {
-  std::lock_guard<std::mutex> lock(MGraphLock);
-  Command *FinishedCmd = static_cast<Command *>(FinishedEvent->getCommand());
-  // The command might have been cleaned up (and set to nullptr) by another
-  // thread
-  if (FinishedCmd)
-    MGraphBuilder.cleanupFinishedCommands(FinishedCmd);
+  // Avoiding a deadlock situation, where one thread is in the process of
+  // enqueueing (with a locked mutex) a currently blocked task that waits for
+  // another thread which is stuck attempting cleanup.
+  std::unique_lock<std::shared_timed_mutex> Lock(MGraphLock, std::try_to_lock);
+  if (Lock.owns_lock()) {
+    Command *FinishedCmd = static_cast<Command *>(FinishedEvent->getCommand());
+    // The command might have been cleaned up (and set to nullptr) by another
+    // thread
+    if (FinishedCmd)
+      MGraphBuilder.cleanupFinishedCommands(FinishedCmd);
+  }
 }
 
 void Scheduler::removeMemoryObject(detail::SYCLMemObjI *MemObj) {
-  std::lock_guard<std::mutex> lock(MGraphLock);
+  std::lock_guard<std::shared_timed_mutex> Lock(MGraphLock);
 
   MemObjRecord *Record = MGraphBuilder.getMemObjRecord(MemObj);
   if (!Record)
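The cleanupFinishedCommands change above trades guaranteed cleanup for deadlock freedom: the lock is only taken if it is immediately available. A minimal sketch of the same std::try_to_lock pattern; GraphLock and tryCleanup are illustrative names, not the patch's API:

```cpp
#include <mutex>
#include <shared_mutex>

std::shared_timed_mutex GraphLock; // stands in for Scheduler::MGraphLock

void tryCleanup() {
  // try_to_lock attempts the exclusive lock without blocking. If another
  // thread holds the mutex (possibly waiting, directly or indirectly, on
  // work this thread produces), we skip cleanup instead of deadlocking;
  // a later call can retry.
  std::unique_lock<std::shared_timed_mutex> Lock(GraphLock, std::try_to_lock);
  if (!Lock.owns_lock())
    return; // lock is busy; cleanup here is best-effort, not mandatory
  // ... walk the graph and remove finished commands under the lock ...
}
```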
@@ -163,7 +161,7 @@ void Scheduler::removeMemoryObject(detail::SYCLMemObjI *MemObj) {
 
 EventImplPtr Scheduler::addHostAccessor(Requirement *Req,
                                         const bool destructor) {
-  std::lock_guard<std::mutex> lock(MGraphLock);
+  std::lock_guard<std::shared_timed_mutex> Lock(MGraphLock);
 
   Command *NewCmd = MGraphBuilder.addHostAccessor(Req, destructor);
 
@@ -178,7 +176,8 @@ EventImplPtr Scheduler::addHostAccessor(Requirement *Req,
 
 void Scheduler::releaseHostAccessor(Requirement *Req) {
   Req->MBlockedCmd->MEnqueueStatus = EnqueueResultT::SyclEnqueueReady;
-  MemObjRecord* Record = Req->MSYCLMemObj->MRecord.get();
+  std::shared_lock<std::shared_timed_mutex> Lock(MGraphLock);
+  MemObjRecord *Record = Req->MSYCLMemObj->MRecord.get();
   auto EnqueueLeaves = [](CircularBuffer<Command *> &Leaves) {
     for (Command *Cmd : Leaves) {
       EnqueueResultT Res;
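releaseHostAccessor now takes a shared lock before reading the memory-object record. The payoff of the shared_timed_mutex switch is that such read-side locks do not exclude each other; a small self-contained demo (assumed names, not from the patch):

```cpp
// Demonstrates that two shared locks on a shared_timed_mutex overlap,
// unlike the plain std::mutex this patch replaces.
#include <chrono>
#include <iostream>
#include <shared_mutex>
#include <thread>

std::shared_timed_mutex GraphLock;

void reader(int Id) {
  std::shared_lock<std::shared_timed_mutex> L(GraphLock);
  // With std::mutex, the second thread would wait here until the first
  // released the lock; shared_lock lets both hold it at once.
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  std::cout << "reader " << Id << " done\n";
}

int main() {
  auto Start = std::chrono::steady_clock::now();
  std::thread A(reader, 1), B(reader, 2);
  A.join();
  B.join();
  auto Ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - Start)
                .count();
  std::cout << "elapsed ~" << Ms << " ms (about 50, not 100: readers overlapped)\n";
}
```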
@@ -193,9 +192,9 @@ void Scheduler::releaseHostAccessor(Requirement *Req) {
 
 Scheduler::Scheduler() {
   sycl::device HostDevice;
-  DefaultHostQueue = QueueImplPtr(new queue_impl(
-      detail::getSyclObjImpl(HostDevice), /*AsyncHandler=*/{},
-      QueueOrder::Ordered, /*PropList=*/{}));
+  DefaultHostQueue = QueueImplPtr(
+      new queue_impl(detail::getSyclObjImpl(HostDevice), /*AsyncHandler=*/{},
+                     QueueOrder::Ordered, /*PropList=*/{}));
 }
 
 } // namespace detail
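For context on the init_priority/#pragma init_seg lines earlier in the diff: they force the Scheduler singleton to be constructed before ordinary file-scope statics. A stand-alone sketch of the technique, with made-up names (Early, EarlyInstance):

```cpp
#include <cstdio>

struct Early {
  Early() { std::puts("constructed before ordinary statics"); }
};

#if defined(_MSC_VER)
#pragma warning(disable : 4073) // C4073: initializers put in library init area
#pragma init_seg(lib)           // statics below initialize with library statics
Early EarlyInstance;
#else
// GCC/Clang: lower priority values run earlier; 101-65535 are available to
// user code, so priority 2000 beats anything with the default priority.
Early EarlyInstance __attribute__((init_priority(2000)));
#endif

int main() { std::puts("main"); }
```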