@@ -52,7 +52,7 @@ static bool IsSuitableSubReq(const Requirement *Req) {
   return Req->MIsSubBuffer;
 }
 
-static bool isOnSameContext(const ContextImplPtr Context, queue_impl *Queue) {
+static bool isOnSameContext(context_impl *Context, queue_impl *Queue) {
   // Covers case for host usage (nullptr == nullptr) and existing device
   // contexts comparison.
   return Context == queue_impl::getContext(Queue);
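
This first hunk is the heart of the change: `isOnSameContext` now takes a raw `context_impl *` rather than the `ContextImplPtr` shared-pointer alias, and the later hunks route every access to the record's current context through `getCurContext()`/`setCurContext()`. Below is a minimal sketch of the helper as it reads after the patch, together with a hypothetical, stripped-down `MemObjRecord`; the stand-in classes and accessor bodies are assumptions for illustration, not the runtime's real declarations.

```cpp
namespace sycl::detail {

class context_impl {}; // stand-in; the real class lives elsewhere in the runtime

class queue_impl {
public:
  explicit queue_impl(context_impl *Ctx) : MContext(Ctx) {}
  // Assumption: a null queue stands for the host, so the helper's
  // nullptr == nullptr comparison covers host usage, as the comment notes.
  static context_impl *getContext(queue_impl *Queue) {
    return Queue ? Queue->MContext : nullptr;
  }

private:
  context_impl *MContext = nullptr;
};

// The helper after this patch: a plain pointer comparison.
static bool isOnSameContext(context_impl *Context, queue_impl *Queue) {
  return Context == queue_impl::getContext(Queue);
}

// Hypothetical shape of the accessors the later hunks call; the real
// MemObjRecord also tracks leaves, alloca commands and host access mode.
struct MemObjRecord {
  context_impl *getCurContext() const { return MCurContext; }
  void setCurContext(context_impl *Ctx) { MCurContext = Ctx; }
  context_impl *MCurContext = nullptr;
};

} // namespace sycl::detail
```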
@@ -233,8 +233,8 @@ Scheduler::GraphBuilder::getOrInsertMemObjRecord(queue_impl *Queue,
                                 "shouldn't lead to any enqueuing (no linked "
                                 "alloca or exceeding the leaf limit).");
   } else
-    MemObject->MRecord.reset(new MemObjRecord{
-        queue_impl::getContext(Queue).get(), LeafLimit, AllocateDependency});
+    MemObject->MRecord.reset(new MemObjRecord{queue_impl::getContext(Queue),
+                                              LeafLimit, AllocateDependency});
 
   MMemObjs.push_back(MemObject);
   return MemObject->MRecord.get();
@@ -346,15 +346,16 @@ Scheduler::GraphBuilder::insertMemoryMove(MemObjRecord *Record,
   }
 
   AllocaCommandBase *AllocaCmdSrc =
-      findAllocaForReq(Record, Req, Record->MCurContext);
+      findAllocaForReq(Record, Req, Record->getCurContext());
   if (!AllocaCmdSrc && IsSuitableSubReq(Req)) {
     // Since no alloca command for the sub buffer requirement was found in the
     // current context, need to find a parent alloca command for it (it must be
     // there)
     auto IsSuitableAlloca = [Record](AllocaCommandBase *AllocaCmd) {
-      bool Res = isOnSameContext(Record->MCurContext, AllocaCmd->getQueue()) &&
-                 // Looking for a parent buffer alloca command
-                 AllocaCmd->getType() == Command::CommandType::ALLOCA;
+      bool Res =
+          isOnSameContext(Record->getCurContext(), AllocaCmd->getQueue()) &&
+          // Looking for a parent buffer alloca command
+          AllocaCmd->getType() == Command::CommandType::ALLOCA;
       return Res;
     };
     const auto It =
@@ -384,10 +385,9 @@ Scheduler::GraphBuilder::insertMemoryMove(MemObjRecord *Record,
     NewCmd = insertMapUnmapForLinkedCmds(AllocaCmdSrc, AllocaCmdDst, MapMode);
     Record->MHostAccess = MapMode;
   } else {
-
     if ((Req->MAccessMode == access::mode::discard_write) ||
         (Req->MAccessMode == access::mode::discard_read_write)) {
-      Record->MCurContext = Context;
+      Record->setCurContext(Context);
       return nullptr;
     } else {
       // Full copy of buffer is needed to avoid loss of data that may be caused
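
The branch changed just above is the discard fast path: a `discard_write`/`discard_read_write` access permits the old contents to be dropped, so the record is simply rebound to the destination context via `setCurContext(Context)` and no move command is created. A user-level illustration of what can trigger that path (a hypothetical example, not part of this patch; it assumes two devices with distinct contexts are available):

```cpp
#include <sycl/sycl.hpp>

int main() {
  sycl::queue Q1{sycl::cpu_selector_v};
  sycl::queue Q2{sycl::gpu_selector_v}; // assumed to use a different context
  sycl::buffer<int, 1> Buf{sycl::range<1>{1024}};

  Q1.submit([&](sycl::handler &CGH) {
    sycl::accessor Acc{Buf, CGH, sycl::write_only};
    CGH.parallel_for(sycl::range<1>{1024}, [=](sycl::id<1> I) { Acc[I] = 1; });
  });

  // write_only + no_init corresponds to a discard write: when the scheduler
  // moves the record to Q2's context it may skip the copy, per the branch above.
  Q2.submit([&](sycl::handler &CGH) {
    sycl::accessor Acc{Buf, CGH, sycl::write_only, sycl::no_init};
    CGH.parallel_for(sycl::range<1>{1024}, [=](sycl::id<1> I) { Acc[I] = 2; });
  });
  return 0;
}
```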
@@ -409,7 +409,7 @@ Scheduler::GraphBuilder::insertMemoryMove(MemObjRecord *Record,
   addNodeToLeaves(Record, NewCmd, access::mode::read_write, ToEnqueue);
   for (Command *Cmd : ToCleanUp)
     cleanupCommand(Cmd);
-  Record->MCurContext = Context;
+  Record->setCurContext(Context);
   return NewCmd;
 }
 
@@ -422,7 +422,8 @@ Command *Scheduler::GraphBuilder::remapMemoryObject(
   AllocaCommandBase *LinkedAllocaCmd = HostAllocaCmd->MLinkedAllocaCmd;
   assert(LinkedAllocaCmd && "Linked alloca command expected");
 
-  std::set<Command *> Deps = findDepsForReq(Record, Req, Record->MCurContext);
+  std::set<Command *> Deps =
+      findDepsForReq(Record, Req, Record->getCurContext());
 
   UnMapMemObject *UnMapCmd = new UnMapMemObject(
       LinkedAllocaCmd, *LinkedAllocaCmd->getRequirement(),
@@ -473,7 +474,7 @@ Scheduler::GraphBuilder::addCopyBack(Requirement *Req,
 
   std::set<Command *> Deps = findDepsForReq(Record, Req, nullptr);
   AllocaCommandBase *SrcAllocaCmd =
-      findAllocaForReq(Record, Req, Record->MCurContext);
+      findAllocaForReq(Record, Req, Record->getCurContext());
 
   auto MemCpyCmdUniquePtr = std::make_unique<MemCpyCommandHost>(
       *SrcAllocaCmd->getRequirement(), SrcAllocaCmd, *Req, &Req->MData,
@@ -525,7 +526,7 @@ Scheduler::GraphBuilder::addHostAccessor(Requirement *Req,
   AllocaCommandBase *HostAllocaCmd =
       getOrCreateAllocaForReq(Record, Req, nullptr, ToEnqueue);
 
-  if (isOnSameContext(Record->MCurContext, HostAllocaCmd->getQueue())) {
+  if (isOnSameContext(Record->getCurContext(), HostAllocaCmd->getQueue())) {
     if (!isAccessModeAllowed(Req->MAccessMode, Record->MHostAccess)) {
       remapMemoryObject(Record, Req,
                         Req->MIsSubBuffer ? (static_cast<AllocaSubBufCommand *>(
@@ -571,10 +572,8 @@ Command *Scheduler::GraphBuilder::addCGUpdateHost(
 /// 1. New and examined commands only read -> can bypass
 /// 2. New and examined commands has non-overlapping requirements -> can bypass
 /// 3. New and examined commands have different contexts -> cannot bypass
-std::set<Command *>
-Scheduler::GraphBuilder::findDepsForReq(MemObjRecord *Record,
-                                        const Requirement *Req,
-                                        const ContextImplPtr &Context) {
+std::set<Command *> Scheduler::GraphBuilder::findDepsForReq(
+    MemObjRecord *Record, const Requirement *Req, context_impl *Context) {
   std::set<Command *> RetDeps;
   std::vector<Command *> Visited;
   const bool ReadOnlyReq = Req->MAccessMode == access::mode::read;
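
The three rules in the doxygen comment above collapse into one small predicate. The sketch below is illustrative only: `canBypassDep` is not a helper in the patch, and the three booleans stand in for conditions that `findDepsForReq` computes from the examined dependency, its queue, and the `Context` argument whose type this patch changes.

```cpp
// Illustrative reduction of the bypass rules documented above.
//   SameContext  - new and examined command target the same context (rule 3)
//   BothReadOnly - both commands only read the memory object       (rule 1)
//   Overlap      - the two requirements overlap within the object  (rule 2)
static bool canBypassDep(bool SameContext, bool BothReadOnly, bool Overlap) {
  if (!SameContext)
    return false;  // rule 3: different contexts can never be bypassed
  if (BothReadOnly)
    return true;   // rule 1: read-after-read is not a conflict
  return !Overlap; // rule 2: disjoint requirements do not conflict
}
```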
@@ -644,7 +643,7 @@ DepDesc Scheduler::GraphBuilder::findDepForRecord(Command *Cmd,
 // The function searches for the alloca command matching context and
 // requirement.
 AllocaCommandBase *Scheduler::GraphBuilder::findAllocaForReq(
-    MemObjRecord *Record, const Requirement *Req, const ContextImplPtr &Context,
+    MemObjRecord *Record, const Requirement *Req, context_impl *Context,
     bool AllowConst) {
   auto IsSuitableAlloca = [&Context, Req,
                            AllowConst](AllocaCommandBase *AllocaCmd) {
@@ -663,7 +662,7 @@ AllocaCommandBase *Scheduler::GraphBuilder::findAllocaForReq(
   return (Record->MAllocaCommands.end() != It) ? *It : nullptr;
 }
 
-static bool checkHostUnifiedMemory(const ContextImplPtr &Ctx) {
+static bool checkHostUnifiedMemory(context_impl *Ctx) {
   if (const char *HUMConfig = SYCLConfig<SYCL_HOST_UNIFIED_MEMORY>::get()) {
     if (std::strcmp(HUMConfig, "0") == 0)
       return Ctx == nullptr;
@@ -744,7 +743,7 @@ AllocaCommandBase *Scheduler::GraphBuilder::getOrCreateAllocaForReq(
           Record->MAllocaCommands.push_back(HostAllocaCmd);
           Record->MWriteLeaves.push_back(HostAllocaCmd, ToEnqueue);
           ++(HostAllocaCmd->MLeafCounter);
-          Record->MCurContext = nullptr;
+          Record->setCurContext(nullptr);
         }
       }
     } else {
@@ -768,11 +767,12 @@ AllocaCommandBase *Scheduler::GraphBuilder::getOrCreateAllocaForReq(
       bool PinnedHostMemory = MemObj->usesPinnedHostMemory();
 
       bool HostUnifiedMemoryOnNonHostDevice =
-          Queue == nullptr ? checkHostUnifiedMemory(Record->MCurContext)
-                           : HostUnifiedMemory;
+          Queue == nullptr
+              ? checkHostUnifiedMemory(Record->getCurContext())
+              : HostUnifiedMemory;
       if (PinnedHostMemory || HostUnifiedMemoryOnNonHostDevice) {
         AllocaCommandBase *LinkedAllocaCmdCand = findAllocaForReq(
-            Record, Req, Record->MCurContext, /*AllowConst=*/false);
+            Record, Req, Record->getCurContext(), /*AllowConst=*/false);
 
         // Cannot setup link if candidate is linked already
         if (LinkedAllocaCmdCand &&
@@ -812,7 +812,7 @@ AllocaCommandBase *Scheduler::GraphBuilder::getOrCreateAllocaForReq(
         AllocaCmd->MIsActive = false;
       } else {
         LinkedAllocaCmd->MIsActive = false;
-        Record->MCurContext = Context;
+        Record->setCurContext(Context);
 
         std::set<Command *> Deps = findDepsForReq(Record, Req, Context);
         for (Command *Dep : Deps) {
@@ -965,7 +965,7 @@ Command *Scheduler::GraphBuilder::addCG(
       AllocaCmd =
           getOrCreateAllocaForReq(Record, Req, QueueForAlloca, ToEnqueue);
 
-      isSameCtx = isOnSameContext(Record->MCurContext, QueueForAlloca);
+      isSameCtx = isOnSameContext(Record->getCurContext(), QueueForAlloca);
     }
 
     // If there is alloca command we need to check if the latest memory is in
@@ -992,7 +992,7 @@ Command *Scheduler::GraphBuilder::addCG(
         const detail::CGHostTask &HT =
             static_cast<detail::CGHostTask &>(NewCmd->getCG());
 
-        if (!isOnSameContext(Record->MCurContext, HT.MQueue.get())) {
+        if (!isOnSameContext(Record->getCurContext(), HT.MQueue.get())) {
           NeedMemMoveToHost = true;
           MemMoveTargetQueue = HT.MQueue.get();
         }
@@ -1226,9 +1226,7 @@ Command *Scheduler::GraphBuilder::connectDepEvent(
   try {
     std::shared_ptr<detail::HostTask> HT(new detail::HostTask);
     std::unique_ptr<detail::CG> ConnectCG(new detail::CGHostTask(
-        std::move(HT),
-        /* Queue = */ Cmd->getQueue(),
-        /* Context = */ {},
+        std::move(HT), /* Queue = */ Cmd->getQueue(), /* Context = */ nullptr,
         /* Args = */ {},
         detail::CG::StorageInitHelper(
             /* ArgsStorage = */ {}, /* AccStorage = */ {},
@@ -1302,7 +1300,7 @@ Command *Scheduler::GraphBuilder::addCommandGraphUpdate(
 
       AllocaCmd = getOrCreateAllocaForReq(Record, Req, Queue, ToEnqueue);
 
-      isSameCtx = isOnSameContext(Record->MCurContext, Queue);
+      isSameCtx = isOnSameContext(Record->getCurContext(), Queue);
     }
 
     if (!isSameCtx) {