[OpenMP][CodeGen] Improved codegen for combined loop directives #87278

Merged · 10 commits · Apr 10, 2024
11 changes: 10 additions & 1 deletion clang/include/clang/AST/StmtOpenMP.h
@@ -6109,6 +6109,8 @@ class OMPTeamsGenericLoopDirective final : public OMPLoopDirective {
class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if loop directive's associated loop can be a parallel for.
bool CanBeParallelFor = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
@@ -6131,6 +6133,9 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
llvm::omp::OMPD_target_teams_loop, SourceLocation(),
SourceLocation(), CollapsedNum) {}

/// Set whether associated loop can be a parallel for.
void setCanBeParallelFor(bool ParFor) { CanBeParallelFor = ParFor; }

public:
/// Creates directive with a list of \p Clauses.
///
@@ -6145,7 +6150,7 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
static OMPTargetTeamsGenericLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
Stmt *AssociatedStmt, const HelperExprs &Exprs, bool CanBeParallelFor);

/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -6159,6 +6164,10 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
unsigned CollapsedNum,
EmptyShell);

/// Return true if current loop directive's associated loop can be a
/// parallel for.
bool canBeParallelFor() const { return CanBeParallelFor; }

static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass;
}
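For context, here is a sketch of the source-level distinction this flag records (hypothetical user code, not part of the diff; the names and bounds are illustrative). A 'target teams loop' whose associated loop nest contains no nested 'loop bind(parallel)' construct and no opaque function calls can be lowered as 'target teams distribute parallel for'; otherwise it falls back to 'target teams distribute':

void example(int N, int M, double *a, const double *b, const double *c) {
  // CanBeParallelFor == true: may be emitted as
  // 'target teams distribute parallel for'.
  #pragma omp target teams loop
  for (int i = 0; i < N; ++i)
    a[i] = b[i] + c[i];

  // CanBeParallelFor == false: the nested 'loop bind(parallel)' would
  // introduce nested parallelism, so this is emitted as
  // 'target teams distribute'.
  #pragma omp target teams loop
  for (int i = 0; i < N; ++i) {
    #pragma omp loop bind(parallel)
    for (int j = 0; j < M; ++j)
      a[i] += b[i * M + j];
  }
}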
3 changes: 2 additions & 1 deletion clang/lib/AST/StmtOpenMP.cpp
@@ -2431,7 +2431,7 @@ OMPTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
const HelperExprs &Exprs, bool CanBeParallelFor) {
auto *Dir = createDirective<OMPTargetTeamsGenericLoopDirective>(
C, Clauses, AssociatedStmt,
numLoopChildren(CollapsedNum, OMPD_target_teams_loop), StartLoc, EndLoc,
@@ -2473,6 +2473,7 @@ OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
Dir->setCanBeParallelFor(CanBeParallelFor);
return Dir;
}

15 changes: 9 additions & 6 deletions clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -2656,11 +2656,12 @@ void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
// Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc,
isOpenMPDistributeDirective(DKind)
isOpenMPDistributeDirective(DKind) ||
(DKind == OMPD_target_teams_loop)
? OMP_IDENT_WORK_DISTRIBUTE
: isOpenMPLoopDirective(DKind)
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS),
: isOpenMPLoopDirective(DKind)
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
if (isOpenMPDistributeDirective(DKind) &&
@@ -8885,7 +8886,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
// For now, just treat 'target teams loop' as if it's distributed.
// For now, treat 'target' with nested 'teams loop' as if it's
// distributed (target teams distribute).
if (isOpenMPDistributeDirective(DKind) || DKind == OMPD_teams_loop)
return NestedDir;
if (DKind == OMPD_teams) {
@@ -9369,7 +9371,8 @@ llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
SizeEmitter) {
OpenMPDirectiveKind Kind = D.getDirectiveKind();
const OMPExecutableDirective *TD = &D;
// Get nested teams distribute kind directive, if any.
// Get nested teams distribute kind directive, if any. For now, treat
// 'target_teams_loop' as if it's really a target_teams_distribute.
if ((!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind)) &&
Kind != OMPD_target_teams_loop)
TD = getNestedDistributeDirective(CGM.getContext(), D);
7 changes: 6 additions & 1 deletion clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -646,7 +646,6 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target:
case OMPD_target_teams:
return hasNestedSPMDDirective(Ctx, D);
case OMPD_target_teams_loop:
case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
@@ -658,6 +657,12 @@
return true;
case OMPD_target_teams_distribute:
return false;
case OMPD_target_teams_loop:
// Whether this is true or not depends on how the directive will
// eventually be emitted.
if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
return TTLD->canBeParallelFor();
return false;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
87 changes: 72 additions & 15 deletions clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
@@ -34,11 +35,14 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Debug.h"
#include <optional>
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

#define TTL_CODEGEN_TYPE "target-teams-loop-codegen"

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
@@ -1432,9 +1436,12 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
*this, D.getBeginLoc(),
isOpenMPWorksharingDirective(D.getDirectiveKind()));
}
bool TeamsLoopCanBeParallel = false;
if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
TeamsLoopCanBeParallel = TTLD->canBeParallelFor();
bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
ReductionKind == OMPD_simd;
TeamsLoopCanBeParallel || ReductionKind == OMPD_simd;
bool SimpleReduction = ReductionKind == OMPD_simd;
// Emit nowait reduction if nowait clause is present or directive is a
// parallel directive (it always has implicit barrier).
@@ -7928,11 +7935,9 @@ void CodeGenFunction::EmitOMPParallelGenericLoopDirective(
void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
const OMPTeamsGenericLoopDirective &S) {
// To be consistent with current behavior of 'target teams loop', emit
// 'teams loop' as if its constituent constructs are 'distribute,
// 'parallel, and 'for'.
// 'teams loop' as if its constituent constructs are 'teams' and 'distribute'.
auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
S.getDistInc());
CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};

// Emit teams region as a standalone region.
@@ -7946,15 +7951,33 @@ void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
CodeGenDistribute);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
emitPostUpdateForReductionClause(*this, S,
[](CodeGenFunction &) { return nullptr; });
}

static void
emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF,
const OMPTargetTeamsGenericLoopDirective &S,
PrePostActionTy &Action) {
static void emitTargetTeamsLoopCodegenStatus(CodeGenFunction &CGF,
std::string StatusMsg,
const OMPExecutableDirective &D) {
#ifndef NDEBUG
bool IsDevice = CGF.CGM.getLangOpts().OpenMPIsTargetDevice;
if (IsDevice)
StatusMsg += ": DEVICE";
else
StatusMsg += ": HOST";
SourceLocation L = D.getBeginLoc();
auto &SM = CGF.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(L);
const char *FileName = PLoc.isValid() ? PLoc.getFilename() : nullptr;
unsigned LineNo =
PLoc.isValid() ? PLoc.getLine() : SM.getExpansionLineNumber(L);
llvm::dbgs() << StatusMsg << ": " << FileName << ": " << LineNo << "\n";
#endif
}

static void emitTargetTeamsGenericLoopRegionAsParallel(
CodeGenFunction &CGF, PrePostActionTy &Action,
const OMPTargetTeamsGenericLoopDirective &S) {
Action.Enter(CGF);
// Emit 'teams loop' as if its constituent constructs are 'distribute,
// 'parallel, and 'for'.
@@ -7974,19 +7997,50 @@
CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};

DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
emitTargetTeamsLoopCodegenStatus(
CGF, TTL_CODEGEN_TYPE " as parallel for", S));
emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
CodeGenTeams);
emitPostUpdateForReductionClause(CGF, S,
[](CodeGenFunction &) { return nullptr; });
}

/// Emit combined directive 'target teams loop' as if its constituent
/// constructs are 'target', 'teams', 'distribute', 'parallel', and 'for'.
static void emitTargetTeamsGenericLoopRegionAsDistribute(
CodeGenFunction &CGF, PrePostActionTy &Action,
const OMPTargetTeamsGenericLoopDirective &S) {
Action.Enter(CGF);
// Emit 'teams loop' as if its constituent construct is 'distribute'.
auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};

// Emit teams region as a standalone region.
auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
PrePostActionTy &Action) {
Action.Enter(CGF);
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
emitTargetTeamsLoopCodegenStatus(
CGF, TTL_CODEGEN_TYPE " as distribute", S));
emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
emitPostUpdateForReductionClause(CGF, S,
[](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDirective(
const OMPTargetTeamsGenericLoopDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
emitTargetTeamsGenericLoopRegion(CGF, S, Action);
if (S.canBeParallelFor())
emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
else
emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
};
emitCommonOMPTargetDirective(*this, S, CodeGen);
}
@@ -7996,7 +8050,10 @@ void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
const OMPTargetTeamsGenericLoopDirective &S) {
// Emit SPMD target parallel loop region as a standalone region.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
emitTargetTeamsGenericLoopRegion(CGF, S, Action);
if (S.canBeParallelFor())
emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
else
emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
};
llvm::Function *Fn;
llvm::Constant *Addr;
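Since the status messages above are emitted with DEBUG_WITH_TYPE, they exist only in assertion-enabled builds; there they can be surfaced with LLVM's standard debug-only machinery to confirm which form a given 'target teams loop' received. An illustrative invocation (offload target and file name are hypothetical):

clang -fopenmp -fopenmp-targets=amdgcn-amd-amdhsa -O2 \
  -mllvm -debug-only=target-teams-loop-codegen teams_loop.c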
86 changes: 83 additions & 3 deletions clang/lib/Sema/SemaOpenMP.cpp
@@ -4478,6 +4478,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
// For 'target teams loop', collect all captured regions so codegen can
// later decide the best IR to emit given the associated loop-nest.
case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
@@ -6135,6 +6137,79 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
}
}

namespace {
/// A 'teams loop' with a nested 'loop bind(parallel)' or generic function
/// call in the associated loop-nest cannot be a 'parallel for'.
class TeamsLoopChecker final : public ConstStmtVisitor<TeamsLoopChecker> {
Sema &SemaRef;

public:
bool teamsLoopCanBeParallelFor() const { return TeamsLoopCanBeParallelFor; }

// Is there a nested OpenMP loop bind(parallel)
void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
if (D->getDirectiveKind() == llvm::omp::Directive::OMPD_loop) {
if (const auto *C = D->getSingleClause<OMPBindClause>())
if (C->getBindKind() == OMPC_BIND_parallel) {
TeamsLoopCanBeParallelFor = false;
// No need to continue visiting any more
return;
}
}
for (const Stmt *Child : D->children())
if (Child)
Visit(Child);
}

void VisitCallExpr(const CallExpr *C) {

Collaborator:
I'm concerned detecting CallExprs like this is going to have unexpected effects on the generated code: there are a lot of constructs that are written as "calls", but don't actually call anything external. So minor changes to the user's code could have an unexpectedly large impact on code generation.

Is there some way we can do this transform in an LLVM IR optimization pass?

Contributor Author:
Sorry for the delayed response, Eli. I didn't get notified of your comment.

So, to answer your specific question first, yes this can be done in OpenMPOpt, and eventually it should be. But for now, this is a good compromise built on what's currently implemented.

And you are right that the generated code can differ. If a call is added to an existing target-teams-loop loop nest, it will change the code generation (target-teams-distribute instead of target-teams-distribute-parallel-for), which is slower, though correct. If a call is removed, the opposite occurs. The idea is that, since target-teams-distribute-parallel-for was always generated prior to this change (which in some cases can be incorrect), this simplistic approach lets us catch many cases where we can still generate the faster IR.

Collaborator:
If you have a plan to migrate the check to an LLVM optimization, this is okay as a temporary solution, I guess. I just don't want to end up in a situation where we try to continually refine this check instead of just moving it where it's supposed to be.

Maybe put a comment in the code describing the plan.

Contributor Author:
Currently, there are no plans to move this into the optimizer, though it is a step that should be taken at some point. Also, there are no plans to make any more changes to the current implementation after this.

Collaborator:
I just meant "plan" more loosely in the sense of how you expect the code to evolve in the future, not that you have a timeline to implement a specific change.
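
To make the heuristic under discussion concrete, a hypothetical example (f, a, b, N, and the enclosing function are illustrative):

#include <omp.h>

double f(double); // opaque external function

void calls_example(int N, double *a, const double *b) {
  // The opaque call in the loop nest inhibits the 'parallel for'
  // translation, so this is emitted as 'target teams distribute'
  // (slower, but always correct) unless
  // -fopenmp-assume-no-nested-parallelism is given.
  #pragma omp target teams loop
  for (int i = 0; i < N; ++i)
    a[i] = f(b[i]);

  // Calls with an 'omp_' name prefix are treated as OpenMP API calls and
  // never inhibit the translation, so this still becomes
  // 'target teams distribute parallel for'.
  #pragma omp target teams loop
  for (int i = 0; i < N; ++i)
    a[i] = b[i] * omp_get_team_num();
}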

// Function calls inhibit parallel loop translation of 'target teams loop'
// unless the assume-no-nested-parallelism flag has been specified.
// OpenMP API runtime library calls do not inhibit parallel loop
// translation, regardless of the assume-no-nested-parallelism.
if (C) {
bool IsOpenMPAPI = false;
auto *FD = dyn_cast_or_null<FunctionDecl>(C->getCalleeDecl());
if (FD) {
std::string Name = FD->getNameInfo().getAsString();
IsOpenMPAPI = Name.find("omp_") == 0;
}
TeamsLoopCanBeParallelFor =
IsOpenMPAPI || SemaRef.getLangOpts().OpenMPNoNestedParallelism;
if (!TeamsLoopCanBeParallelFor)
return;
}
for (const Stmt *Child : C->children())
if (Child)
Visit(Child);
}
Comment on lines +6169 to +6184
Contributor:
Hi @ddpagan, the static verifier complains about this code since you check that 'C' is not a nullptr, but then do not check before the 'C->children()' call. It seems 'C' won't be null since it is checked in ActOnOpenMPTargetTeamsGenericLoopDirective. Can we remove the 'if (C)' check and use an assert instead, if you think it helps? Or, if 'C' really can be null somehow, we need to ensure the for loop is excluded in that case.
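
One way the method could look with the guard replaced by the suggested assert (a sketch of the proposal, not code from this PR):

void VisitCallExpr(const CallExpr *C) {
  assert(C && "expected a non-null CallExpr");
  bool IsOpenMPAPI = false;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(C->getCalleeDecl()))
    IsOpenMPAPI = FD->getNameInfo().getAsString().find("omp_") == 0;
  TeamsLoopCanBeParallelFor =
      IsOpenMPAPI || SemaRef.getLangOpts().OpenMPNoNestedParallelism;
  if (!TeamsLoopCanBeParallelFor)
    return;
  for (const Stmt *Child : C->children())
    if (Child)
      Visit(Child);
}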


void VisitCapturedStmt(const CapturedStmt *S) {
if (!S)
return;
Visit(S->getCapturedDecl()->getBody());
}

void VisitStmt(const Stmt *S) {
if (!S)
return;
for (const Stmt *Child : S->children())
if (Child)
Visit(Child);
}
explicit TeamsLoopChecker(Sema &SemaRef)
: SemaRef(SemaRef), TeamsLoopCanBeParallelFor(true) {}

private:
bool TeamsLoopCanBeParallelFor;
};
} // namespace

static bool teamsLoopCanBeParallelFor(Stmt *AStmt, Sema &SemaRef) {
TeamsLoopChecker Checker(SemaRef);
Checker.Visit(AStmt);
return Checker.teamsLoopCanBeParallelFor();
}

bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
ArrayRef<OMPClause *> Clauses,
OpenMPBindClauseKind &BindKind,
Expand Down Expand Up @@ -10895,7 +10970,8 @@ StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective(
setFunctionHasBranchProtectedScope();

return OMPTargetTeamsGenericLoopDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
teamsLoopCanBeParallelFor(AStmt, *this));
}

StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
Expand Down Expand Up @@ -15645,14 +15721,19 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
CaptureRegion = OMPD_target;
break;
case OMPD_teams_loop:
case OMPD_target_teams_loop:
// For [target] teams loop, assume capture region is 'teams' so it's
// available for codegen later to use if/when necessary.
CaptureRegion = OMPD_teams;
break;
case OMPD_target_teams_distribute_parallel_for_simd:
if (OpenMPVersion >= 50 &&
(NameModifier == OMPD_unknown || NameModifier == OMPD_simd)) {
CaptureRegion = OMPD_parallel;
break;
}
[[fallthrough]];
case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
// If this clause applies to the nested 'parallel' region, capture within
// the 'teams' region, otherwise do not capture.
Expand Down Expand Up @@ -15775,7 +15856,6 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
case OMPD_teams_loop:
case OMPD_teams:
case OMPD_tile:
case OMPD_unroll: