[OpenMP] Patch for Support to loop bind clause : Checking Parent Region #76938


Merged
merged 8 commits on Jan 9, 2024
Changes from all commits
7 changes: 5 additions & 2 deletions clang/include/clang/Sema/Sema.h
@@ -11346,9 +11346,12 @@ class Sema final {
/// rigorous semantic checking in the new mapped directives.
bool mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
ArrayRef<OMPClause *> Clauses,
- OpenMPBindClauseKind BindKind,
+ OpenMPBindClauseKind &BindKind,
OpenMPDirectiveKind &Kind,
- OpenMPDirectiveKind &PrevMappedDirective);
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ const DeclarationNameInfo &DirName,
+ OpenMPDirectiveKind CancelRegion);

public:
/// The declarator \p D defines a function in the scope \p S which is nested
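Passing BindKind by reference lets the binding that mapLoopConstruct infers for an unbound loop directive flow back to ActOnOpenMPExecutableDirective, which then forwards it to checkNestingOfRegions (see the SemaOpenMP.cpp changes below). As an illustration only — a sketch of the source-level behavior, not code from this patch — the inferred binding looks roughly like this:

void bind_inference_sketch() {   // hypothetical example, not from the test suite
  #pragma omp parallel
  for (int i = 0; i < 8; i++) {
    #pragma omp loop             // no bind clause: binding inferred from the parent 'parallel' region
    for (int j = 0; j < 8; j++) { }
  }

  #pragma omp teams
  {
    #pragma omp loop             // no bind clause: binding inferred as bind(teams)
    for (int j = 0; j < 8; j++) { }
  }

  #pragma omp loop               // no enclosing construct: diagnosed via err_omp_bind_required_on_loop
  for (int j = 0; j < 8; j++) { }
}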
56 changes: 41 additions & 15 deletions clang/lib/Sema/SemaOpenMP.cpp
Expand Up @@ -5072,6 +5072,18 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
CurrentRegion != OMPD_cancellation_point &&
CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
return false;
+ // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
+ // for a detailed explanation
+ if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
+ (BindKind == OMPC_BIND_parallel || BindKind == OMPC_BIND_teams) &&
+ (isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_loop)) {
+ int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+ << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
+ << getOpenMPDirectiveName(CurrentRegion);
+ return true;
+ }
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
// OpenMP [2.16, Nesting of Regions]
@@ -6124,21 +6136,25 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,

bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
ArrayRef<OMPClause *> Clauses,
- OpenMPBindClauseKind BindKind,
+ OpenMPBindClauseKind &BindKind,
OpenMPDirectiveKind &Kind,
- OpenMPDirectiveKind &PrevMappedDirective) {
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ const DeclarationNameInfo &DirName,
+ OpenMPDirectiveKind CancelRegion) {

bool UseClausesWithoutBind = false;

// Restricting to "#pragma omp loop bind"
if (getLangOpts().OpenMP >= 50 && Kind == OMPD_loop) {

+ const OpenMPDirectiveKind ParentDirective = DSAStack->getParentDirective();

if (BindKind == OMPC_BIND_unknown) {
// Setting the enclosing teams or parallel construct for the loop
// directive without bind clause.
BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown

- const OpenMPDirectiveKind ParentDirective =
- DSAStack->getParentDirective();
if (ParentDirective == OMPD_unknown) {
Diag(DSAStack->getDefaultDSALocation(),
diag::err_omp_bind_required_on_loop);
@@ -6150,9 +6166,10 @@ bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
BindKind = OMPC_BIND_teams;
}
} else {
- // bind clause is present, so we should set flag indicating to only
- // use the clauses that aren't the bind clause for the new directive that
- // loop is lowered to.
+ // bind clause is present in loop directive. When the loop directive is
+ // changed to a new directive the bind clause is not used. So, we should
+ // set flag indicating to only use the clauses that aren't the
+ // bind clause.
UseClausesWithoutBind = true;
}

@@ -6213,26 +6230,35 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind PrevMappedDirective) {
StmtResult Res = StmtError();
OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
+ llvm::SmallVector<OMPClause *> ClausesWithoutBind;
+ bool UseClausesWithoutBind = false;

if (const OMPBindClause *BC =
OMPExecutableDirective::getSingleClause<OMPBindClause>(Clauses))
BindKind = BC->getBindKind();

+ // Variable used to note down the DirectiveKind because mapLoopConstruct may
+ // change "Kind" variable, due to mapping of "omp loop" to other directives.
+ OpenMPDirectiveKind DK = Kind;
+ if (Kind == OMPD_loop || PrevMappedDirective == OMPD_loop) {
+ UseClausesWithoutBind = mapLoopConstruct(
+ ClausesWithoutBind, Clauses, BindKind, Kind, PrevMappedDirective,
+ StartLoc, EndLoc, DirName, CancelRegion);
+ DK = OMPD_loop;
+ }

// First check CancelRegion which is then used in checkNestingOfRegions.
if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
- checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
- BindKind, StartLoc))
+ checkNestingOfRegions(*this, DSAStack, DK, DirName, CancelRegion,
+ BindKind, StartLoc)) {
return StmtError();
+ }

// Report affected OpenMP target offloading behavior when in HIP lang-mode.
if (getLangOpts().HIP && (isOpenMPTargetExecutionDirective(Kind) ||
isOpenMPTargetDataManagementDirective(Kind)))
Diag(StartLoc, diag::warn_hip_omp_target_directives);

- llvm::SmallVector<OMPClause *> ClausesWithoutBind;
- bool UseClausesWithoutBind = false;

- UseClausesWithoutBind = mapLoopConstruct(ClausesWithoutBind, Clauses,
- BindKind, Kind, PrevMappedDirective);

llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
VarsWithInheritedDSAType VarsWithInheritedDSA;
bool ErrorFound = false;
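The net effect of the new check is easiest to see from the source side. A minimal sketch, condensed from the updated tests below rather than new functionality: a loop region with parallel or teams binding is now rejected when its closest enclosing region is a worksharing region or another loop region. The ErrorMsgNumber of 1 versus 4 selects the "parallel" or "teams" wording of err_omp_prohibited_region, as the expected-error strings in the test file show.

void rejected_nesting_sketch() {   // condensed from the updated loop_bind_messages.cpp tests
  #pragma omp parallel for         // worksharing parent region
  for (int i = 0; i < 8; i++) {
    // error: region cannot be closely nested inside 'parallel for' region;
    // perhaps you forget to enclose 'omp loop' directive into a parallel region?
    #pragma omp loop bind(parallel)
    for (int j = 0; j < 8; j++) { }
  }
}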
191 changes: 173 additions & 18 deletions clang/test/OpenMP/loop_bind_messages.cpp
@@ -4,6 +4,7 @@

#define NNN 50
int aaa[NNN];
int aaa2[NNN][NNN];

void parallel_loop() {
#pragma omp parallel
@@ -13,10 +14,82 @@ void parallel_loop() {
aaa[j] = j*NNN;
}
}

#pragma omp parallel for
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}

#pragma omp parallel
#pragma omp for nowait
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}

#pragma omp parallel for
for (int i = 0 ; i < NNN ; i++) {
#pragma omp nothing
#pragma omp loop
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}

#pragma omp target teams distribute parallel for
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}

#pragma omp target parallel
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(parallel)
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}

#pragma omp parallel for
for (int i = 0; i < 100; ++i) {
#pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'loop' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa[j] = j*NNN;
}
}
}

#pragma omp parallel
{
#pragma omp sections
{
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'sections' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}

#pragma omp section
{
aaa[NNN-1] = NNN;
}
}
}
}

void teams_loop() {
int var1, var2;
int var1;
int total = 0;

#pragma omp teams
{
@@ -32,24 +105,22 @@
}
}
}
}

void orphan_loop_with_bind() {
#pragma omp loop bind(parallel)
for (int j = 0 ; j < NNN ; j++) {
aaa[j] = j*NNN;
#pragma omp target teams
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(teams)
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}
}

void orphan_loop_no_bind() {
#pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
for (int j = 0 ; j < NNN ; j++) {
aaa[j] = j*NNN;
#pragma omp target teams distribute parallel for
for (int i = 0 ; i < NNN ; i++) {
#pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa2[i][j] = i+j;
}
}
}

void teams_loop_reduction() {
int total = 0;

#pragma omp teams
{
@@ -63,14 +134,98 @@
total+=aaa[j];
}
}

#pragma omp teams num_teams(8) thread_limit(256)
#pragma omp distribute parallel for dist_schedule(static, 1024) \
schedule(static, 64)
for (int i = 0; i < NNN; i++) {
#pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
for (int j = 0; j < NNN; j++) {
aaa2[i][j] = i+j;
}
}

#pragma omp teams
for (int i = 0; i < NNN; i++) {
#pragma omp loop bind(thread)
for (int j = 0 ; j < NNN ; j++) {
aaa[i] = i+i*NNN;
}
}

#pragma omp teams loop
for (int i = 0; i < NNN; i++) {
#pragma omp loop
for (int j = 0 ; j < NNN ; j++) {
aaa[i] = i+i*NNN;
}
}

#pragma omp teams loop
for (int i = 0; i < NNN; i++) {
#pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'teams loop' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa[i] = i+i*NNN;
}
}
}

void thread_loop() {
#pragma omp parallel
for (int i = 0; i < NNN; i++) {
#pragma omp loop bind(thread)
for (int j = 0 ; j < NNN ; j++) {
aaa[i] = i+i*NNN;
}
}

#pragma omp teams
for (int i = 0; i < NNN; i++) {
#pragma omp loop bind(thread)
for (int j = 0 ; j < NNN ; j++) {
aaa[i] = i+i*NNN;
}
}
}

void parallel_for_with_loop_teams_bind(){
#pragma omp parallel for
for (int i = 0; i < NNN; i++) {
#pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
for (int j = 0 ; j < NNN ; j++) {
aaa[i] = i+i*NNN;
}
}
}

void orphan_loops() {
#pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
for (int j = 0 ; j < NNN ; j++) {
aaa[j] = j*NNN;
}

#pragma omp loop bind(parallel)
for (int j = 0 ; j < NNN ; j++) {
aaa[j] = j*NNN;
}

#pragma omp loop bind(teams)
for (int i = 0; i < NNN; i++) {
aaa[i] = i+i*NNN;
}

#pragma omp loop bind(thread)
for (int i = 0; i < NNN; i++) {
aaa[i] = i+i*NNN;
}
}

int main(int argc, char *argv[]) {
parallel_loop();
teams_loop();
orphan_loop_with_bind();
orphan_loop_no_bind();
teams_loop_reduction();
thread_loop();
parallel_for_with_loop_teams_bind();
orphan_loops();
}

#endif
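For contrast with the error cases above, a short sketch (not part of the test file) of placements these tests continue to accept: a loop bound to parallel directly inside a parallel region, and one bound to teams directly inside a teams region; bind(thread) remains accepted in either, as the thread_loop tests show.

void accepted_nesting_sketch() {   // hypothetical summary, not in loop_bind_messages.cpp
  #pragma omp parallel
  for (int i = 0; i < 8; i++) {
    #pragma omp loop bind(parallel)   // parent is 'parallel', not a worksharing region: accepted
    for (int j = 0; j < 8; j++) { }
  }

  #pragma omp teams
  {
    #pragma omp loop bind(teams)      // parent is 'teams': accepted
    for (int j = 0; j < 8; j++) { }
  }
}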
8 changes: 6 additions & 2 deletions clang/test/PCH/pragma-loop.cpp
@@ -116,9 +116,13 @@ class pragma_test {

inline void run10(int *List, int Length) {
int i = 0;
- #pragma omp loop bind(teams)
+ int j = 0;
+ #pragma omp teams
for (int i = 0; i < Length; i++) {
- List[i] = i;
+ #pragma omp loop bind(teams)
+ for (int j = 0; j < Length; j++) {
+ List[i] = i+j;
+ }
}
}
