[MLIR][OpenMP] NFC: Sort clauses alphabetically (1/2) #101193

Merged (1 commit) on Jul 31, 2024

2 changes: 1 addition & 1 deletion flang/test/Lower/OpenMP/task.f90
@@ -227,7 +227,7 @@ subroutine task_multiple_clauses()
integer :: x, y, z
logical :: buzz

!CHECK: omp.task if(%{{.+}}) final(%{{.+}}) priority(%{{.+}}) allocate(%{{.+}} : i64 -> %{{.+}} : !fir.ref<i32>) {
!CHECK: omp.task allocate(%{{.+}} : i64 -> %{{.+}} : !fir.ref<i32>) final(%{{.+}}) if(%{{.+}}) priority(%{{.+}}) {
!$omp task if(buzz) final(buzz) priority(z) allocate(omp_high_bw_mem_alloc: x) private(x) firstprivate(y)

!CHECK: %[[X_PRIV_ALLOCA:.+]] = fir.alloca i32 {bindc_name = "x", pinned, uniq_name = "_QFtask_multiple_clausesEx"}
39 changes: 16 additions & 23 deletions mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -132,13 +132,12 @@ def ParallelOp : OpenMP_Op<"parallel", traits = [
DeclareOpInterfaceMethods<OutlineableOpenMPOpInterface>,
RecursiveMemoryEffects
], clauses = [
// TODO: Sort clauses alphabetically.
OpenMP_AllocateClauseSkip<assemblyFormat = true>,
OpenMP_IfClauseSkip<assemblyFormat = true>,
OpenMP_NumThreadsClauseSkip<assemblyFormat = true>,
OpenMP_AllocateClauseSkip<assemblyFormat = true>,
OpenMP_ReductionClauseSkip<assemblyFormat = true>,
OpenMP_PrivateClauseSkip<assemblyFormat = true>,
OpenMP_ProcBindClauseSkip<assemblyFormat = true>,
OpenMP_PrivateClauseSkip<assemblyFormat = true>
OpenMP_ReductionClauseSkip<assemblyFormat = true>
], singleRegion = true> {
let summary = "parallel construct";
let description = [{
@@ -195,9 +194,8 @@ def TerminatorOp : OpenMP_Op<"terminator", [Terminator, Pure]> {
def TeamsOp : OpenMP_Op<"teams", traits = [
AttrSizedOperandSegments, RecursiveMemoryEffects
], clauses = [
// TODO: Sort clauses alphabetically.
OpenMP_NumTeamsClause, OpenMP_IfClause, OpenMP_ThreadLimitClause,
OpenMP_AllocateClause, OpenMP_ReductionClause, OpenMP_PrivateClause
OpenMP_AllocateClause, OpenMP_IfClause, OpenMP_NumTeamsClause,
OpenMP_PrivateClause, OpenMP_ReductionClause, OpenMP_ThreadLimitClause
], singleRegion = true> {
let summary = "teams construct";
let description = [{
@@ -237,9 +235,8 @@ def SectionOp : OpenMP_Op<"section", [HasParent<"SectionsOp">],
def SectionsOp : OpenMP_Op<"sections", traits = [
AttrSizedOperandSegments
], clauses = [
// TODO: Sort clauses alphabetically.
OpenMP_ReductionClause, OpenMP_AllocateClause, OpenMP_NowaitClause,
OpenMP_PrivateClause
OpenMP_AllocateClause, OpenMP_NowaitClause, OpenMP_PrivateClause,
OpenMP_ReductionClause
], singleRegion = true> {
let summary = "sections construct";
let description = [{
@@ -362,15 +359,14 @@ def WsloopOp : OpenMP_Op<"wsloop", traits = [
AttrSizedOperandSegments, DeclareOpInterfaceMethods<LoopWrapperInterface>,
RecursiveMemoryEffects, SingleBlock
], clauses = [
// TODO: Sort clauses alphabetically.
OpenMP_AllocateClauseSkip<assemblyFormat = true>,
OpenMP_LinearClauseSkip<assemblyFormat = true>,
OpenMP_ReductionClauseSkip<assemblyFormat = true>,
OpenMP_ScheduleClauseSkip<assemblyFormat = true>,
OpenMP_NowaitClauseSkip<assemblyFormat = true>,
OpenMP_OrderedClauseSkip<assemblyFormat = true>,
OpenMP_OrderClauseSkip<assemblyFormat = true>,
OpenMP_AllocateClauseSkip<assemblyFormat = true>,
OpenMP_PrivateClauseSkip<assemblyFormat = true>
OpenMP_OrderedClauseSkip<assemblyFormat = true>,
OpenMP_PrivateClauseSkip<assemblyFormat = true>,
OpenMP_ReductionClauseSkip<assemblyFormat = true>,
OpenMP_ScheduleClauseSkip<assemblyFormat = true>
], singleRegion = true> {
let summary = "worksharing-loop construct";
let description = [{
@@ -506,8 +502,7 @@ def DistributeOp : OpenMP_Op<"distribute", traits = [
AttrSizedOperandSegments, DeclareOpInterfaceMethods<LoopWrapperInterface>,
RecursiveMemoryEffects, SingleBlock
], clauses = [
// TODO: Sort clauses alphabetically.
OpenMP_DistScheduleClause, OpenMP_AllocateClause, OpenMP_OrderClause,
OpenMP_AllocateClause, OpenMP_DistScheduleClause, OpenMP_OrderClause,
OpenMP_PrivateClause
], singleRegion = true> {
let summary = "distribute construct";
@@ -560,11 +555,9 @@ def TaskOp : OpenMP_Op<"task", traits = [
OutlineableOpenMPOpInterface
], clauses = [
// TODO: Complete clause list (affinity, detach).
// TODO: Sort clauses alphabetically.
OpenMP_IfClause, OpenMP_FinalClause, OpenMP_UntiedClause,
OpenMP_MergeableClause, OpenMP_InReductionClause,
OpenMP_PriorityClause, OpenMP_DependClause, OpenMP_AllocateClause,
OpenMP_PrivateClause
OpenMP_AllocateClause, OpenMP_DependClause, OpenMP_FinalClause,
OpenMP_IfClause, OpenMP_InReductionClause, OpenMP_MergeableClause,
OpenMP_PriorityClause, OpenMP_PrivateClause, OpenMP_UntiedClause
], singleRegion = true> {
let summary = "task construct";
let description = [{
12 changes: 6 additions & 6 deletions mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
@@ -444,16 +444,16 @@ struct ParallelOpLowering : public OpRewritePattern<scf::ParallelOp> {
// Create the parallel wrapper.
auto ompParallel = rewriter.create<omp::ParallelOp>(
loc,
/* if_expr = */ Value{},
/* num_threads = */ numThreadsVar,
/* allocate_vars = */ llvm::SmallVector<Value>{},
/* allocator_vars = */ llvm::SmallVector<Value>{},
/* if_expr = */ Value{},
/* num_threads = */ numThreadsVar,
/* private_vars = */ ValueRange(),
/* private_syms = */ nullptr,
/* proc_bind_kind = */ omp::ClauseProcBindKindAttr{},
/* reduction_vars = */ llvm::SmallVector<Value>{},
/* reduction_byref = */ DenseBoolArrayAttr{},
/* reduction_syms = */ ArrayAttr{},
/* proc_bind_kind = */ omp::ClauseProcBindKindAttr{},
/* private_vars = */ ValueRange(),
/* private_syms = */ nullptr);
/* reduction_syms = */ ArrayAttr{});
{

OpBuilder::InsertionGuard guard(rewriter);
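Not part of the patch: for out-of-tree code that creates omp.parallel with positional arguments, a minimal sketch of a call in the new alphabetical clause order, mirroring the conversion above. The builder, loc, and numThreadsVar values are assumed to come from the surrounding lowering.

// Sketch only (assumes an mlir::OpBuilder builder, an mlir::Location loc, and
// an optional num-threads mlir::Value numThreadsVar from the enclosing pass).
// Arguments follow the new alphabetical clause order.
auto ompParallel = builder.create<mlir::omp::ParallelOp>(
    loc,
    /*allocate_vars=*/mlir::ValueRange(),
    /*allocator_vars=*/mlir::ValueRange(),
    /*if_expr=*/mlir::Value(),
    /*num_threads=*/numThreadsVar,
    /*private_vars=*/mlir::ValueRange(),
    /*private_syms=*/nullptr,
    /*proc_bind_kind=*/mlir::omp::ClauseProcBindKindAttr(),
    /*reduction_vars=*/mlir::ValueRange(),
    /*reduction_byref=*/mlir::DenseBoolArrayAttr(),
    /*reduction_syms=*/mlir::ArrayAttr());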
97 changes: 49 additions & 48 deletions mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -1475,25 +1475,25 @@ LogicalResult TargetOp::verify() {

void ParallelOp::build(OpBuilder &builder, OperationState &state,
ArrayRef<NamedAttribute> attributes) {
ParallelOp::build(
builder, state, /*if_expr=*/nullptr, /*num_threads=*/nullptr,
/*allocate_vars=*/ValueRange(), /*allocator_vars=*/ValueRange(),
/*reduction_vars=*/ValueRange(), /*reduction_byref=*/nullptr,
/*reduction_syms=*/nullptr, /*proc_bind_kind=*/nullptr,
/*private_vars=*/ValueRange(), /*private_syms=*/nullptr);
ParallelOp::build(builder, state, /*allocate_vars=*/ValueRange(),
/*allocator_vars=*/ValueRange(), /*if_expr=*/nullptr,
/*num_threads=*/nullptr, /*private_vars=*/ValueRange(),
/*private_syms=*/nullptr, /*proc_bind_kind=*/nullptr,
/*reduction_vars=*/ValueRange(),
/*reduction_byref=*/nullptr, /*reduction_syms=*/nullptr);
state.addAttributes(attributes);
}

void ParallelOp::build(OpBuilder &builder, OperationState &state,
const ParallelOperands &clauses) {
MLIRContext *ctx = builder.getContext();

ParallelOp::build(
builder, state, clauses.ifVar, clauses.numThreads, clauses.allocateVars,
clauses.allocatorVars, clauses.reductionVars,
makeDenseBoolArrayAttr(ctx, clauses.reductionByref),
makeArrayAttr(ctx, clauses.reductionSyms), clauses.procBindKind,
clauses.privateVars, makeArrayAttr(ctx, clauses.privateSyms));
ParallelOp::build(builder, state, clauses.allocateVars, clauses.allocatorVars,
clauses.ifVar, clauses.numThreads, clauses.privateVars,
makeArrayAttr(ctx, clauses.privateSyms),
clauses.procBindKind, clauses.reductionVars,
makeDenseBoolArrayAttr(ctx, clauses.reductionByref),
makeArrayAttr(ctx, clauses.reductionSyms));
}

template <typename OpType>
@@ -1582,12 +1582,13 @@ void TeamsOp::build(OpBuilder &builder, OperationState &state,
const TeamsOperands &clauses) {
MLIRContext *ctx = builder.getContext();
// TODO Store clauses in op: privateVars, privateSyms.
TeamsOp::build(builder, state, clauses.numTeamsLower, clauses.numTeamsUpper,
clauses.ifVar, clauses.threadLimit, clauses.allocateVars,
clauses.allocatorVars, clauses.reductionVars,
TeamsOp::build(builder, state, clauses.allocateVars, clauses.allocatorVars,
clauses.ifVar, clauses.numTeamsLower, clauses.numTeamsUpper,
/*private_vars=*/{},
/*private_syms=*/nullptr, clauses.reductionVars,
makeDenseBoolArrayAttr(ctx, clauses.reductionByref),
makeArrayAttr(ctx, clauses.reductionSyms), /*private_vars=*/{},
/*private_syms=*/nullptr);
makeArrayAttr(ctx, clauses.reductionSyms),
clauses.threadLimit);
}

LogicalResult TeamsOp::verify() {
@@ -1630,11 +1631,11 @@ void SectionsOp::build(OpBuilder &builder, OperationState &state,
const SectionsOperands &clauses) {
MLIRContext *ctx = builder.getContext();
// TODO Store clauses in op: privateVars, privateSyms.
SectionsOp::build(builder, state, clauses.reductionVars,
SectionsOp::build(builder, state, clauses.allocateVars, clauses.allocatorVars,
clauses.nowait, /*private_vars=*/{},
/*private_syms=*/nullptr, clauses.reductionVars,
makeDenseBoolArrayAttr(ctx, clauses.reductionByref),
makeArrayAttr(ctx, clauses.reductionSyms),
clauses.allocateVars, clauses.allocatorVars, clauses.nowait,
/*private_vars=*/{}, /*private_syms=*/nullptr);
makeArrayAttr(ctx, clauses.reductionSyms));
}

LogicalResult SectionsOp::verify() {
@@ -1715,14 +1716,14 @@ void printWsloop(OpAsmPrinter &p, Operation *op, Region &region,

void WsloopOp::build(OpBuilder &builder, OperationState &state,
ArrayRef<NamedAttribute> attributes) {
build(builder, state, /*linear_vars=*/ValueRange(),
/*linear_step_vars=*/ValueRange(), /*reduction_vars=*/ValueRange(),
/*reduction_byref=*/nullptr, /*reduction_syms=*/nullptr,
/*schedule_kind=*/nullptr, /*schedule_chunk=*/nullptr,
/*schedule_mod=*/nullptr, /*schedule_simd=*/false, /*nowait=*/false,
/*ordered=*/nullptr, /*order=*/nullptr, /*order_mod=*/nullptr,
/*allocate_vars=*/{}, /*allocator_vars=*/{}, /*private_vars=*/{},
/*private_syms=*/nullptr);
build(builder, state, /*allocate_vars=*/{}, /*allocator_vars=*/{},
/*linear_vars=*/ValueRange(), /*linear_step_vars=*/ValueRange(),
/*nowait=*/false, /*order=*/nullptr, /*order_mod=*/nullptr,
/*ordered=*/nullptr, /*private_vars=*/{}, /*private_syms=*/nullptr,
/*reduction_vars=*/ValueRange(), /*reduction_byref=*/nullptr,
/*reduction_syms=*/nullptr, /*schedule_kind=*/nullptr,
/*schedule_chunk=*/nullptr, /*schedule_mod=*/nullptr,
/*schedule_simd=*/false);
state.addAttributes(attributes);
}

@@ -1731,15 +1732,15 @@ void WsloopOp::build(OpBuilder &builder, OperationState &state,
MLIRContext *ctx = builder.getContext();
// TODO: Store clauses in op: allocateVars, allocatorVars, privateVars,
// privateSyms.
WsloopOp::build(builder, state, clauses.linearVars, clauses.linearStepVars,
clauses.reductionVars,
makeDenseBoolArrayAttr(ctx, clauses.reductionByref),
makeArrayAttr(ctx, clauses.reductionSyms),
clauses.scheduleKind, clauses.scheduleChunk,
clauses.scheduleMod, clauses.scheduleSimd, clauses.nowait,
clauses.ordered, clauses.order, clauses.orderMod,
/*allocate_vars=*/{}, /*allocator_vars=*/{},
/*private_vars=*/{}, /*private_syms=*/nullptr);
WsloopOp::build(
builder, state,
/*allocate_vars=*/{}, /*allocator_vars=*/{}, clauses.linearVars,
clauses.linearStepVars, clauses.nowait, clauses.order, clauses.orderMod,
clauses.ordered, /*private_vars=*/{}, /*private_syms=*/nullptr,
clauses.reductionVars,
makeDenseBoolArrayAttr(ctx, clauses.reductionByref),
makeArrayAttr(ctx, clauses.reductionSyms), clauses.scheduleKind,
clauses.scheduleChunk, clauses.scheduleMod, clauses.scheduleSimd);
}

LogicalResult WsloopOp::verify() {
@@ -1804,10 +1805,10 @@ LogicalResult SimdOp::verify() {
void DistributeOp::build(OpBuilder &builder, OperationState &state,
const DistributeOperands &clauses) {
// TODO Store clauses in op: privateVars, privateSyms.
DistributeOp::build(builder, state, clauses.distScheduleStatic,
clauses.distScheduleChunkSize, clauses.allocateVars,
clauses.allocatorVars, clauses.order, clauses.orderMod,
/*private_vars=*/{}, /*private_syms=*/nullptr);
DistributeOp::build(
builder, state, clauses.allocateVars, clauses.allocatorVars,
clauses.distScheduleStatic, clauses.distScheduleChunkSize, clauses.order,
clauses.orderMod, /*private_vars=*/{}, /*private_syms=*/nullptr);
}

LogicalResult DistributeOp::verify() {
@@ -1934,13 +1935,13 @@ void TaskOp::build(OpBuilder &builder, OperationState &state,
const TaskOperands &clauses) {
MLIRContext *ctx = builder.getContext();
// TODO Store clauses in op: privateVars, privateSyms.
TaskOp::build(builder, state, clauses.ifVar, clauses.final, clauses.untied,
clauses.mergeable, clauses.inReductionVars,
makeDenseBoolArrayAttr(ctx, clauses.inReductionByref),
makeArrayAttr(ctx, clauses.inReductionSyms), clauses.priority,
TaskOp::build(builder, state, clauses.allocateVars, clauses.allocatorVars,
makeArrayAttr(ctx, clauses.dependKinds), clauses.dependVars,
clauses.allocateVars, clauses.allocatorVars,
/*private_vars=*/{}, /*private_syms=*/nullptr);
clauses.final, clauses.ifVar, clauses.inReductionVars,
makeDenseBoolArrayAttr(ctx, clauses.inReductionByref),
makeArrayAttr(ctx, clauses.inReductionSyms), clauses.mergeable,
clauses.priority, /*private_vars=*/{}, /*private_syms=*/nullptr,
clauses.untied);
}

LogicalResult TaskOp::verify() {
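Likewise, not part of the patch: a hedged sketch of the default omp.wsloop builder called with every clause left empty, following the same alphabetical argument order as WsloopOp::build above. The builder and loc values are placeholders from the surrounding code.

// Sketch only: mirrors the argument order used by WsloopOp::build above;
// all clause operands and attributes are left empty or defaulted.
auto wsloop = builder.create<mlir::omp::WsloopOp>(
    loc,
    /*allocate_vars=*/mlir::ValueRange(),
    /*allocator_vars=*/mlir::ValueRange(),
    /*linear_vars=*/mlir::ValueRange(),
    /*linear_step_vars=*/mlir::ValueRange(),
    /*nowait=*/false,
    /*order=*/nullptr,
    /*order_mod=*/nullptr,
    /*ordered=*/nullptr,
    /*private_vars=*/mlir::ValueRange(),
    /*private_syms=*/nullptr,
    /*reduction_vars=*/mlir::ValueRange(),
    /*reduction_byref=*/nullptr,
    /*reduction_syms=*/nullptr,
    /*schedule_kind=*/nullptr,
    /*schedule_chunk=*/nullptr,
    /*schedule_mod=*/nullptr,
    /*schedule_simd=*/false);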
16 changes: 8 additions & 8 deletions mlir/test/Dialect/OpenMP/invalid.mlir
@@ -1394,7 +1394,7 @@ func.func @omp_teams_allocate(%data_var : memref<i32>) {
// expected-error @below {{expected equal sizes for allocate and allocator variables}}
"omp.teams" (%data_var) ({
omp.terminator
}) {operandSegmentSizes = array<i32: 0,0,0,0,1,0,0,0>} : (memref<i32>) -> ()
}) {operandSegmentSizes = array<i32: 1,0,0,0,0,0,0,0>} : (memref<i32>) -> ()
omp.terminator
}
return
@@ -1407,7 +1407,7 @@ func.func @omp_teams_num_teams1(%lb : i32) {
// expected-error @below {{expected num_teams upper bound to be defined if the lower bound is defined}}
"omp.teams" (%lb) ({
omp.terminator
}) {operandSegmentSizes = array<i32: 1,0,0,0,0,0,0,0>} : (i32) -> ()
}) {operandSegmentSizes = array<i32: 0,0,0,1,0,0,0,0>} : (i32) -> ()
omp.terminator
}
return
@@ -1432,7 +1432,7 @@ func.func @omp_sections(%data_var : memref<i32>) -> () {
// expected-error @below {{expected equal sizes for allocate and allocator variables}}
"omp.sections" (%data_var) ({
omp.terminator
}) {operandSegmentSizes = array<i32: 0,1,0,0>} : (memref<i32>) -> ()
}) {operandSegmentSizes = array<i32: 1,0,0,0>} : (memref<i32>) -> ()
return
}

@@ -1442,7 +1442,7 @@ func.func @omp_sections(%data_var : memref<i32>) -> () {
// expected-error @below {{expected as many reduction symbol references as reduction variables}}
"omp.sections" (%data_var) ({
omp.terminator
}) {operandSegmentSizes = array<i32: 1,0,0,0>} : (memref<i32>) -> ()
}) {operandSegmentSizes = array<i32: 0,0,0,1>} : (memref<i32>) -> ()
return
}

@@ -1623,7 +1623,7 @@ func.func @omp_task_depend(%data_var: memref<i32>) {
// expected-error @below {{op expected as many depend values as depend variables}}
"omp.task"(%data_var) ({
"omp.terminator"() : () -> ()
}) {depend_kinds = [], operandSegmentSizes = array<i32: 0, 0, 0, 0, 1, 0, 0, 0>} : (memref<i32>) -> ()
}) {depend_kinds = [], operandSegmentSizes = array<i32: 0, 0, 1, 0, 0, 0, 0, 0>} : (memref<i32>) -> ()
"func.return"() : () -> ()
}

@@ -2137,7 +2137,7 @@ func.func @omp_target_depend(%data_var: memref<i32>) {

func.func @omp_distribute_schedule(%chunk_size : i32) -> () {
// expected-error @below {{op chunk size set without dist_schedule_static being present}}
"omp.distribute"(%chunk_size) <{operandSegmentSizes = array<i32: 1, 0, 0, 0>}> ({
"omp.distribute"(%chunk_size) <{operandSegmentSizes = array<i32: 0, 0, 1, 0>}> ({
"omp.terminator"() : () -> ()
}) : (i32) -> ()
}
@@ -2146,7 +2146,7 @@ func.func @omp_distribute_schedule(%chunk_size : i32) -> () {

func.func @omp_distribute_allocate(%data_var : memref<i32>) -> () {
// expected-error @below {{expected equal sizes for allocate and allocator variables}}
"omp.distribute"(%data_var) <{operandSegmentSizes = array<i32: 0, 1, 0, 0>}> ({
"omp.distribute"(%data_var) <{operandSegmentSizes = array<i32: 1, 0, 0, 0>}> ({
"omp.terminator"() : () -> ()
}) : (memref<i32>) -> ()
}
@@ -2340,7 +2340,7 @@ func.func @undefined_privatizer(%arg0: index) {
// -----
func.func @undefined_privatizer(%arg0: !llvm.ptr) {
// expected-error @below {{inconsistent number of private variables and privatizer op symbols, private vars: 1 vs. privatizer op symbols: 2}}
"omp.parallel"(%arg0) <{operandSegmentSizes = array<i32: 0, 0, 0, 0, 0, 1>, private_syms = [@x.privatizer, @y.privatizer]}> ({
"omp.parallel"(%arg0) <{operandSegmentSizes = array<i32: 0, 0, 0, 0, 1, 0>, private_syms = [@x.privatizer, @y.privatizer]}> ({
^bb0(%arg2: !llvm.ptr):
omp.terminator
}) : (!llvm.ptr) -> ()