
Commit 3e7d98d

Fixing definition of ShardShapeOp and lowering it to MPI
1 parent 30e6cd1 commit 3e7d98d

7 files changed, +269 −37 lines changed

mlir/include/mlir/Dialect/Mesh/IR/MeshOps.td

Lines changed: 15 additions & 7 deletions
@@ -345,24 +345,32 @@ def Mesh_GetShardingOp : Mesh_Op<"get_sharding", [Pure]> {
   }];
 }
 
-def Mesh_ShardShapeOp : Mesh_Op<"shard_shape", [Pure]> {
-  let summary = "Get the shard shape of a given process/device.";
+def Mesh_ShardShapeOp : Mesh_Op<"shard_shape", [
+      Pure, AttrSizedOperandSegments,
+      DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>
+    ]> {
+  let summary = "Get the shard shape for a given process/device.";
   let description = [{
-    The device/process id is a linearized id of the device/process in the mesh.
+    The device/process id is a multi-index of the device/process in the mesh.
     This operation might be used during spmdization when the shard shape depends
     on (non-constant) values used in `mesh.sharding`.
   }];
   let arguments = (ins
-    DenseI64ArrayAttr:$shape,
+    DenseI64ArrayAttr:$dims,
+    Variadic<Index>:$dims_dynamic,
     Mesh_Sharding:$sharding,
-    Index:$device
+    DenseI64ArrayAttr:$device,
+    Variadic<Index>:$device_dynamic
   );
   let results = (outs Variadic<Index>:$result);
   let assemblyFormat = [{
-    custom<DimensionList>($shape) $sharding $device attr-dict `:` type($result)
+    `dims` `=` custom<DynamicIndexList>($dims_dynamic, $dims)
+    `sharding` `=` $sharding
+    `device` `=` custom<DynamicIndexList>($device_dynamic, $device)
+    attr-dict `:` type(results)
  }];
  let builders = [
-    OpBuilder<(ins "ArrayRef<int64_t>":$shape, "Value":$sharding, "Value":$device)>
+    OpBuilder<(ins "ArrayRef<int64_t>":$dims, "ArrayRef<Value>":$dims_dyn, "Value":$sharding, "ValueRange":$device)>
  ];
 }
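With the new definition, the tensor dimensions and the device id are printed as mixed static/dynamic index lists. For example (taken verbatim from the ops.mlir test updated further down):

%shp:2 = mesh.shard_shape dims = [8, %c3] sharding = %s device = [%c3] : index, index
%shp1:2 = mesh.shard_shape dims = [8, 4] sharding = %s device = [3] : index, index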

mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp

Lines changed: 145 additions & 9 deletions
@@ -380,23 +380,159 @@ struct ConvertNeighborsLinearIndicesOp
         [&](OpBuilder &builder, Location loc) {
           SmallVector<Value> tmp = mIdx;
           tmp[axes[0]] =
-              rewriter.create<arith::AddIOp>(op.getLoc(), orgIdx, one)
-                  .getResult();
+              rewriter.create<arith::AddIOp>(op.getLoc(), orgIdx, one);
           builder.create<scf::YieldOp>(
               loc, multiToLinearIndex(loc, rewriter, tmp, dims));
         });
     rewriter.replaceOp(op, ValueRange{down.getResult(0), up.getResult(0)});
-    return mlir::success();
+    return success();
   }
 };
 
-struct ConvertUpdateHaloOp
-    : public mlir::OpRewritePattern<mlir::mesh::UpdateHaloOp> {
-  using OpRewritePattern::OpRewritePattern;
+struct ConvertShardShapeOp : public OpConversionPattern<ShardShapeOp> {
+  using OpConversionPattern::OpConversionPattern;
 
-  mlir::LogicalResult
-  matchAndRewrite(mlir::mesh::UpdateHaloOp op,
-                  mlir::PatternRewriter &rewriter) const override {
+  LogicalResult
+  matchAndRewrite(ShardShapeOp op, OneToNOpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto sharding = op.getSharding().getDefiningOp<ShardingOp>();
+    if (!sharding) {
+      return op->emitError()
+             << "Expected ShardingOp as defining op for sharding"
+             << " but found " << adaptor.getSharding()[0].getDefiningOp();
+    }
+
+    // Compute the sharded shape by applying the sharding to the input shape.
+    // Without shardedDimsOffsets in the sharding, the shard shape is computed
+    // by dividing the dimension size by the number of shards in that dimension
+    // (which is given by the size of the mesh axes provided in split-axes).
+    // Odd elements get distributed to trailing shards.
+    // If a shardedDimsOffsets is provided, the shard shape is computed by
+    // subtracting the offset of the current shard from the offset of the next
+    // shard.
+
+    Location loc = op.getLoc();
+    Type index = rewriter.getIndexType();
+
+    // This is a 1:N conversion because the sharding op is a 1:3 conversion.
+    // The operands in the adaptor are a vector<ValueRange>. For dims and
+    // device we have a 1:1 conversion.
+    // For simpler access fill a vector with the dynamic dims.
+    SmallVector<Value> dynDims, dynDevice;
+    for (auto dim : adaptor.getDimsDynamic()) {
+      // Type conversion should be 1:1 for ints.
+      assert(dim.size() == 1);
+      dynDims.emplace_back(dim[0]);
+    }
+    // Same for device.
+    for (auto device : adaptor.getDeviceDynamic()) {
+      assert(device.size() == 1);
+      dynDevice.emplace_back(device[0]);
+    }
+
+    // To keep the code simple, convert dims/device to values when they are
+    // attributes. Count on canonicalization to fold static values.
+    auto shape = getMixedAsValues(rewriter, loc, op.getDims(), dynDims, index);
+    auto multiIdx =
+        getMixedAsValues(rewriter, loc, adaptor.getDevice(), dynDevice, index);
+
+    // Get the MeshOp, the mesh shape is needed to compute the sharded shape.
+    SymbolTableCollection symbolTableCollection;
+    auto meshOp = getMesh(sharding, symbolTableCollection);
+    // For now we only support static mesh shapes.
+    if (ShapedType::isDynamicShape(meshOp.getShape()))
+      return failure();
+
+    auto splitAxes = sharding.getSplitAxes().getAxes();
+    // shardedDimsOffsets are optional and might be Values (not attributes).
+    // Also, the shardId might be dynamic, which means the position in the
+    // shardedDimsOffsets is not statically known. Create a tensor of the
+    // shardedDimsOffsets and later extract the offsets for computing the
+    // local shard-size.
+    Value shardedDimsOffs;
+    {
+      auto tmp = getMixedAsValues(
+          rewriter, loc, sharding.getStaticShardedDimsOffsets(),
+          sharding.getDynamicShardedDimsOffsets(), index);
+      if (!tmp.empty())
+        shardedDimsOffs = rewriter.create<tensor::FromElementsOp>(
+            loc, RankedTensorType::get({(int64_t)tmp.size()}, index), tmp);
+    }
+
+    // With static mesh shape the sizes of the split axes are known.
+    // Hence the start/pos for each split axis in shardedDimsOffsets can be
+    // computed statically.
+    int64_t pos = 0;
+    SmallVector<Value> shardShape;
+    Value zero =
+        rewriter.create<arith::ConstantOp>(loc, rewriter.getZeroAttr(index));
+    Value one =
+        rewriter.create<arith::ConstantOp>(loc, rewriter.getOneAttr(index));
+
+    // Iterate over the dimensions of the tensor shape, get their split axes,
+    // and compute the sharded shape.
+    for (auto [i, dim] : llvm::enumerate(shape)) {
+      // Trailing dimensions might not be annotated.
+      if (i < splitAxes.size() && !splitAxes[i].empty()) {
+        auto axes = splitAxes[i];
+        // The current dimension might not be sharded.
+        // Create a value from the static position in shardedDimsOffsets.
+        Value posVal =
+            rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(pos));
+        // Get the index of the local shard in the mesh axis.
+        Value idx = multiIdx[axes[0]];
+        auto _numShards =
+            collectiveProcessGroupSize(axes.asArrayRef(), meshOp.getShape());
+        if (shardedDimsOffs) {
+          // If sharded dims offsets are provided, use them to compute the
+          // sharded shape.
+          if (axes.size() > 1) {
+            return op->emitError() << "Only single axis sharding is "
+                                   << "supported for each dimension.";
+          }
+          idx = rewriter.create<arith::AddIOp>(loc, posVal, idx);
+          // Compute size = shardedDimsOffs[idx + 1] - shardedDimsOffs[idx].
+          Value off =
+              rewriter.create<tensor::ExtractOp>(loc, shardedDimsOffs, idx);
+          idx = rewriter.create<arith::AddIOp>(loc, idx, one);
+          Value nextOff =
+              rewriter.create<tensor::ExtractOp>(loc, shardedDimsOffs, idx);
+          Value sz = rewriter.create<arith::SubIOp>(loc, nextOff, off);
+          shardShape.emplace_back(sz);
+        } else {
+          auto numShards = rewriter.create<arith::ConstantOp>(
+              loc, rewriter.getIndexAttr(_numShards));
+          // Compute shard dim size by distributing odd elements to trailing
+          // shards:
+          //   sz = dim / numShards
+          //        + (idx >= (numShards - (dim % numShards)) ? 1 : 0)
+          Value sz = rewriter.create<arith::DivSIOp>(loc, dim, numShards);
+          Value sz1 = rewriter.create<arith::RemSIOp>(loc, dim, numShards);
+          sz1 = rewriter.create<arith::SubIOp>(loc, numShards, sz1);
+          auto cond = rewriter.create<arith::CmpIOp>(
+              loc, arith::CmpIPredicate::sge, idx, sz1);
+          Value odd = rewriter.create<arith::SelectOp>(loc, cond, one, zero);
+          sz = rewriter.create<arith::AddIOp>(loc, sz, odd);
+          shardShape.emplace_back(sz);
+        }
+        pos += _numShards + 1; // add one for the total size
+      } // else no sharding if split axis is empty or no split axis
+      // If no size was added -> no sharding in this dimension.
+      if (shardShape.size() <= i)
+        shardShape.emplace_back(dim);
+    }
+    assert(shardShape.size() == shape.size());
+    rewriter.replaceOp(op, shardShape);
+    return success();
+  }
+};
+
+struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(UpdateHaloOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
 
     // The input/output memref is assumed to be in C memory order.
     // Halos are exchanged as 2 blocks per dimension (one for each side: down
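The two size rules implemented above are easy to sanity-check in isolation. Below is a small standalone C++ sketch, not part of this commit and with made-up helper names, that mirrors both computations and checks them against the values exercised by the new MeshToMPI test further down (device [1, 0, 4] in the 3x4x5 mesh, i.e. linear rank 24):

#include <cassert>
#include <cstdint>
#include <vector>

// Even distribution: odd elements go to the trailing shards.
// sz = dim / numShards + (idx >= (numShards - dim % numShards) ? 1 : 0)
int64_t evenShardSize(int64_t dim, int64_t numShards, int64_t idx) {
  return dim / numShards + (idx >= numShards - dim % numShards ? 1 : 0);
}

// sharded_dims_offsets: each dimension stores numShards + 1 offsets
// back-to-back; pos is the static start of that dimension's block.
int64_t offsetShardSize(const std::vector<int64_t> &offs, int64_t pos,
                        int64_t idx) {
  return offs[pos + idx + 1] - offs[pos + idx];
}

int main() {
  // shard_shape_odd_1: dims = [9, 12, 16] on mesh 3x4x5, device [1, 0, 4].
  assert(evenShardSize(9, 3, 1) == 3);  // no remainder
  assert(evenShardSize(12, 4, 0) == 3); // no remainder
  assert(evenShardSize(16, 5, 4) == 4); // remainder 1 goes to the last shard
  // shard_shape_sharded_dims_offs: per-dimension offset blocks start at
  // pos = 0, 4, and 9 (pos += numShards + 1 after each dimension).
  std::vector<int64_t> offs = {0, 1, 4, 9,          // dim 0, 3 shards
                               0, 2, 6, 12, 12,     // dim 1, 4 shards
                               0, 3, 6, 9, 12, 15}; // dim 2, 5 shards
  assert(offsetShardSize(offs, 0, 1) == 3); // 4 - 1
  assert(offsetShardSize(offs, 4, 0) == 2); // 2 - 0
  assert(offsetShardSize(offs, 9, 4) == 3); // 15 - 12
  return 0;
}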

mlir/lib/Dialect/Mesh/IR/MeshOps.cpp

Lines changed: 11 additions & 4 deletions
@@ -831,12 +831,19 @@ MeshSharding MeshSharding::get(::mlir::FlatSymbolRefAttr mesh_,
 // mesh.shard_shape
 //===----------------------------------------------------------------------===//
 
+void ShardShapeOp::getAsmResultNames(
+    function_ref<void(Value, StringRef)> setNameFn) {
+  setNameFn(getResult()[0], "shard_shape");
+}
+
 void ShardShapeOp::build(::mlir::OpBuilder &odsBuilder,
                          ::mlir::OperationState &odsState,
-                         ::llvm::ArrayRef<int64_t> shape,
-                         ::mlir::Value sharding, ::mlir::Value device) {
-  SmallVector<mlir::Type> resType(shape.size(), odsBuilder.getIndexType());
-  build(odsBuilder, odsState, resType, shape, sharding, device);
+                         ::llvm::ArrayRef<int64_t> dims,
+                         ArrayRef<Value> dims_dyn, ::mlir::Value sharding,
+                         ::mlir::ValueRange device) {
+  SmallVector<mlir::Type> resType(dims.size(), odsBuilder.getIndexType());
+  build(odsBuilder, odsState, resType, dims, dims_dyn, sharding,
+        SmallVector<int64_t>(device.size(), ShapedType::kDynamic), device);
 }
 
 //===----------------------------------------------------------------------===//
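As a usage note: callers now pass the static dims together with one SSA value per dynamic entry, and the device as a plain ValueRange; the builder fills the static device array with kDynamic placeholders itself. A minimal, hypothetical sketch, assuming `builder`, `loc`, a `sharding` value, a `dynSize` value, and `deviceIdx` (the results of a mesh.process_multi_index op) are in scope:

SmallVector<int64_t> dims = {8, ShapedType::kDynamic}; // an 8x? shape
SmallVector<Value> dynDims = {dynSize}; // one Value per kDynamic entry
auto shapeOp = builder.create<mesh::ShardShapeOp>(
    loc, dims, dynDims, sharding, /*device=*/deviceIdx);
Value shardedDim0 = shapeOp.getResult()[0]; // one index result per dimension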

mlir/lib/Dialect/Tensor/Extensions/MeshShardingExtensions.cpp

Lines changed: 10 additions & 9 deletions
@@ -50,10 +50,10 @@ struct CreatorOpShardingInterface
                         IRMapping &spmdizationMap,
                         SymbolTableCollection &symbolTable,
                         OpBuilder &builder) const {
-    auto shardType = cast<ShapedType>(mesh::shardType(
-        op->getResult(0).getType(),
-        mesh::getMesh(op, resultShardings[0].getMeshAttr(), symbolTable),
-        resultShardings[0]));
+    auto mesh =
+        mesh::getMesh(op, resultShardings[0].getMeshAttr(), symbolTable);
+    auto shardType = cast<ShapedType>(
+        mesh::shardType(op->getResult(0).getType(), mesh, resultShardings[0]));
     Operation *newOp = nullptr;
     // if the sharding introduces a new dynamic dimension, we take it from
     // the dynamic sharding info. For now bail out if it's not
@@ -66,18 +66,19 @@ struct CreatorOpShardingInterface
     assert(oldType.getRank() == shardType.getRank());
     int currOldOprndNum = -1;
     mesh::ShardShapeOp shapeForDevice;
-    Value device;
+    ValueRange device;
     Operation *newSharding = nullptr;
     for (auto i = 0; i < oldType.getRank(); ++i) {
       if (!oldType.isDynamicDim(i) && shardType.isDynamicDim(i)) {
         if (!newSharding) {
           newSharding =
               builder.create<ShardingOp>(op->getLoc(), resultShardings[0]);
-          device = builder.create<mesh::ProcessLinearIndexOp>(
-              op->getLoc(), resultShardings[0].getMesh());
+          device =
+              builder.create<mesh::ProcessMultiIndexOp>(op->getLoc(), mesh)
+                  .getResults();
           shapeForDevice = builder.create<mesh::ShardShapeOp>(
-              op->getLoc(), oldType.getShape(), newSharding->getResult(0),
-              device);
+              op->getLoc(), oldType.getShape(), spmdizedOperands,
+              newSharding->getResult(0), device);
         }
         newOperands.emplace_back(shapeForDevice.getResult()[i]);
       } else if (oldType.isDynamicDim(i)) {

Lines changed: 75 additions & 0 deletions
@@ -0,0 +1,75 @@
+// RUN: mlir-opt %s --convert-mesh-to-mpi -canonicalize | FileCheck %s
+
+module attributes { mpi.dlti = #dlti.map<"MPI:comm_world_rank" = 24> } {
+
+  // CHECK: mesh.mesh @mesh0
+  mesh.mesh @mesh0(shape = 3x4x5)
+
+  // Notice: comm_world_rank/linear index 24 is multiindex [1, 0, 4] in @mesh0
+
+  // all shards are equal
+  // CHECK-LABEL: func.func @shard_shape_equal() -> (index, index, index) {
+  func.func @shard_shape_equal() -> (index, index, index) {
+    %sharding = mesh.sharding @mesh0 split_axes = [[0], [1], [2]] : !mesh.sharding
+    %0:3 = mesh.process_multi_index on @mesh0 : index, index, index
+    %c9 = arith.constant 9 : index
+    %c12 = arith.constant 12 : index
+    // CHECK: [[vc3:%.*]] = arith.constant 3 : index
+    %1:3 = mesh.shard_shape dims = [%c9, %c12, 15] sharding = %sharding device = [%0#0, %0#1, %0#2] : index, index, index
+    // CHECK: return [[vc3]], [[vc3]], [[vc3]] : index, index, index
+    return %1#0, %1#1, %1#2 : index, index, index
+  }
+
+  // last shard in last dim gets an extra element
+  // CHECK-LABEL: func.func @shard_shape_odd_1() -> (index, index, index) {
+  func.func @shard_shape_odd_1() -> (index, index, index) {
+    %sharding = mesh.sharding @mesh0 split_axes = [[0], [1], [2]] : !mesh.sharding
+    %0:3 = mesh.process_multi_index on @mesh0 : index, index, index
+    %c9 = arith.constant 9 : index
+    %c12 = arith.constant 12 : index
+    // CHECK-DAG: [[vc3:%.*]] = arith.constant 3 : index
+    // CHECK-DAG: [[vc4:%.*]] = arith.constant 4 : index
+    %1:3 = mesh.shard_shape dims = [%c9, %c12, 16] sharding = %sharding device = [%0#0, %0#1, %0#2] : index, index, index
+    // CHECK: return [[vc3]], [[vc3]], [[vc4]] : index, index, index
+    return %1#0, %1#1, %1#2 : index, index, index
+  }
+
+  // all except first shard in second dim get an extra element
+  // CHECK-LABEL: func.func @shard_shape_odd_2() -> (index, index, index) {
+  func.func @shard_shape_odd_2() -> (index, index, index) {
+    %sharding = mesh.sharding @mesh0 split_axes = [[0], [1], [2]] : !mesh.sharding
+    %0:3 = mesh.process_multi_index on @mesh0 : index, index, index
+    %c9 = arith.constant 9 : index
+    // CHECK: [[vc3:%.*]] = arith.constant 3 : index
+    %1:3 = mesh.shard_shape dims = [%c9, 15, 15] sharding = %sharding device = [%0#0, %0#1, %0#2] : index, index, index
+    // CHECK: return [[vc3]], [[vc3]], [[vc3]] : index, index, index
+    return %1#0, %1#1, %1#2 : index, index, index
+  }
+
+  // all except first shard in first dim get an extra element
+  // CHECK-LABEL: func.func @shard_shape_odd_3() -> (index, index, index) {
+  func.func @shard_shape_odd_3() -> (index, index, index) {
+    %sharding = mesh.sharding @mesh0 split_axes = [[0], [1], [2]] : !mesh.sharding
+    %0:3 = mesh.process_multi_index on @mesh0 : index, index, index
+    // CHECK-DAG: [[vc3:%.*]] = arith.constant 3 : index
+    // CHECK-DAG: [[vc4:%.*]] = arith.constant 4 : index
+    %1:3 = mesh.shard_shape dims = [11, 12, 15] sharding = %sharding device = [%0#0, %0#1, %0#2] : index, index, index
+    // CHECK: return [[vc4]], [[vc3]], [[vc3]] : index, index, index
+    return %1#0, %1#1, %1#2 : index, index, index
+  }
+
+  // extract from sharded_dims_offsets
+  // CHECK-LABEL: func.func @shard_shape_sharded_dims_offs() -> (index, index, index) {
+  func.func @shard_shape_sharded_dims_offs() -> (index, index, index) {
+    %sharding = mesh.sharding @mesh0 split_axes = [[0], [1], [2]]
+        sharded_dims_offsets = [0, 1, 4, 9, 0, 2, 6, 12, 12, 0, 3, 6, 9, 12, 15] : !mesh.sharding
+    %0:3 = mesh.process_multi_index on @mesh0 : index, index, index
+    %c9 = arith.constant 9 : index
+    %c12 = arith.constant 12 : index
+    // CHECK: [[vc3:%.*]] = arith.constant 3 : index
+    // CHECK: [[vc2:%.*]] = arith.constant 2 : index
+    %1:3 = mesh.shard_shape dims = [%c9, %c12, 15] sharding = %sharding device = [%0#0, %0#1, %0#2] : index, index, index
+    // CHECK: return [[vc3]], [[vc2]], [[vc3]] : index, index, index
+    return %1#0, %1#1, %1#2 : index, index, index
+  }
+}
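The "Notice" comment in this test relies on row-major (C-order) linearization of process indices. A standalone C++ sketch, not part of the commit, that reproduces the rank-24 to [1, 0, 4] mapping for the 3x4x5 mesh:

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> linearToMulti(int64_t linear,
                                   const std::vector<int64_t> &shape) {
  std::vector<int64_t> multi(shape.size());
  for (size_t i = shape.size(); i-- > 0;) { // last axis varies fastest
    multi[i] = linear % shape[i];
    linear /= shape[i];
  }
  return multi;
}

int main() {
  auto idx = linearToMulti(24, {3, 4, 5});
  assert(idx[0] == 1 && idx[1] == 0 && idx[2] == 4); // 24 = 1*20 + 0*5 + 4
  return 0;
}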

mlir/test/Dialect/Mesh/ops.mlir

Lines changed: 6 additions & 4 deletions
@@ -157,10 +157,12 @@ func.func @mesh_shard_shape() {
   %c3 = arith.constant 3 : index
   // CHECK-NEXT: %[[S:.*]] = mesh.sharding @mesh0 split_axes = {{\[\[}}]] : !mesh.sharding
   %s = mesh.sharding @mesh0 split_axes = [[]] : !mesh.sharding
-  // CHECK-NEXT: mesh.shard_shape 8x? %[[S]] %[[C3]] : index, index
-  %shp:2 = mesh.shard_shape 8x? %s %c3 : index, index
-  // CHECK-NEXT: mesh.shard_shape 8x4 %[[S]] %[[C3]] : index, index
-  %shp1:2 = mesh.shard_shape 8x4 %s %c3 : index, index
+  // CHECK-NEXT: mesh.shard_shape dims = [8, %[[C3]]
+  // CHECK-SAME: ] sharding = %[[S]] device = [%[[C3]]
+  // CHECK-SAME: ] : index, index
+  %shp:2 = mesh.shard_shape dims = [8, %c3] sharding = %s device = [%c3] : index, index
+  // CHECK-NEXT: mesh.shard_shape dims = [8, 4] sharding = %[[S]] device = [3] : index, index
+  %shp1:2 = mesh.shard_shape dims = [8, 4] sharding = %s device = [3] : index, index
   return
 }

mlir/test/Dialect/Tensor/mesh-spmdization.mlir

Lines changed: 7 additions & 4 deletions
@@ -10,8 +10,9 @@ func.func @tensor_empty_static_sharded_dims_offsets() -> () {
   %sharding = mesh.sharding @mesh_1d_4 split_axes = [[0]] sharded_dims_offsets = [0, 1, 4, 7, 8] : !mesh.sharding
   %sharded= mesh.shard %b to %sharding : tensor<8x16xf32>
   // CHECK: %[[sharding:.*]] = mesh.sharding @mesh_1d_4 split_axes = {{\[\[}}0]] sharded_dims_offsets = [0, 1, 4, 7, 8] : !mesh.sharding
-  // CHECK: %[[proc_linear_idx:.*]] = mesh.process_linear_index on @mesh_1d_4 : index
-  // CHECK: %[[V0:.*]]:2 = mesh.shard_shape 8x16 %[[sharding]] %[[proc_linear_idx]] : index, index
+  // CHECK: %[[proc_multi_idx:.*]] = mesh.process_multi_index on @mesh_1d_4 : index
+  // CHECK: %[[V0:.*]]:2 = mesh.shard_shape dims = [8, 16] sharding = %[[sharding]] device = [%[[proc_multi_idx]]
+  // CHECK-SAME: ] : index, index
   // CHECK: tensor.empty(%[[V0]]#0) : tensor<?x16xf32>
 
   return
@@ -24,8 +24,10 @@ func.func @tensor_empty_dynamic_sharded_dims_offsets(%arg0 : index) -> () {
   %sharding = mesh.sharding @mesh_1d_4 split_axes = [[0]] sharded_dims_offsets = [0, 1, 4, 7, 8] : !mesh.sharding
   %sharded= mesh.shard %b to %sharding : tensor<8x?xf32>
   // CHECK: %[[sharding:.*]] = mesh.sharding @mesh_1d_4 split_axes = {{\[\[}}0]] sharded_dims_offsets = [0, 1, 4, 7, 8] : !mesh.sharding
-  // CHECK: %[[proc_linear_idx:.*]] = mesh.process_linear_index on @mesh_1d_4 : index
-  // CHECK: %[[V0:.*]]:2 = mesh.shard_shape 8x? %[[sharding]] %[[proc_linear_idx]] : index, index
+  // CHECK: %[[proc_multi_idx:.*]] = mesh.process_multi_index on @mesh_1d_4 : index
+  // CHECK: %[[V0:.*]]:2 = mesh.shard_shape dims = [8, %[[A0]]
+  // CHECK-SAME: ] sharding = %[[sharding]] device = [%[[proc_multi_idx]]
+  // CHECK-SAME: ] : index, index
   // CHECK: tensor.empty(%[[V0]]#0, %[[A0]]) : tensor<?x?xf32>
 
   return
