@@ -1168,10 +1168,11 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
       auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
       if (width == 8)
         return {len, typeCodeVal};
-      auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8);
       auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
+      auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8);
+      auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len);
       auto size =
-          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len);
+          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64);
       return {size, typeCodeVal};
     };
     auto getKindMap = [&]() -> fir::KindMapping & {
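Annotation (not part of the commit): the lambda above computes the descriptor's element size in bytes, short-circuiting single-byte elements and otherwise widening the length to `i64` before the multiply so a narrow length operand cannot truncate the product. A minimal standalone C++ sketch of that arithmetic, with hypothetical names; the real code emits an `llvm.mul` op rather than doing host arithmetic:

```cpp
#include <cstdint>

// Hypothetical distillation of the size computation in the hunk above.
std::int64_t elementByteSize(unsigned widthInBits, std::int32_t len) {
  if (widthInBits == 8)
    return len; // single-byte elements: the length already is the byte size
  std::int64_t byteWidth = widthInBits / 8;
  std::int64_t len64 = len; // mirrors the integerCast to i64 before the mul
  return byteWidth * len64;
}
```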
@@ -1382,10 +1383,11 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
         base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
     if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
       auto idxTy = this->lowerTy().indexType();
-      mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
-      gepOperands.push_back(zero);
+      gepOperands.push_back(genConstantIndex(loc, idxTy, rewriter, 0));
+      gepOperands.push_back(lowerBound);
+    } else {
+      gepOperands.push_back(lowerBound);
     }
-    gepOperands.push_back(lowerBound);
     return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
   }

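An aside on why the zero index is only pushed for array bases (a standard LLVM GEP rule, not something this commit invents): a GEP through a pointer-to-array must first step over the pointer with index 0 before indexing into the elements. A small sketch with hypothetical stand-in types, the emitted-IR shapes noted in comments:

```cpp
#include <vector>

struct Value {}; // stand-in for mlir::Value

// For a [N x T]* base:  getelementptr [N x T], [N x T]* %base, i64 0, i64 %lb
// For a plain T* base:  getelementptr T, T* %base, i64 %lb
std::vector<Value> gepIndices(bool baseIsArray, Value zero, Value lowerBound) {
  std::vector<Value> indices;
  if (baseIsArray)
    indices.push_back(zero); // step over the pointer itself first
  indices.push_back(lowerBound);
  return indices;
}
```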
@@ -1468,50 +1470,58 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
     mlir::Location loc = xbox.getLoc();
     mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0);
     mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1);
-    mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize);
     mlir::Value prevPtrOff = one;
     mlir::Type eleTy = boxTy.getEleTy();
     const unsigned rank = xbox.getRank();
     llvm::SmallVector<mlir::Value> gepArgs;
     unsigned constRows = 0;
     mlir::Value ptrOffset = zero;
-    if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()))
-      if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) {
-        mlir::Type seqEleTy = seqTy.getEleTy();
-        // Adjust the element scaling factor if the element is a dependent type.
-        if (fir::hasDynamicSize(seqEleTy)) {
-          if (fir::isa_char(seqEleTy)) {
-            assert(xbox.lenParams().size() == 1);
-            prevPtrOff = integerCast(loc, rewriter, i64Ty,
-                                     operands[xbox.lenParamOffset()]);
-          } else if (seqEleTy.isa<fir::RecordType>()) {
-            TODO(loc, "generate call to calculate size of PDT");
-          } else {
-            return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type");
-          }
-        } else {
-          constRows = seqTy.getConstantRows();
-        }
+    mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType());
+    assert(memEleTy.isa<fir::SequenceType>());
+    auto seqTy = memEleTy.cast<fir::SequenceType>();
+    mlir::Type seqEleTy = seqTy.getEleTy();
+    // Adjust the element scaling factor if the element is a dependent type.
+    if (fir::hasDynamicSize(seqEleTy)) {
+      if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) {
+        assert(xbox.lenParams().size() == 1);
+        mlir::LLVM::ConstantOp charSize = genConstantIndex(
+            loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8);
+        mlir::Value castedLen =
+            integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]);
+        auto byteOffset =
+            rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen);
+        prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset);
+      } else if (seqEleTy.isa<fir::RecordType>()) {
+        // prevPtrOff = ;
+        TODO(loc, "generate call to calculate size of PDT");
+      } else {
+        fir::emitFatalError(loc, "unexpected dynamic type");
       }
+    } else {
+      constRows = seqTy.getConstantRows();
+    }

-    bool hasSubcomp = !xbox.subcomponent().empty();
-    if (!xbox.substr().empty())
-      TODO(loc, "codegen of fir.embox with substring");
-
-    mlir::Value stepExpr;
+    const auto hasSubcomp = !xbox.subcomponent().empty();
+    const bool hasSubstr = !xbox.substr().empty();
+    // Compute the initial element stride that will be used to compute the
+    // step in each dimension.
+    mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize);
     if (hasSubcomp) {
       // We have a subcomponent. The step value needs to be the number of
       // bytes per element (which is a derived type).
-      mlir::Type ty0 = base.getType();
-      [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
-      assert(ptrTy && "expected pointer type");
-      mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType());
-      assert(memEleTy && "expected fir pointer type");
-      auto seqTy = memEleTy.dyn_cast<fir::SequenceType>();
-      assert(seqTy && "expected sequence type");
-      mlir::Type seqEleTy = seqTy.getEleTy();
       auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy));
-      stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter);
+      prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter);
+    } else if (hasSubstr) {
+      // We have a substring. The step value needs to be the number of bytes
+      // per CHARACTER element.
+      auto charTy = seqEleTy.cast<fir::CharacterType>();
+      if (fir::hasDynamicSize(charTy)) {
+        prevDimByteStride = prevPtrOff;
+      } else {
+        prevDimByteStride = genConstantIndex(
+            loc, i64Ty, rewriter,
+            charTy.getLen() * lowerTy().characterBitsize(charTy) / 8);
+      }
     }

     // Process the array subspace arguments (shape, shift, etc.), if any,
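To make the stride seeding above concrete (an annotation; `bytesPerCodeUnit` stands in for `lowerTy().characterBitsize(charTy) / 8`): for a substring, the per-dimension step must span a whole CHARACTER element of the base array, so a constant LEN gives LEN times bytes-per-code-unit, while a dynamic LEN reuses the runtime byte size already computed into `prevPtrOff`. A plain C++ sketch with hypothetical names:

```cpp
#include <cstdint>
#include <optional>

// Hypothetical helper mirroring the prevDimByteStride seeding for substrings.
std::int64_t substringByteStride(std::optional<std::int64_t> constLen,
                                 std::int64_t bytesPerCodeUnit,
                                 std::int64_t runtimeByteSize) {
  if (!constLen)
    return runtimeByteSize; // dynamic LEN: reuse the prevPtrOff computation
  return *constLen * bytesPerCodeUnit; // constant LEN: a compile-time constant
}
```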
@@ -1544,36 +1554,36 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
         }
       }
       if (!skipNext) {
+        // store extent
         if (hasSlice)
           extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
                                         operands[sliceOffset + 1],
                                         operands[sliceOffset + 2], zero, i64Ty);
-        // store lower bound (normally 0) for BIND(C) interoperability.
+        // Lower bound is normalized to 0 for BIND(C) interoperability.
        mlir::Value lb = zero;
        const bool isaPointerOrAllocatable =
            eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
        // Lower bound defaults to 1 for POINTER, ALLOCATABLE, and
        // denormalized descriptors.
-        if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) {
+        if (isaPointerOrAllocatable || !normalizedLowerBound(xbox))
          lb = one;
-          // If there is a shifted origin, and no fir.slice, and this is not
-          // a normalized descriptor then use the value from the shift op as
-          // the lower bound.
-          if (hasShift && !(hasSlice || hasSubcomp)) {
-            lb = operands[shiftOffset];
-            auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
-                loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
-            lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
-                                                       lb);
-          }
+        // If there is a shifted origin, and no fir.slice, and this is not
+        // a normalized descriptor, then use the value from the shift op as
+        // the lower bound.
+        if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) &&
+            (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) {
+          lb = operands[shiftOffset];
+          auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
+              loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
+          lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
+                                                     lb);
        }
        dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);

        dest = insertExtent(rewriter, loc, dest, descIdx, extent);

        // store step (scaled by shaped extent)
-
-        mlir::Value step = hasSubcomp ? stepExpr : prevDim;
+        mlir::Value step = prevDimByteStride;
        if (hasSlice)
          step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
                                                    operands[sliceOffset + 2]);
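Restating the lower-bound policy that the reworked branch encodes (annotation only; the generated code uses `llvm.icmp`/`llvm.select` instead of host control flow): normalized descriptors store 0 for BIND(C) interoperability, POINTER/ALLOCATABLE and denormalized descriptors default to 1, and a shift value is honored only when no slice, subcomponent, or substring is present, with empty extents forcing the bound back to 1:

```cpp
#include <cstdint>

// Condensed restatement of the lower-bound rules from the hunk above.
std::int64_t chooseLowerBound(bool isPointerOrAllocatable, bool normalized,
                              bool hasShift, bool hasSliceSubcompOrSubstr,
                              std::int64_t shiftValue, std::int64_t extent) {
  std::int64_t lb = 0; // normalized descriptors use 0 (BIND(C) interop)
  if (isPointerOrAllocatable || !normalized)
    lb = 1; // default Fortran lower bound
  if (hasShift && !hasSliceSubcompOrSubstr &&
      (isPointerOrAllocatable || !normalized))
    lb = (extent == 0) ? 1 : shiftValue; // empty dimensions fall back to 1
  return lb;
}
```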
@@ -1582,8 +1592,8 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
      }

      // compute the stride and offset for the next natural dimension
-      prevDim =
-          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
+      prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>(
+          loc, i64Ty, prevDimByteStride, outerExtent);
      if (constRows == 0)
        prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
                                                        outerExtent);
@@ -1597,7 +1607,7 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
      if (hasSlice)
        sliceOffset += 3;
    }
-    if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
+    if (hasSlice || hasSubcomp || hasSubstr) {
      llvm::SmallVector<mlir::Value> args = {ptrOffset};
      args.append(gepArgs.rbegin(), gepArgs.rend());
      if (hasSubcomp) {
@@ -1613,7 +1623,7 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
      }
      base =
          rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args);
-      if (!xbox.substr().empty())
+      if (hasSubstr)
        base = shiftSubstringBase(rewriter, loc, base,
                                  operands[xbox.substrOffset()]);
    }
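Finally, a hedged note on the substring base shift (the semantics of `shiftSubstringBase` are inferred from its use here, and the zero-based-offset assumption is mine, not stated by the commit): after the GEP selects the array element, the payload address still has to be advanced to the substring's first character. In byte terms:

```cpp
#include <cstdint>

// Hypothetical byte-level equivalent of shiftSubstringBase; assumes the
// substring offset operand has already been normalized to zero-based.
char *shiftToSubstring(char *elementBase, std::int64_t offset,
                       std::int64_t bytesPerCodeUnit) {
  return elementBase + offset * bytesPerCodeUnit;
}
```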