
Commit 5d27abe

clementval, schweitzpgi, and jeanPerier committed
[fir] Add fircg.ext_array_coor conversion
This patch adds the conversion pattern for the fircg.ext_array_coor operation. It applies the address arithmetic on a dynamically shaped, shifted, and/or sliced array. This patch is part of the upstreaming effort from the fir-dev branch.

Reviewed By: kiranchandramohan

Differential Revision: https://reviews.llvm.org/D113968

Co-authored-by: Eric Schweitz <[email protected]>
Co-authored-by: Jean Perier <[email protected]>
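In outline (a summary of the pattern below, not text from the patch): for each dimension i, the conversion computes a zero-based index diff_i = (index_i - lb_i) * step_i, plus (sliceLb_i - lb_i) when a slice triplet is present, and accumulates offset = sum over i of diff_i * stride_i, where stride_i is loaded from the descriptor for boxed arrays (byte units) or is the running product of the extents for contiguous arrays (element units).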
1 parent b23d17f commit 5d27abe

File tree

3 files changed: +321 −2 lines changed

flang/include/flang/Optimizer/CodeGen/CGOps.td

Lines changed: 10 additions & 0 deletions
@@ -176,6 +176,16 @@ def fircg_XArrayCoorOp : fircg_Op<"ext_array_coor", [AttrSizedOperandSegments]>

   let extraClassDeclaration = [{
     unsigned getRank();
+
+    // Shape is optional, but if it exists, it will be at offset 1.
+    unsigned shapeOffset() { return 1; }
+    unsigned shiftOffset() { return shapeOffset() + shape().size(); }
+    unsigned sliceOffset() { return shiftOffset() + shift().size(); }
+    unsigned subcomponentOffset() { return sliceOffset() + slice().size(); }
+    unsigned indicesOffset() {
+      return subcomponentOffset() + subcomponent().size();
+    }
+    unsigned lenParamsOffset() { return indicesOffset() + indices().size(); }
   }];
 }
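To make the accessors concrete, this is the operand layout they would imply for a hypothetical rank-2 op carrying shape, shift, slice, and indices (a derived illustration, not part of the patch; memref is operand 0):

  memref              -> operand 0
  shape  (2 values)   -> operands 1-2   (shapeOffset() == 1)
  shift  (2 values)   -> operands 3-4   (shiftOffset() == 3)
  slice  (6 values)   -> operands 5-10  (sliceOffset() == 5; 3 per dimension)
  subcomponent (none) -> (subcomponentOffset() == 11)
  indices (2 values)  -> operands 11-12 (indicesOffset() == 11)
  lenParams           -> operands 13+   (lenParamsOffset() == 13)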

flang/lib/Optimizer/CodeGen/CodeGen.cpp

Lines changed: 186 additions & 2 deletions
@@ -66,6 +66,11 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
     return lowerTy().convertType(ty);
   }

+  mlir::Type getVoidPtrType() const {
+    return mlir::LLVM::LLVMPointerType::get(
+        mlir::IntegerType::get(&lowerTy().getContext(), 8));
+  }
+
   mlir::LLVM::ConstantOp
   genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                  int value) const {
@@ -126,6 +131,17 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
     return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
   }

+  mlir::Value
+  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
+                    mlir::ConversionPatternRewriter &rewriter) const {
+    auto idxTy = lowerTy().indexType();
+    auto c0 = genConstantOffset(loc, rewriter, 0);
+    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
+    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
+    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
+                          rewriter);
+  }
+
   /// Read base address from a fir.box. Returned address has type ty.
   mlir::Value
   loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
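As a rough scalar model of what loadStrideFromBox reads, the descriptor can be pictured as the C++ struct below. The field positions mirror the constants exercised in the tests at the end of this commit (dims block at field 7, stride at slot 2 of each per-dimension triple); the field names themselves are illustrative assumptions, not taken from the patch.

#include <cstdint>

struct BoxDim {
  std::int64_t lowerBound; // slot 0
  std::int64_t extent;     // slot 1
  std::int64_t stride;     // slot 2 (kDimStridePos): byte stride of the dim
};

struct Box {
  void *addr;               // field 0 (kAddrPosInBox): base address
  std::int64_t header[6];   // fields 1..6: element size, version, rank, ...
  BoxDim dims[1];           // field 7 (kDimsPosInBox): one triple per dim
};

// loadStrideFromBox emits the GEP + load equivalent of:
std::int64_t loadStride(const Box *box, unsigned dim) {
  return box->dims[dim].stride;
}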
@@ -188,6 +204,12 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
     return type;
   }

+  // Return LLVM type of the base address given the LLVM type
+  // of the related descriptor (lowered fir.box type).
+  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
+    return getBoxEleTy(type, {kAddrPosInBox});
+  }
+
   template <typename... ARGS>
   mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                            mlir::ConversionPatternRewriter &rewriter,
@@ -2004,6 +2026,168 @@ struct InsertOnRangeOpConversion
   }
 };

+/// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
+/// shifted etc. array.
+/// (See the static restriction on coordinate_of.) array_coor determines the
+/// coordinate (location) of a specific element.
+struct XArrayCoorOpConversion
+    : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
+  using FIROpAndTypeConversion::FIROpAndTypeConversion;
+
+  mlir::LogicalResult
+  doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor,
+            mlir::ConversionPatternRewriter &rewriter) const override {
+    auto loc = coor.getLoc();
+    mlir::ValueRange operands = adaptor.getOperands();
+    unsigned rank = coor.getRank();
+    assert(coor.indices().size() == rank);
+    assert(coor.shape().empty() || coor.shape().size() == rank);
+    assert(coor.shift().empty() || coor.shift().size() == rank);
+    assert(coor.slice().empty() || coor.slice().size() == 3 * rank);
+    mlir::Type idxTy = lowerTy().indexType();
+    mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
+    mlir::Value prevExt = one;
+    mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
+    mlir::Value offset = zero;
+    const bool isShifted = !coor.shift().empty();
+    const bool isSliced = !coor.slice().empty();
+    const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>();
+
+    auto indexOps = coor.indices().begin();
+    auto shapeOps = coor.shape().begin();
+    auto shiftOps = coor.shift().begin();
+    auto sliceOps = coor.slice().begin();
+    // For each dimension of the array, generate the offset calculation.
+    for (unsigned i = 0; i < rank;
+         ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) {
+      mlir::Value index =
+          integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]);
+      mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy,
+                                               operands[coor.shiftOffset() + i])
+                                 : one;
+      mlir::Value step = one;
+      bool normalSlice = isSliced;
+      // Compute zero based index in dimension i of the element, applying
+      // potential triplets and lower bounds.
+      if (isSliced) {
+        mlir::Value ub = *(sliceOps + 1);
+        normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp());
+        if (normalSlice)
+          step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2));
+      }
+      auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb);
+      mlir::Value diff =
+          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step);
+      if (normalSlice) {
+        mlir::Value sliceLb =
+            integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]);
+        auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb);
+        diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj);
+      }
+      // Update the offset given the stride and the zero based index `diff`
+      // that was just computed.
+      if (baseIsBoxed) {
+        // Use stride in bytes from the descriptor.
+        mlir::Value stride =
+            loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter);
+        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride);
+        offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
+      } else {
+        // Use stride computed at last iteration.
+        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt);
+        offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
+        // Compute next stride assuming contiguity of the base array
+        // (in element number).
+        auto nextExt =
+            integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
+        prevExt =
+            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
+      }
+    }
+
+    // Add computed offset to the base address.
+    if (baseIsBoxed) {
+      // Working with byte offsets. The base address is read from the fir.box
+      // and needs to be cast to i8* to do the pointer arithmetic.
+      mlir::Type baseTy =
+          getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
+      mlir::Value base =
+          loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
+      mlir::Type voidPtrTy = getVoidPtrType();
+      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+      llvm::SmallVector<mlir::Value> args{base, offset};
+      auto addr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, args);
+      if (coor.subcomponent().empty()) {
+        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
+        return success();
+      }
+      auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
+      args.clear();
+      args.push_back(casted);
+      args.push_back(zero);
+      if (!coor.lenParams().empty()) {
+        // If type parameters are present, then we don't want to use a GEPOp
+        // as below, as the LLVM struct type cannot be statically defined.
+        TODO(loc, "derived type with type parameters");
+      }
+      // TODO: array offset subcomponents must be converted to LLVM's
+      // row-major layout here.
+      for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
+        args.push_back(operands[i]);
+      rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, args);
+      return success();
+    }
+
+    // The array was not boxed, so it must be contiguous. offset is therefore
+    // an element offset and the base type is kept in the GEP unless the
+    // element type size is itself dynamic.
+    mlir::Value base;
+    if (coor.subcomponent().empty()) {
+      // No subcomponent.
+      if (!coor.lenParams().empty()) {
+        // Type parameters. Adjust element size explicitly.
+        auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
+        assert(eleTy && "result must be a reference-like type");
+        if (fir::characterWithDynamicLen(eleTy)) {
+          assert(coor.lenParams().size() == 1);
+          auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
+              eleTy.cast<fir::CharacterType>().getFKind());
+          auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
+          auto scaledBySize =
+              rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
+          auto length =
+              integerCast(loc, rewriter, idxTy,
+                          adaptor.getOperands()[coor.lenParamsOffset()]);
+          offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
+                                                      length);
+        } else {
+          TODO(loc, "compute size of derived type with type parameters");
+        }
+      }
+      // Cast the base address to a pointer to T.
+      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
+                                                    adaptor.getOperands()[0]);
+    } else {
+      // Operand #0 must have a pointer type. For subcomponent slicing, we
+      // want to cast away the array type and have a plain struct type.
+      mlir::Type ty0 = adaptor.getOperands()[0].getType();
+      auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
+      assert(ptrTy && "expected pointer type");
+      mlir::Type eleTy = ptrTy.getElementType();
+      while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
+        eleTy = arrTy.getElementType();
+      auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
+      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy,
+                                                    adaptor.getOperands()[0]);
+    }
+    SmallVector<mlir::Value> args = {base, offset};
+    for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
+      args.push_back(operands[i]);
+    rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, args);
+    return success();
+  }
+};
+
 //
 // Primitive operations on Complex types
 //
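For readers who prefer scalar pseudocode, this is a minimal model of the offset computation in the non-boxed, non-subcomponent path above (plain C++ with illustrative names; the actual pattern emits LLVM dialect ops instead of doing the arithmetic directly):

#include <cstddef>
#include <cstdint>
#include <vector>

// Column-major element offset for a contiguous array, mirroring the loop in
// XArrayCoorOpConversion: lb comes from the shift (default 1), and a slice
// contributes a step and a slice lower bound per dimension.
std::int64_t elementOffset(const std::vector<std::int64_t> &index,
                           const std::vector<std::int64_t> &extent,
                           const std::vector<std::int64_t> &lb,
                           const std::vector<std::int64_t> &sliceLb,
                           const std::vector<std::int64_t> &step) {
  std::int64_t offset = 0;
  std::int64_t prevExt = 1; // running stride, in elements
  for (std::size_t i = 0; i < index.size(); ++i) {
    std::int64_t diff = (index[i] - lb[i]) * step[i] + (sliceLb[i] - lb[i]);
    offset += diff * prevExt; // scale by the stride of dimension i
    prevExt *= extent[i];     // next stride assumes contiguity
  }
  return offset;
}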
@@ -2431,8 +2615,8 @@ class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
         ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
         SliceOpConversion, StoreOpConversion, StringLitOpConversion,
         SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
-        UndefOpConversion, UnreachableOpConversion, XEmboxOpConversion,
-        ZeroOpConversion>(typeConverter);
+        UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
+        XEmboxOpConversion, ZeroOpConversion>(typeConverter);
     mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern);
     mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                             pattern);

flang/test/Fir/convert-to-llvm.fir

Lines changed: 125 additions & 0 deletions
@@ -1828,3 +1828,128 @@ func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
 // CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[ADDR_BITCAST]], %[[BOX9]][0 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
 // CHECK: llvm.store %[[BOX10]], %[[ALLOCA]] : !llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
 // CHECK: llvm.call @_QPtest_dt_callee(%1) : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> ()
+
+// -----
+
+// Test `fircg.ext_array_coor` conversion.
+
+// Conversion with only shape and index.
+
+func @ext_array_coor0(%arg0: !fir.ref<!fir.array<?xi32>>) {
+  %c0 = arith.constant 0 : i64
+  %1 = fircg.ext_array_coor %arg0(%c0) <%c0> : (!fir.ref<!fir.array<?xi32>>, i64, i64) -> !fir.ref<i32>
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor0(
+// CHECK-SAME:  %[[ARG0:.*]]: !llvm.ptr<i32>)
+// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[IDX:.*]] = llvm.sub %[[C0]], %[[C1]] : i64
+// CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C1]] : i64
+// CHECK: %[[SC:.*]] = llvm.mul %[[DIFF0]], %[[C1]] : i64
+// CHECK: %[[OFFSET:.*]] = llvm.add %[[SC]], %[[C0_1]] : i64
+// CHECK: %[[BITCAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr<i32> to !llvm.ptr<i32>
+// CHECK: %{{.*}} = llvm.getelementptr %[[BITCAST]][%[[OFFSET]]] : (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
+
+// Conversion with shift and slice.
+
+func @ext_array_coor1(%arg0: !fir.ref<!fir.array<?xi32>>) {
+  %c0 = arith.constant 0 : i64
+  %1 = fircg.ext_array_coor %arg0(%c0) origin %c0[%c0, %c0, %c0]<%c0> : (!fir.ref<!fir.array<?xi32>>, i64, i64, i64, i64, i64, i64) -> !fir.ref<i32>
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor1(
+// CHECK-SAME:  %[[ARG0:.*]]: !llvm.ptr<i32>)
+// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[IDX:.*]] = llvm.sub %[[C0]], %[[C0]] : i64
+// CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C0]] : i64
+// CHECK: %[[ADJ:.*]] = llvm.sub %[[C0]], %[[C0]] : i64
+// CHECK: %[[DIFF1:.*]] = llvm.add %[[DIFF0]], %[[ADJ]] : i64
+// CHECK: %[[STRIDE:.*]] = llvm.mul %[[DIFF1]], %[[C1]] : i64
+// CHECK: %[[OFFSET:.*]] = llvm.add %[[STRIDE]], %[[C0_1]] : i64
+// CHECK: %[[BITCAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr<i32> to !llvm.ptr<i32>
+// CHECK: %{{.*}} = llvm.getelementptr %[[BITCAST]][%[[OFFSET]]] : (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
+
+// Conversion for a dynamic length char.
+
+func @ext_array_coor2(%arg0: !fir.ref<!fir.array<?x!fir.char<1,?>>>) {
+  %c0 = arith.constant 0 : i64
+  %1 = fircg.ext_array_coor %arg0(%c0) <%c0> : (!fir.ref<!fir.array<?x!fir.char<1,?>>>, i64, i64) -> !fir.ref<i32>
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor2(
+// CHECK-SAME:  %[[ARG0:.*]]: !llvm.ptr<i8>)
+// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[IDX:.*]] = llvm.sub %[[C0]], %[[C1]] : i64
+// CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C1]] : i64
+// CHECK: %[[SC:.*]] = llvm.mul %[[DIFF0]], %[[C1]] : i64
+// CHECK: %[[OFFSET:.*]] = llvm.add %[[SC]], %[[C0_1]] : i64
+// CHECK: %[[BITCAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr<i8> to !llvm.ptr<i32>
+// CHECK: %{{.*}} = llvm.getelementptr %[[BITCAST]][%[[OFFSET]]] : (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
+
+// Conversion for a `fir.box`.
+
+func @ext_array_coor3(%arg0: !fir.box<!fir.array<?xi32>>) {
+  %c0 = arith.constant 0 : i64
+  %1 = fircg.ext_array_coor %arg0(%c0) <%c0> : (!fir.box<!fir.array<?xi32>>, i64, i64) -> !fir.ref<i32>
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor3(
+// CHECK-SAME:  %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) {
+// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[IDX:.*]] = llvm.sub %[[C0]], %[[C1]] : i64
+// CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C1]] : i64
+// CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[DIMPOSINBOX:.*]] = llvm.mlir.constant(7 : i32) : i32
+// CHECK: %[[DIMOFFSET:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[STRIDPOS:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK: %[[GEPSTRIDE:.*]] = llvm.getelementptr %[[ARG0]][%[[C0_2]], %[[DIMPOSINBOX]], %[[DIMOFFSET]], %[[STRIDPOS]]] : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i32, i64, i32) -> !llvm.ptr<i64>
+// CHECK: %[[LOADEDSTRIDE:.*]] = llvm.load %[[GEPSTRIDE]] : !llvm.ptr<i64>
+// CHECK: %[[SC:.*]] = llvm.mul %[[DIFF0]], %[[LOADEDSTRIDE]] : i64
+// CHECK: %[[OFFSET:.*]] = llvm.add %[[SC]], %[[C0_1]] : i64
+// CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[ADDRPOSINBOX:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[GEPADDR:.*]] = llvm.getelementptr %[[ARG0]][%[[C0_3]], %[[ADDRPOSINBOX]]] : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i32) -> !llvm.ptr<ptr<i32>>
+// CHECK: %[[LOADEDADDR:.*]] = llvm.load %[[GEPADDR]] : !llvm.ptr<ptr<i32>>
+// CHECK: %[[LOADEDADDRBITCAST:.*]] = llvm.bitcast %[[LOADEDADDR]] : !llvm.ptr<i32> to !llvm.ptr<i8>
+// CHECK: %[[GEPADDROFFSET:.*]] = llvm.getelementptr %[[LOADEDADDRBITCAST]][%[[OFFSET]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
+// CHECK: %{{.*}} = llvm.bitcast %[[GEPADDROFFSET]] : !llvm.ptr<i8> to !llvm.ptr<i32>
+
+// Conversion with non-zero shift and slice.
+
+func @ext_array_coor4(%arg0: !fir.ref<!fir.array<100xi32>>) {
+  %c0 = arith.constant 0 : i64
+  %c10 = arith.constant 10 : i64
+  %c20 = arith.constant 20 : i64
+  %c1 = arith.constant 1 : i64
+  %1 = fircg.ext_array_coor %arg0(%c0) origin %c0[%c10, %c20, %c1]<%c1> : (!fir.ref<!fir.array<100xi32>>, i64, i64, i64, i64, i64, i64) -> !fir.ref<i32>
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor4(
+// CHECK:       %[[ARG0:.*]]: !llvm.ptr<array<100 x i32>>) {
+// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[C10:.*]] = llvm.mlir.constant(10 : i64) : i64
+// CHECK: %[[C20:.*]] = llvm.mlir.constant(20 : i64) : i64
+// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[C1_1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[IDX:.*]] = llvm.sub %[[C1]], %[[C0]] : i64
+// CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C1]] : i64
+// CHECK: %[[ADJ:.*]] = llvm.sub %[[C10]], %[[C0]] : i64
+// CHECK: %[[DIFF1:.*]] = llvm.add %[[DIFF0]], %[[ADJ]] : i64
+// CHECK: %[[STRIDE:.*]] = llvm.mul %[[DIFF1]], %[[C1_1]] : i64
+// CHECK: %[[OFFSET:.*]] = llvm.add %[[STRIDE]], %[[C0_1]] : i64
+// CHECK: %[[BITCAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr<array<100 x i32>> to !llvm.ptr<i32>
+// CHECK: %{{.*}} = llvm.getelementptr %[[BITCAST]][%[[OFFSET]]] : (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
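As a worked check of the non-boxed path, trace @ext_array_coor4 through the arithmetic: index = 1, origin lb = 0, slice lower bound = 10, step = 1, so idx = 1 - 0 = 1, diff = 1 * 1 + (10 - 0) = 11, and offset = 11 * 1 + 0 = 11. The final GEP therefore addresses element 11 of the 100-element array, which matches the constants flowing through the CHECK lines above.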
