
Commit e86081b

[flang][cuda] Convert cuf.shared_memory operation to LLVM ops (#131396)
Convert the operation to an `llvm.addressof` operation followed by an `llvm.getelementptr` with the appropriate offset.
1 parent: 4fb20b8
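As a sketch of the lowering, using the example from the new test below (the `_QPshared_static` kernel, its shared-memory global, and the offsets all come from that test; the SSA value names here are illustrative):

    %1 = cuf.shared_memory i32 {bindc_name = "b", offset = 4 : i32, uniq_name = "_QFshared_staticEb"} -> !fir.ref<i32>

becomes

    %2 = llvm.mlir.addressof @_QPshared_static__shared_mem : !llvm.ptr<3>
    %3 = llvm.addrspacecast %2 : !llvm.ptr<3> to !llvm.ptr
    %4 = llvm.getelementptr %3[4] : (!llvm.ptr) -> !llvm.ptr, i8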

File tree

2 files changed: +89 -1 lines changed


flang/lib/Optimizer/Transforms/CUFGPUToLLVMConversion.cpp

Lines changed: 69 additions & 1 deletion
@@ -7,12 +7,15 @@
 //===----------------------------------------------------------------------===//
 
 #include "flang/Optimizer/Transforms/CUFGPUToLLVMConversion.h"
+#include "flang/Optimizer/Builder/CUFCommon.h"
 #include "flang/Optimizer/CodeGen/TypeConverter.h"
+#include "flang/Optimizer/Dialect/CUF/CUFOps.h"
 #include "flang/Optimizer/Support/DataLayout.h"
 #include "flang/Runtime/CUDA/common.h"
 #include "flang/Support/Fortran.h"
 #include "mlir/Conversion/LLVMCommon/Pattern.h"
 #include "mlir/Dialect/GPU/IR/GPUDialect.h"
+#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/DialectConversion.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
@@ -175,6 +178,69 @@ struct GPULaunchKernelConversion
   }
 };
 
+static std::string getFuncName(cuf::SharedMemoryOp op) {
+  if (auto gpuFuncOp = op->getParentOfType<mlir::gpu::GPUFuncOp>())
+    return gpuFuncOp.getName().str();
+  if (auto funcOp = op->getParentOfType<mlir::func::FuncOp>())
+    return funcOp.getName().str();
+  if (auto llvmFuncOp = op->getParentOfType<mlir::LLVM::LLVMFuncOp>())
+    return llvmFuncOp.getSymName().str();
+  return "";
+}
+
+static mlir::Value createAddressOfOp(mlir::ConversionPatternRewriter &rewriter,
+                                     mlir::Location loc,
+                                     gpu::GPUModuleOp gpuMod,
+                                     std::string &sharedGlobalName) {
+  auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(
+      rewriter.getContext(), mlir::NVVM::NVVMMemorySpace::kSharedMemorySpace);
+  if (auto g = gpuMod.lookupSymbol<fir::GlobalOp>(sharedGlobalName))
+    return rewriter.create<mlir::LLVM::AddressOfOp>(loc, llvmPtrTy,
+                                                    g.getSymName());
+  if (auto g = gpuMod.lookupSymbol<mlir::LLVM::GlobalOp>(sharedGlobalName))
+    return rewriter.create<mlir::LLVM::AddressOfOp>(loc, llvmPtrTy,
+                                                    g.getSymName());
+  return {};
+}
+
+struct CUFSharedMemoryOpConversion
+    : public mlir::ConvertOpToLLVMPattern<cuf::SharedMemoryOp> {
+  explicit CUFSharedMemoryOpConversion(
+      const fir::LLVMTypeConverter &typeConverter, mlir::PatternBenefit benefit)
+      : mlir::ConvertOpToLLVMPattern<cuf::SharedMemoryOp>(typeConverter,
+                                                          benefit) {}
+  using OpAdaptor = typename cuf::SharedMemoryOp::Adaptor;
+
+  mlir::LogicalResult
+  matchAndRewrite(cuf::SharedMemoryOp op, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    mlir::Location loc = op->getLoc();
+    if (!op.getOffset())
+      mlir::emitError(loc,
+                      "cuf.shared_memory must have an offset for code gen");
+
+    auto gpuMod = op->getParentOfType<gpu::GPUModuleOp>();
+    std::string sharedGlobalName =
+        (getFuncName(op) + llvm::Twine(cudaSharedMemSuffix)).str();
+    mlir::Value sharedGlobalAddr =
+        createAddressOfOp(rewriter, loc, gpuMod, sharedGlobalName);
+
+    if (!sharedGlobalAddr)
+      mlir::emitError(loc, "Could not find the shared global operation\n");
+
+    auto castPtr = rewriter.create<mlir::LLVM::AddrSpaceCastOp>(
+        loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()),
+        sharedGlobalAddr);
+    mlir::Type baseType = castPtr->getResultTypes().front();
+    llvm::SmallVector<mlir::LLVM::GEPArg> gepArgs = {
+        static_cast<int32_t>(*op.getOffset())};
+    mlir::Value shmemPtr = rewriter.create<mlir::LLVM::GEPOp>(
+        loc, baseType, rewriter.getI8Type(), castPtr, gepArgs);
+    rewriter.replaceOp(op, {shmemPtr});
+    return mlir::success();
+  }
+};
+
 class CUFGPUToLLVMConversion
     : public fir::impl::CUFGPUToLLVMConversionBase<CUFGPUToLLVMConversion> {
 public:
@@ -194,6 +260,7 @@ class CUFGPUToLLVMConversion
                                          /*forceUnifiedTBAATree=*/false, *dl);
     cuf::populateCUFGPUToLLVMConversionPatterns(typeConverter, patterns);
     target.addIllegalOp<mlir::gpu::LaunchFuncOp>();
+    target.addIllegalOp<cuf::SharedMemoryOp>();
     target.addLegalDialect<mlir::LLVM::LLVMDialect>();
     if (mlir::failed(mlir::applyPartialConversion(getOperation(), target,
                                                   std::move(patterns)))) {
@@ -208,5 +275,6 @@ class CUFGPUToLLVMConversion
 void cuf::populateCUFGPUToLLVMConversionPatterns(
     const fir::LLVMTypeConverter &converter, mlir::RewritePatternSet &patterns,
     mlir::PatternBenefit benefit) {
-  patterns.add<GPULaunchKernelConversion>(converter, benefit);
+  patterns.add<CUFSharedMemoryOpConversion, GPULaunchKernelConversion>(
+      converter, benefit);
 }
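Two details worth noting in the pattern above: the shared-memory global's name is formed from the enclosing function's name plus `cudaSharedMemSuffix` (the suffix presumably comes from the newly included CUFCommon.h; in the test below, `@_QPshared_static` maps to `@_QPshared_static__shared_mem`), and the `llvm.getelementptr` uses `i8` as its element type, so the op's `offset` attribute is applied as a raw byte offset into the global; the two i32 variables in the test land at byte offsets 0 and 4.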
Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
+// RUN: fir-opt --split-input-file --cuf-gpu-convert-to-llvm %s | FileCheck %s
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} {
+  gpu.module @cuda_device_mod {
+    llvm.func @_QPshared_static() {
+      %0 = cuf.shared_memory i32 {bindc_name = "a", offset = 0 : i32, uniq_name = "_QFshared_staticEa"} -> !fir.ref<i32>
+      %1 = cuf.shared_memory i32 {bindc_name = "b", offset = 4 : i32, uniq_name = "_QFshared_staticEb"} -> !fir.ref<i32>
+      llvm.return
+    }
+    llvm.mlir.global common @_QPshared_static__shared_mem(dense<0> : vector<28xi8>) {addr_space = 3 : i32, alignment = 8 : i64} : !llvm.array<28 x i8>
+  }
+}
+
+// CHECK-LABEL: llvm.func @_QPshared_static()
+// CHECK: %[[ADDR0:.*]] = llvm.mlir.addressof @_QPshared_static__shared_mem : !llvm.ptr<3>
+// CHECK: %[[ADDRCAST0:.*]] = llvm.addrspacecast %[[ADDR0]] : !llvm.ptr<3> to !llvm.ptr
+// CHECK: %[[A:.*]] = llvm.getelementptr %[[ADDRCAST0]][0] : (!llvm.ptr) -> !llvm.ptr, i8
+// CHECK: %[[ADDR1:.*]] = llvm.mlir.addressof @_QPshared_static__shared_mem : !llvm.ptr<3>
+// CHECK: %[[ADDRCAST1:.*]] = llvm.addrspacecast %[[ADDR1]] : !llvm.ptr<3> to !llvm.ptr
+// CHECK: %[[B:.*]] = llvm.getelementptr %[[ADDRCAST1]][4] : (!llvm.ptr) -> !llvm.ptr, i8
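The RUN line above also shows how to exercise the new pattern in isolation; assuming the test content is saved locally (the file name here is a placeholder):

    fir-opt --split-input-file --cuf-gpu-convert-to-llvm cuda-shared-memory.mlir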
