Commit 423f354
[flang][cuda] Adding support for registration of boxes (#114323)
Needed to take into account that `fir::getTypeSizeAndAlignmentOrCrash` does not work with box types; computing the size of a box requires the `fir::LLVMTypeConverter` instead.
1 parent 05910b4 commit 423f354

3 files changed: +30 -8 lines
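In outline, the patch computes the size of box-typed globals through the LLVM type converter, since `fir::getTypeSizeAndAlignmentOrCrash` cannot handle box types, and keeps the old path for every other type. A minimal sketch of that logic, condensed from the .cpp diff below (it assumes `mod`, `dl`, `kindMap`, `loc`, and `globalOp` are in scope, as they are in the pass):

// Box types lower to an LLVM descriptor struct; take the size from the
// data layout of that struct. Everything else keeps the original query.
fir::LLVMTypeConverter typeConverter(mod, /*applyTBAA=*/false,
                                     /*forceUnifiedTBAATree=*/false, *dl);
std::optional<uint64_t> size;
if (auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(globalOp.getType())) {
  mlir::Type structTy = typeConverter.convertBoxTypeAsStruct(boxTy);
  size = dl->getTypeSizeInBits(structTy) / 8; // bits -> bytes
}
if (!size)
  size = fir::getTypeSizeAndAlignmentOrCrash(loc, globalOp.getType(), *dl,
                                             kindMap)
             .first;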

flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp

Lines changed: 17 additions & 5 deletions
@@ -11,11 +11,13 @@
 #include "flang/Optimizer/Builder/Runtime/RTBuilder.h"
 #include "flang/Optimizer/Builder/Todo.h"
 #include "flang/Optimizer/CodeGen/Target.h"
+#include "flang/Optimizer/CodeGen/TypeConverter.h"
 #include "flang/Optimizer/Dialect/CUF/CUFOps.h"
 #include "flang/Optimizer/Dialect/FIRAttr.h"
 #include "flang/Optimizer/Dialect/FIRDialect.h"
 #include "flang/Optimizer/Dialect/FIROps.h"
 #include "flang/Optimizer/Dialect/FIROpsSupport.h"
+#include "flang/Optimizer/Dialect/FIRType.h"
 #include "flang/Optimizer/Support/DataLayout.h"
 #include "flang/Optimizer/Transforms/CUFCommon.h"
 #include "flang/Runtime/CUDA/registration.h"
@@ -84,6 +86,8 @@ struct CUFAddConstructor
     auto registeredMod = builder.create<cuf::RegisterModuleOp>(
         loc, llvmPtrTy, mlir::SymbolRefAttr::get(ctx, gpuMod.getName()));

+    fir::LLVMTypeConverter typeConverter(mod, /*applyTBAA=*/false,
+                                         /*forceUnifiedTBAATree=*/false, *dl);
     // Register kernels
     for (auto func : gpuMod.getOps<mlir::gpu::GPUFuncOp>()) {
       if (func.isKernel()) {
@@ -115,17 +119,25 @@ struct CUFAddConstructor
             fir::factory::createStringLiteral(builder, loc, gblNameStr));

         // Global variable size
-        auto sizeAndAlign = fir::getTypeSizeAndAlignmentOrCrash(
-            loc, globalOp.getType(), *dl, kindMap);
-        auto size =
-            builder.createIntegerConstant(loc, idxTy, sizeAndAlign.first);
+        std::optional<uint64_t> size;
+        if (auto boxTy =
+                mlir::dyn_cast<fir::BaseBoxType>(globalOp.getType())) {
+          mlir::Type structTy = typeConverter.convertBoxTypeAsStruct(boxTy);
+          size = dl->getTypeSizeInBits(structTy) / 8;
+        }
+        if (!size) {
+          size = fir::getTypeSizeAndAlignmentOrCrash(loc, globalOp.getType(),
+                                                     *dl, kindMap)
+                     .first;
+        }
+        auto sizeVal = builder.createIntegerConstant(loc, idxTy, *size);

         // Global variable address
         mlir::Value addr = builder.create<fir::AddrOfOp>(
             loc, globalOp.resultType(), globalOp.getSymbol());

         llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
-            builder, loc, fTy, registeredMod, addr, gblName, size)};
+            builder, loc, fTy, registeredMod, addr, gblName, sizeVal)};
         builder.create<fir::CallOp>(loc, func, args);
       } break;
       case cuf::DataAttribute::Managed:

flang/test/Fir/CUDA/cuda-constructor-2.f90

Lines changed: 12 additions & 2 deletions
@@ -3,6 +3,12 @@
 module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} {

   fir.global @_QMmtestsEn(dense<[3, 4, 5, 6, 7]> : tensor<5xi32>) {data_attr = #cuf.cuda<device>} : !fir.array<5xi32>
+  fir.global @_QMmtestsEndev {data_attr = #cuf.cuda<device>} : !fir.box<!fir.heap<!fir.array<?xi32>>> {
+    %c0 = arith.constant 0 : index
+    %0 = fir.zero_bits !fir.heap<!fir.array<?xi32>>
+    %1 = fircg.ext_embox %0(%c0) {allocator_idx = 2 : i32} : (!fir.heap<!fir.array<?xi32>>, index) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
+    fir.has_value %1 : !fir.box<!fir.heap<!fir.array<?xi32>>>
+  }

   gpu.module @cuda_device_mod [#nvvm.target] {
   }
@@ -18,5 +24,9 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<
 // CHECK-DAG: %[[VAR_ADDR2:.*]] = fir.convert %[[VAR_ADDR]] : (!fir.ref<!fir.array<5xi32>>) -> !fir.ref<i8>
 // CHECK-DAG: %[[VAR_NAME2:.*]] = fir.convert %[[VAR_NAME]] : (!fir.ref<!fir.char<1,12>>) -> !fir.ref<i8>
 // CHECK-DAG: %[[CST:.*]] = arith.constant 20 : index
-// CHECK-DAG %[[CST2:.*]] = fir.convert %[[CST]] : (index) -> i64
-// CHECK fir.call @_FortranACUFRegisterVariable(%[[MODULE2]], %[[VAR_ADDR2]], %[[VAR_NAME2]], %[[CST2]]) : (!fir.ref<!fir.llvm_ptr<i8>>, !fir.ref<i8>, !fir.ref<i8>, i64) -> none
+// CHECK-DAG: %[[CST2:.*]] = fir.convert %[[CST]] : (index) -> i64
+// CHECK-DAG: fir.call @_FortranACUFRegisterVariable(%[[MODULE2]], %[[VAR_ADDR2]], %[[VAR_NAME2]], %[[CST2]]) : (!fir.ref<!fir.llvm_ptr<i8>>, !fir.ref<i8>, !fir.ref<i8>, i64) -> none
+// CHECK-DAG: %[[BOX:.*]] = fir.address_of(@_QMmtestsEndev) : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+// CHECK-DAG: %[[BOXREF:.*]] = fir.convert %[[BOX]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<i8>
+// CHECK-DAG: fir.call @_FortranACUFRegisterVariable(%[[MODULE:.*]], %[[BOXREF]], %{{.*}}, %{{.*}})
+//
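A usage note: assuming this test is driven by the same lit convention as cuda-register-func.fir below, its RUN line would take the form

// RUN: fir-opt --cuf-add-constructor %s | FileCheck %s

so the new CHECK-DAG lines verify that the pass emits a second _FortranACUFRegisterVariable call for the box-typed global @_QMmtestsEndev.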

flang/test/Fir/CUDA/cuda-register-func.fir

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 // RUN: fir-opt --cuf-add-constructor %s | FileCheck %s

-module attributes {gpu.container_module} {
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} {
   gpu.module @cuda_device_mod {
     gpu.func @_QPsub_device1() kernel {
       gpu.return
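The added module attributes are what make the new code path viable here: the pass now constructs a `fir::LLVMTypeConverter` from the module's data layout (`*dl`), so a bare `module attributes {gpu.container_module}` no longer carries enough information, and the `dlti.dl_spec` and `llvm.data_layout` entries give the converter a concrete layout to query.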
