[flang] Add struct passing target rewrite hooks and partial X86-64 impl #74829

Merged 7 commits on Dec 12, 2023
5 changes: 3 additions & 2 deletions flang/include/flang/Optimizer/CodeGen/CGPasses.td
@@ -23,7 +23,7 @@ def FIRToLLVMLowering : Pass<"fir-to-llvm-ir", "mlir::ModuleOp"> {
will also convert ops in the standard and FIRCG dialects.
}];
let constructor = "::fir::createFIRToLLVMPass()";
let dependentDialects = ["mlir::LLVM::LLVMDialect"];
let dependentDialects = ["mlir::LLVM::LLVMDialect", "mlir::DLTIDialect"];
let options = [
Option<"forcedTargetTriple", "target", "std::string", /*default=*/"",
"Override module's target triple.">,
@@ -53,7 +53,8 @@ def TargetRewritePass : Pass<"target-rewrite", "mlir::ModuleOp"> {
representations that may differ based on the target machine.
}];
let constructor = "::fir::createFirTargetRewritePass()";
let dependentDialects = [ "fir::FIROpsDialect", "mlir::func::FuncDialect" ];
let dependentDialects = [ "fir::FIROpsDialect", "mlir::func::FuncDialect",
"mlir::DLTIDialect" ];
let options = [
Option<"forcedTargetTriple", "target", "std::string", /*default=*/"",
"Override module's target triple.">,
34 changes: 29 additions & 5 deletions flang/include/flang/Optimizer/CodeGen/Target.h
@@ -13,13 +13,18 @@
#ifndef FORTRAN_OPTMIZER_CODEGEN_TARGET_H
#define FORTRAN_OPTMIZER_CODEGEN_TARGET_H

#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Dialect/Support/KindMapping.h"
#include "mlir/IR/BuiltinTypes.h"
#include "llvm/TargetParser/Triple.h"
#include <memory>
#include <tuple>
#include <vector>

namespace mlir {
class DataLayout;
}

namespace fir {

namespace details {
@@ -62,14 +67,20 @@ class Attributes {
class CodeGenSpecifics {
public:
using Attributes = details::Attributes;
using Marshalling = std::vector<std::tuple<mlir::Type, Attributes>>;
using TypeAndAttr = std::tuple<mlir::Type, Attributes>;
using Marshalling = std::vector<TypeAndAttr>;

static std::unique_ptr<CodeGenSpecifics> get(mlir::MLIRContext *ctx,
llvm::Triple &&trp,
KindMapping &&kindMap,
const mlir::DataLayout &dl);

static std::unique_ptr<CodeGenSpecifics>
get(mlir::MLIRContext *ctx, llvm::Triple &&trp, KindMapping &&kindMap);
static TypeAndAttr getTypeAndAttr(mlir::Type t) { return TypeAndAttr{t, {}}; }

CodeGenSpecifics(mlir::MLIRContext *ctx, llvm::Triple &&trp,
KindMapping &&kindMap)
: context{*ctx}, triple{std::move(trp)}, kindMap{std::move(kindMap)} {}
KindMapping &&kindMap, const mlir::DataLayout &dl)
: context{*ctx}, triple{std::move(trp)}, kindMap{std::move(kindMap)},
dataLayout{&dl} {}
CodeGenSpecifics() = delete;
virtual ~CodeGenSpecifics() {}

@@ -90,6 +101,13 @@ class CodeGenSpecifics {
/// Type presentation of a `boxchar<n>` type value in memory.
virtual mlir::Type boxcharMemoryType(mlir::Type eleTy) const = 0;

/// Type representation of a `fir.type<T>` type argument when passed by
/// value. It may have to be split into several arguments, or be passed
/// as a byval reference argument (on the stack).
virtual Marshalling
structArgumentType(mlir::Location loc, fir::RecordType recTy,
const Marshalling &previousArguments) const = 0;

/// Type representation of a `boxchar<n>` type argument when passed by value.
/// An argument value may need to be passed as a (safe) reference argument.
///
@@ -143,10 +161,16 @@ class CodeGenSpecifics {
// Returns width in bits of C/C++ 'int' type size.
virtual unsigned char getCIntTypeWidth() const = 0;

const mlir::DataLayout &getDataLayout() const {
assert(dataLayout && "dataLayout must be set");
return *dataLayout;
}

protected:
mlir::MLIRContext &context;
llvm::Triple triple;
KindMapping kindMap;
const mlir::DataLayout *dataLayout = nullptr;
};

} // namespace fir
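
To make the new structArgumentType hook concrete, here is a hypothetical sketch of a target-specific subclass. It is not the X86-64 implementation this PR adds (that one performs real ABI classification); it only uses the Marshalling, TypeAndAttr, and getTypeAndAttr helpers declared above, and the class name is made up.

#include "flang/Optimizer/CodeGen/Target.h"

namespace {
// Hypothetical target: pass a fir.type<T> argument by splitting it into one
// scalar argument per component, with no extra ABI attributes attached.
struct FlatteningTarget : public fir::CodeGenSpecifics {
  using CodeGenSpecifics::CodeGenSpecifics;

  Marshalling structArgumentType(mlir::Location, fir::RecordType recTy,
                                 const Marshalling &) const override {
    Marshalling marshal;
    for (const auto &component : recTy.getTypeList())
      marshal.push_back(getTypeAndAttr(component.second));
    return marshal;
  }
  // The remaining pure virtual hooks are omitted, so the class stays abstract
  // as written.
};
} // namespace

A real implementation would consult getDataLayout() to decide whether the record can be split across registers or must be passed as a byval reference on the stack, which is exactly what the data layout plumbing added to this header enables.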
6 changes: 5 additions & 1 deletion flang/include/flang/Optimizer/CodeGen/TypeConverter.h
@@ -39,14 +39,18 @@ static constexpr unsigned kDimLowerBoundPos = 0;
static constexpr unsigned kDimExtentPos = 1;
static constexpr unsigned kDimStridePos = 2;

namespace mlir {
class DataLayout;
}

namespace fir {

/// FIR type converter
/// This converts FIR types to LLVM types (for now)
class LLVMTypeConverter : public mlir::LLVMTypeConverter {
public:
LLVMTypeConverter(mlir::ModuleOp module, bool applyTBAA,
bool forceUnifiedTBAATree);
bool forceUnifiedTBAATree, const mlir::DataLayout &);

// i32 is used here because LLVM wants i32 constants when indexing into struct
// types. Indexing into other aggregate types is more flexible.
10 changes: 10 additions & 0 deletions flang/include/flang/Optimizer/Dialect/FIRTypes.td
@@ -326,6 +326,8 @@ def fir_RealType : FIR_Type<"Real", "real"> {

let extraClassDeclaration = [{
using KindTy = unsigned;
// Get MLIR float type with same semantics.
mlir::Type getFloatType(const fir::KindMapping &kindMap) const;
}];

let genVerifyDecl = 1;
@@ -495,6 +497,14 @@ def fir_SequenceType : FIR_Type<"Sequence", "array"> {
static constexpr Extent getUnknownExtent() {
return mlir::ShapedType::kDynamic;
}

std::uint64_t getConstantArraySize() {
assert(!hasDynamicExtents() && "array type must have constant shape");
std::uint64_t size = 1;
for (Extent extent : getShape())
size = size * static_cast<std::uint64_t>(extent);
return size;
}
}];
}

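
For illustration, a short sketch of how the two helpers declared above might be used; the free functions here are hypothetical, not part of the PR.

#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Dialect/Support/KindMapping.h"
#include <cassert>
#include <cstdint>

// For a constant-shape array such as fir.array<10x2xf32> this returns the
// product of the extents, i.e. 20.
std::uint64_t numElements(fir::SequenceType seqTy) {
  assert(!seqTy.hasDynamicExtents() && "requires a constant shape");
  return seqTy.getConstantArraySize();
}

// fir.real<k> is mapped to the MLIR float type with the same semantics,
// e.g. fir.real<4> becomes f32 under the default kind mapping.
mlir::Type asFloatType(fir::RealType realTy, const fir::KindMapping &kindMap) {
  return realTy.getFloatType(kindMap);
}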
12 changes: 12 additions & 0 deletions flang/include/flang/Optimizer/Support/DataLayout.h
@@ -13,6 +13,9 @@
#ifndef FORTRAN_OPTIMIZER_SUPPORT_DATALAYOUT_H
#define FORTRAN_OPTIMIZER_SUPPORT_DATALAYOUT_H

#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include <optional>

namespace mlir {
class ModuleOp;
}
@@ -34,6 +37,15 @@ void setMLIRDataLayout(mlir::ModuleOp mlirModule, const llvm::DataLayout &dl);
/// nothing.
void setMLIRDataLayoutFromAttributes(mlir::ModuleOp mlirModule,
bool allowDefaultLayout);

/// Create mlir::DataLayout from the data layout information on the
/// mlir::Module. Creates the data layout information attributes with
/// setMLIRDataLayoutFromAttributes if the DLTI attribute is not yet set. If no
/// information is present at all and \p allowDefaultLayout is false, returns
/// std::nullopt.
std::optional<mlir::DataLayout>
getOrSetDataLayout(mlir::ModuleOp mlirModule, bool allowDefaultLayout = false);

} // namespace fir::support

#endif // FORTRAN_OPTIMIZER_SUPPORT_DATALAYOUT_H
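
As a usage sketch of getOrSetDataLayout (the FIRToLLVMLowering change in CodeGen.cpp below follows the same pattern), assuming a pass that needs a data layout; the helper name and error message are illustrative only.

#include "flang/Optimizer/Support/DataLayout.h"
#include "mlir/IR/BuiltinOps.h"
#include <optional>

// Fetch the mlir::DataLayout for the module, materializing the DLTI attribute
// from the default layout if needed, and fail cleanly when none is available.
static mlir::LogicalResult prepareDataLayout(mlir::ModuleOp mod) {
  std::optional<mlir::DataLayout> dl =
      fir::support::getOrSetDataLayout(mod, /*allowDefaultLayout=*/true);
  if (!dl)
    return mlir::emitError(mod.getLoc(),
                           "module does not carry a data layout attribute");
  // dl->getTypeSize(...) and related queries can be used from here on.
  return mlir::success();
}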
14 changes: 13 additions & 1 deletion flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -16,6 +16,7 @@
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Support/DataLayout.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Optimizer/Support/Utils.h"
@@ -34,6 +35,7 @@
#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/Transforms/AddComdats.h"
#include "mlir/Dialect/OpenACC/OpenACC.h"
@@ -3820,10 +3822,20 @@ class FIRToLLVMLowering
if (mlir::failed(runPipeline(mathConvertionPM, mod)))
return signalPassFailure();

std::optional<mlir::DataLayout> dl =
fir::support::getOrSetDataLayout(mod, /*allowDefaultLayout=*/true);
if (!dl) {
mlir::emitError(mod.getLoc(),
"module operation must carry a data layout attribute "
"to generate llvm IR from FIR");
signalPassFailure();
return;
}

auto *context = getModule().getContext();
fir::LLVMTypeConverter typeConverter{getModule(),
options.applyTBAA || applyTBAA,
options.forceUnifiedTBAATree};
options.forceUnifiedTBAATree, *dl};
mlir::RewritePatternSet pattern(context);
pattern.insert<
AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,