[RISCV] Re-separate unaligned scalar and vector memory features in the backend. #88954

Merged: 2 commits, Apr 16, 2024

clang/lib/Basic/Targets/RISCV.cpp: 2 additions & 1 deletion
@@ -353,7 +353,8 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx"))
HasLegalHalfType = true;

-FastUnalignedAccess = llvm::is_contained(Features, "+fast-unaligned-access");
+FastUnalignedAccess = llvm::is_contained(Features, "+unaligned-scalar-mem") &&
+                      llvm::is_contained(Features, "+unaligned-vector-mem");

if (llvm::is_contained(Features, "+experimental"))
HasExperimental = true;

clang/lib/Driver/ToolChains/Arch/RISCV.cpp: 11 additions & 5 deletions
@@ -68,8 +68,10 @@ static void getRISCFeaturesFromMcpu(const Driver &D, const Arg *A,
<< A->getSpelling() << Mcpu;
}

-if (llvm::RISCV::hasFastUnalignedAccess(Mcpu))
-  Features.push_back("+fast-unaligned-access");
+if (llvm::RISCV::hasFastUnalignedAccess(Mcpu)) {
+  Features.push_back("+unaligned-scalar-mem");
+  Features.push_back("+unaligned-vector-mem");
+}
}

void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
@@ -168,12 +170,16 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
}

// Android requires fast unaligned access on RISCV64.
-if (Triple.isAndroid())
-  Features.push_back("+fast-unaligned-access");
+if (Triple.isAndroid()) {
+  Features.push_back("+unaligned-scalar-mem");
+  Features.push_back("+unaligned-vector-mem");
+}

// -mstrict-align is default, unless -mno-strict-align is specified.
AddTargetFeature(Args, Features, options::OPT_mno_strict_align,
options::OPT_mstrict_align, "fast-unaligned-access");
options::OPT_mstrict_align, "unaligned-scalar-mem");
AddTargetFeature(Args, Features, options::OPT_mno_strict_align,
options::OPT_mstrict_align, "unaligned-vector-mem");

// Now add any that the user explicitly requested on the command line,
// which may override the defaults.

clang/test/Driver/riscv-features.c: 2 additions & 2 deletions
@@ -38,8 +38,8 @@
// RUN: %clang --target=riscv32-unknown-elf -### %s -mno-strict-align 2>&1 | FileCheck %s -check-prefix=FAST-UNALIGNED-ACCESS
// RUN: %clang --target=riscv32-unknown-elf -### %s -mstrict-align 2>&1 | FileCheck %s -check-prefix=NO-FAST-UNALIGNED-ACCESS

-// FAST-UNALIGNED-ACCESS: "-target-feature" "+fast-unaligned-access"
-// NO-FAST-UNALIGNED-ACCESS: "-target-feature" "-fast-unaligned-access"
+// FAST-UNALIGNED-ACCESS: "-target-feature" "+unaligned-scalar-mem" "-target-feature" "+unaligned-vector-mem"
+// NO-FAST-UNALIGNED-ACCESS: "-target-feature" "-unaligned-scalar-mem" "-target-feature" "-unaligned-vector-mem"

// RUN: %clang --target=riscv32-unknown-elf -### %s 2>&1 | FileCheck %s -check-prefix=NOUWTABLE
// RUN: %clang --target=riscv32-unknown-elf -fasynchronous-unwind-tables -### %s 2>&1 | FileCheck %s -check-prefix=UWTABLE

llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp: 2 additions & 2 deletions
@@ -326,8 +326,8 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB,
.setMemRefs(MMOLo);

if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) {
-// FIXME: Zdinx RV32 can not work on unaligned memory.
-assert(!STI->hasFastUnalignedAccess());
+// FIXME: Zdinx RV32 can not work on unaligned scalar memory.
+assert(!STI->enableUnalignedScalarMem());

assert(MBBI->getOperand(2).getOffset() % 8 == 0);
MBBI->getOperand(2).setOffset(MBBI->getOperand(2).getOffset() + 4);
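
A note on the hunk above: expandRV32ZdinxStore splits a 64-bit Zdinx store into two 4-byte stores of a GPR pair, which is why only the scalar half of the old feature matters to this assert. A minimal sketch of the kind of IR that hits this pseudo-expansion (illustrative only, not a test from this patch; the function name is made up):

; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s
define void @store_f64(ptr %p, double %d) {
  ; On RV32 Zdinx the double lives in a register pair and is stored with
  ; two 4-byte stores, so the access must remain naturally aligned even
  ; when unaligned scalar memory is otherwise enabled.
  store double %d, ptr %p, align 8
  ret void
}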

llvm/lib/Target/RISCV/RISCVFeatures.td: 9 additions & 4 deletions
@@ -1183,10 +1183,15 @@ def FeatureTrailingSeqCstFence : SubtargetFeature<"seq-cst-trailing-fence",
"true",
"Enable trailing fence for seq-cst store.">;

-def FeatureFastUnalignedAccess
-    : SubtargetFeature<"fast-unaligned-access", "HasFastUnalignedAccess",
-                       "true", "Has reasonably performant unaligned "
-                       "loads and stores (both scalar and vector)">;
+def FeatureUnalignedScalarMem
+    : SubtargetFeature<"unaligned-scalar-mem", "EnableUnalignedScalarMem",
+                       "true", "Has reasonably performant unaligned scalar "
+                       "loads and stores">;
+
+def FeatureUnalignedVectorMem
+    : SubtargetFeature<"unaligned-vector-mem", "EnableUnalignedVectorMem",
+                       "true", "Has reasonably performant unaligned vector "
+                       "loads and stores">;

def FeaturePostRAScheduler : SubtargetFeature<"use-postra-scheduler",
"UsePostRAScheduler", "true", "Schedule again after register allocation">;
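
Because these are now two independent subtarget features, each can be toggled separately with -mattr. A hedged sketch of the scalar side (mirroring the RUN lines in the tests updated below; the function name is illustrative):

; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem < %s -o -
define i64 @load_i64_align1(ptr %p) {
  ; With +unaligned-scalar-mem this is expected to stay a single ld;
  ; without it, the load is expanded into byte loads, shifts, and ors.
  %v = load i64, ptr %p, align 1
  ret i64 %v
}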

llvm/lib/Target/RISCV/RISCVISelLowering.cpp: 8 additions & 8 deletions
@@ -1924,7 +1924,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
// replace. If we don't support unaligned scalar mem, prefer the constant
// pool.
// TODO: Can the caller pass down the alignment?
-if (!Subtarget.hasFastUnalignedAccess())
+if (!Subtarget.enableUnalignedScalarMem())
return true;

// Prefer to keep the load if it would require many instructions.
@@ -15794,7 +15794,7 @@ static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
if (WiderElementSize > ST.getELen()/8)
return false;

-if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize)
+if (!ST.enableUnalignedVectorMem() && BaseAlign < WiderElementSize)
return false;

for (unsigned i = 0; i < Index->getNumOperands(); i++) {
@@ -20620,8 +20620,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
unsigned *Fast) const {
if (!VT.isVector()) {
if (Fast)
-*Fast = Subtarget.hasFastUnalignedAccess();
-return Subtarget.hasFastUnalignedAccess();
+*Fast = Subtarget.enableUnalignedScalarMem();
+return Subtarget.enableUnalignedScalarMem();
}

// All vector implementations must support element alignment
@@ -20637,8 +20637,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
// misaligned accesses. TODO: Work through the codegen implications of
// allowing such accesses to be formed, and considered fast.
if (Fast)
-*Fast = Subtarget.hasFastUnalignedAccess();
-return Subtarget.hasFastUnalignedAccess();
+*Fast = Subtarget.enableUnalignedVectorMem();
+return Subtarget.enableUnalignedVectorMem();
}


@@ -20673,7 +20673,7 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,

// Do we have sufficient alignment for our preferred VT? If not, revert
// to largest size allowed by our alignment criteria.
-if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) {
+if (PreferredVT != MVT::i8 && !Subtarget.enableUnalignedVectorMem()) {
Align RequiredAlign(PreferredVT.getStoreSize());
if (Op.isFixedDstAlign())
RequiredAlign = std::min(RequiredAlign, Op.getDstAlign());
@@ -20865,7 +20865,7 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
if (!isLegalElementTypeForRVV(ScalarType))
return false;

-if (!Subtarget.hasFastUnalignedAccess() &&
+if (!Subtarget.enableUnalignedVectorMem() &&
Alignment < ScalarType.getStoreSize())
return false;

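
The net effect of the allowsMisalignedMemoryAccesses changes above: scalar misaligned accesses are now gated on enableUnalignedScalarMem() and vector ones on enableUnalignedVectorMem(), so the two can diverge. A sketch under the assumption that only the scalar feature is enabled (illustrative, not from this patch):

; RUN: llc -mtriple=riscv64 -mattr=+v,+unaligned-scalar-mem < %s -o -
define i32 @scalar_misaligned(ptr %p) {
  ; Permitted as a single lw: enableUnalignedScalarMem() is true.
  %v = load i32, ptr %p, align 1
  ret i32 %v
}

define <4 x i32> @vector_misaligned(ptr %p) {
  ; enableUnalignedVectorMem() is false for this RUN line, so this load
  ; is expected to be broken up rather than emitted as one vle32.v.
  %v = load <4 x i32>, ptr %p, align 1
  ret <4 x i32> %v
}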

llvm/lib/Target/RISCV/RISCVProcessors.td: 4 additions & 2 deletions
@@ -257,7 +257,8 @@ def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
FeatureStdExtZbb,
FeatureStdExtZbs,
FeatureStdExtZfhmin,
-FeatureFastUnalignedAccess],
+FeatureUnalignedScalarMem,
+FeatureUnalignedVectorMem],
[TuneNoDefaultUnroll,
TuneConditionalCompressedMoveFusion,
TuneLUIADDIFusion,
@@ -295,7 +296,8 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
FeatureStdExtZvkng,
FeatureStdExtZvksc,
FeatureStdExtZvksg,
-FeatureFastUnalignedAccess],
+FeatureUnalignedScalarMem,
+FeatureUnalignedVectorMem],
[TuneNoDefaultUnroll,
TuneConditionalCompressedMoveFusion,
TuneLUIADDIFusion,

llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h: 2 additions & 2 deletions
@@ -228,7 +228,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
return false;

EVT ElemType = DataTypeVT.getScalarType();
-if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
+if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
return false;

return TLI->isLegalElementTypeForRVV(ElemType);
@@ -253,7 +253,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
return false;

EVT ElemType = DataTypeVT.getScalarType();
-if (!ST->hasFastUnalignedAccess() && Alignment < ElemType.getStoreSize())
+if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
return false;

return TLI->isLegalElementTypeForRVV(ElemType);
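
These TTI hooks feed vectorizer legality queries: a masked or strided access whose alignment is below the element store size is only reported legal when unaligned vector memory is enabled. A hedged sketch of an access that now depends on +unaligned-vector-mem alone (illustrative; the function name is made up):

; RUN: llc -mtriple=riscv64 -mattr=+v,+unaligned-vector-mem < %s -o -
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
define <4 x i32> @gather_align1(<4 x ptr> %ptrs, <4 x i1> %m) {
  ; Element alignment 1 is below the 4-byte element size, so this is
  ; treated as legal for RVV only when enableUnalignedVectorMem() holds.
  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> %m, <4 x i32> poison)
  ret <4 x i32> %v
}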

llvm/test/CodeGen/RISCV/memcpy-inline.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST

; ----------------------------------------------------------------------

llvm/test/CodeGen/RISCV/memcpy.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }


llvm/test/CodeGen/RISCV/memset-inline.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }


llvm/test/CodeGen/RISCV/pr56110.ll: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 | FileCheck %s
-; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem | FileCheck %s

define void @foo_set(ptr nocapture noundef %a, i32 noundef %v) {
; CHECK-LABEL: foo_set:

llvm/test/CodeGen/RISCV/riscv-func-target-feature.ll: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ entry:
}

; CHECK-NOT: .option push
-define void @test5() "target-features"="+fast-unaligned-access" {
+define void @test5() "target-features"="+unaligned-scalar-mem" {
; CHECK-LABEL: test5
; CHECK-NOT: .option pop
entry:

llvm/test/CodeGen/RISCV/rvv/concat-vectors-constant-stride.ll: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+fast-unaligned-access -target-abi=ilp32 \
+; RUN: llc -mtriple=riscv32 -mattr=+v,+unaligned-vector-mem -target-abi=ilp32 \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+fast-unaligned-access -target-abi=lp64 \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+unaligned-vector-mem -target-abi=lp64 \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define void @constant_forward_stride(ptr %s, ptr %d) {
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,RV64
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+fast-unaligned-access -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+unaligned-vector-mem -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64,RV64-MISALIGN

; RUN: llc -mtriple=riscv64 -mattr=+f,+zfh,+zve64f,+zvl128b,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,CHECK-NO-MISALIGN,ZVE64F


llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=SLOW,RV32-SLOW
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=SLOW,RV64-SLOW
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=FAST,RV32-FAST
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+unaligned-vector-mem -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=FAST,RV64-FAST

define <4 x i32> @load_v4i32_align1(ptr %ptr) {

llvm/test/CodeGen/RISCV/rvv/memcpy-inline.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+v \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST

; ----------------------------------------------------------------------

llvm/test/CodeGen/RISCV/rvv/memset-inline.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+fast-unaligned-access \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+unaligned-scalar-mem,+unaligned-vector-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
%struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }


llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v < %s \
; RUN: -verify-machineinstrs | FileCheck %s
-; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
+; RUN: llc -mtriple riscv32 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
; RUN: -verify-machineinstrs | FileCheck --check-prefix=FAST %s
-; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+fast-unaligned-access < %s \
+; RUN: llc -mtriple riscv64 -mattr=+d,+zfh,+zvfh,+v,+unaligned-vector-mem < %s \
; RUN: -verify-machineinstrs | FileCheck --check-prefix=FAST %s



llvm/test/CodeGen/RISCV/unaligned-load-store.ll: 2 additions & 2 deletions
@@ -3,9 +3,9 @@
; RUN: | FileCheck -check-prefixes=ALL,SLOW,RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ALL,SLOW,RV64I %s
-; RUN: llc -mtriple=riscv32 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ALL,FAST,RV32I-FAST %s
-; RUN: llc -mtriple=riscv64 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=ALL,FAST,RV64I-FAST %s

; A collection of cases showing codegen for unaligned loads and stores

llvm/utils/TableGen/RISCVTargetDefEmitter.cpp: 10 additions & 2 deletions
@@ -60,11 +60,19 @@ static void EmitRISCVTargetDef(RecordKeeper &RK, raw_ostream &OS) {
if (MArch.empty())
MArch = getMArch(*Rec);

-const bool FastUnalignedAccess =
+bool FastScalarUnalignedAccess =
    any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
-      return Feature->getValueAsString("Name") == "fast-unaligned-access";
+      return Feature->getValueAsString("Name") == "unaligned-scalar-mem";
    });

+bool FastVectorUnalignedAccess =
+    any_of(Rec->getValueAsListOfDefs("Features"), [&](auto &Feature) {
+      return Feature->getValueAsString("Name") == "unaligned-vector-mem";
+    });
+
+bool FastUnalignedAccess =
+    FastScalarUnalignedAccess && FastVectorUnalignedAccess;

OS << "PROC(" << Rec->getName() << ", "
<< "{\"" << Rec->getValueAsString("Name") << "\"}, "
<< "{\"" << MArch << "\"}, " << FastUnalignedAccess << ")\n";