
Commit a72772e

Yonghong Song authored and committed
[RFC][BPF] Do atomic_fetch_*() pattern matching with memory ordering
For atomic fetch_and_*() operations, do pattern matching with the memory
orderings seq_cst, acq_rel, release, acquire and monotonic (relaxed).
For fetch_and_*() operations with seq_cst/acq_rel/release/acquire
ordering, atomic_fetch_*() instructions are generated. For monotonic
ordering, locked insns are generated if the return value is not used;
otherwise, atomic_fetch_*() insns are used. The main motivation is to
resolve the kernel issue [1].

The following memory orderings are supported:
  seq_cst, acq_rel, release, acquire, relaxed

The current gcc-style __sync_fetch_and_*() operations are all seq_cst.
To use explicit memory ordering, the _Atomic type is needed. The
following is an example:

```
$ cat test.c
#include <stdatomic.h>
void f1(_Atomic int *i) {
  (void)__c11_atomic_fetch_and(i, 10, memory_order_relaxed);
}
void f2(_Atomic int *i) {
  (void)__c11_atomic_fetch_and(i, 10, memory_order_acquire);
}
void f3(_Atomic int *i) {
  (void)__c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
}

$ cat run.sh
clang -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -c test.c -o test.o && llvm-objdump -d test.o

$ ./run.sh
test.o: file format elf64-bpf

Disassembly of section .text:

0000000000000000 <f1>:
       0: b4 02 00 00 0a 00 00 00 w2 = 0xa
       1: c3 21 00 00 50 00 00 00 lock *(u32 *)(r1 + 0x0) &= w2
       2: 95 00 00 00 00 00 00 00 exit

0000000000000018 <f2>:
       3: b4 02 00 00 0a 00 00 00 w2 = 0xa
       4: c3 21 00 00 51 00 00 00 w2 = atomic_fetch_and((u32 *)(r1 + 0x0), w2)
       5: 95 00 00 00 00 00 00 00 exit

0000000000000030 <f3>:
       6: b4 02 00 00 0a 00 00 00 w2 = 0xa
       7: c3 21 00 00 51 00 00 00 w2 = atomic_fetch_and((u32 *)(r1 + 0x0), w2)
       8: 95 00 00 00 00 00 00 00 exit
```

The following is another example where the return value is used:

```
$ cat test1.c
#include <stdatomic.h>
int f1(_Atomic int *i) {
  return __c11_atomic_fetch_and(i, 10, memory_order_relaxed);
}
int f2(_Atomic int *i) {
  return __c11_atomic_fetch_and(i, 10, memory_order_acquire);
}
int f3(_Atomic int *i) {
  return __c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
}

$ cat run.sh
clang -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -c test1.c -o test1.o && llvm-objdump -d test1.o

$ ./run.sh
test1.o: file format elf64-bpf

Disassembly of section .text:

0000000000000000 <f1>:
       0: b4 00 00 00 0a 00 00 00 w0 = 0xa
       1: c3 01 00 00 51 00 00 00 w0 = atomic_fetch_and((u32 *)(r1 + 0x0), w0)
       2: 95 00 00 00 00 00 00 00 exit

0000000000000018 <f2>:
       3: b4 00 00 00 0a 00 00 00 w0 = 0xa
       4: c3 01 00 00 51 00 00 00 w0 = atomic_fetch_and((u32 *)(r1 + 0x0), w0)
       5: 95 00 00 00 00 00 00 00 exit

0000000000000030 <f3>:
       6: b4 00 00 00 0a 00 00 00 w0 = 0xa
       7: c3 01 00 00 51 00 00 00 w0 = atomic_fetch_and((u32 *)(r1 + 0x0), w0)
       8: 95 00 00 00 00 00 00 00 exit
```

You can see that for relaxed memory ordering, if the return value is used,
the atomic_fetch_and() insn is used; otherwise, the locked insn is used.
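For comparison, an illustration (not part of the commit's examples): the gcc-style __sync builtins take a plain pointer and are always seq_cst, so both of the following should lower to the atomic_fetch_and() form regardless of whether the return value is used:

```
int f_sync_used(int *i) {
  /* __sync builtins are always seq_cst, so the fetch form is expected */
  return __sync_fetch_and_and(i, 10);
}

void f_sync_unused(int *i) {
  /* still seq_cst: unlike the relaxed case above, the locked insn is not
     expected here even though the return value is discarded */
  (void)__sync_fetch_and_and(i, 10);
}
```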
Here is another example with a global _Atomic variable:

```
$ cat test3.c
#include <stdatomic.h>
_Atomic int i;
void f1(void) {
  (void)__c11_atomic_fetch_and(&i, 10, memory_order_relaxed);
}
void f2(void) {
  (void)__c11_atomic_fetch_and(&i, 10, memory_order_seq_cst);
}

$ cat run.sh
clang -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -c test3.c -o test3.o && llvm-objdump -d test3.o

$ ./run.sh
test3.o: file format elf64-bpf

Disassembly of section .text:

0000000000000000 <f1>:
       0: b4 01 00 00 0a 00 00 00 w1 = 0xa
       1: 18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0x0 ll
       3: c3 12 00 00 50 00 00 00 lock *(u32 *)(r2 + 0x0) &= w1
       4: 95 00 00 00 00 00 00 00 exit

0000000000000028 <f2>:
       5: b4 01 00 00 0a 00 00 00 w1 = 0xa
       6: 18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0x0 ll
       8: c3 12 00 00 51 00 00 00 w1 = atomic_fetch_and((u32 *)(r2 + 0x0), w1)
       9: 95 00 00 00 00 00 00 00 exit
```

Note that in the above compilations, '-g' is not used. The reason is the
following IR related to the _Atomic type:

```
$ clang -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -g -S -emit-llvm test3.c
```

The related debug info for test3.c:

```
!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
!1 = distinct !DIGlobalVariable(name: "i", scope: !2, file: !3, line: 3, type: !16, isLocal: false, isDefinition: true)
...
!16 = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: !17)
!17 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
...
```

If compiling test.c, the related debug info:

```
...
!19 = distinct !DISubprogram(name: "f1", scope: !1, file: !1, line: 3, type: !20, scopeLine: 3, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !25)
!20 = !DISubroutineType(types: !21)
!21 = !{null, !22}
!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 64)
!23 = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: !24)
!24 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!25 = !{!26}
!26 = !DILocalVariable(name: "i", arg: 1, scope: !19, file: !1, line: 3, type: !22)
```

All of the above suggests that _Atomic behaves like a modifier (e.g. const,
restrict, volatile). This seems true based on doc [2]. Without properly
handling DW_TAG_atomic_type, llvm BTF generation will be incorrect, since
the current implementation assumes DW_TAG_atomic_type does not exist. So
we have two choices here:
  (1) The llvm bpf backend processes DW_TAG_atomic_type but ignores it in
      BTF encoding.
  (2) Add another type, e.g. BTF_KIND_ATOMIC, to BTF. BTF_KIND_ATOMIC
      behaves as a modifier like const/volatile/restrict.
For choice (1), the following is a hack which can make '-g' work for test3.c:

```
diff --git a/llvm/lib/Target/BPF/BTFDebug.cpp b/llvm/lib/Target/BPF/BTFDebug.cpp
index 4d847ab..fd61bb811111 100644
--- a/llvm/lib/Target/BPF/BTFDebug.cpp
+++ b/llvm/lib/Target/BPF/BTFDebug.cpp
@@ -1444,8 +1444,14 @@ void BTFDebug::processGlobals(bool ProcessingMapDef) {
     DIGlobal = GVE->getVariable();
     if (SecName.starts_with(".maps"))
       visitMapDefType(DIGlobal->getType(), GVTypeId);
-    else
-      visitTypeEntry(DIGlobal->getType(), GVTypeId, false, false);
+    else {
+      const DIType *Ty = DIGlobal->getType();
+      auto *DTy = dyn_cast<DIDerivedType>(Ty);
+      if (DTy && DTy->getTag() == dwarf::DW_TAG_atomic_type)
+        visitTypeEntry(DTy->getBaseType(), GVTypeId, false, false);
+      else
+        visitTypeEntry(Ty, GVTypeId, false, false);
+    }
     break;
   }
```

You can see that basically dwarf::DW_TAG_atomic_type is skipped during BTF
generation. Other changes are needed to avoid other usages of
dwarf::DW_TAG_atomic_type.

But I prefer adding BTF_KIND_ATOMIC if we indeed intend to use _Atomic in
bpf programs. This is probably the only way if the _Atomic type is used,
e.g., for a global variable, where the corresponding types in the skeleton
also need to be _Atomic. Please let me know your opinion.

[1] https://lore.kernel.org/bpf/[email protected]/
[2] https://dwarfstd.org/issues/131112.1.html
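To make choice (2) concrete, here is a minimal sketch of how a BTF_KIND_ATOMIC entry could be encoded if it mirrors the existing modifier kinds; the kind value and the helper below are hypothetical, not part of this patch:

```
#include <linux/btf.h>

/* hypothetical kind value; a real one would be assigned upstream */
#define BTF_KIND_ATOMIC 20

/* Like const/volatile/restrict, the entry would carry no name and no
 * members: the kind lives in bits 24-28 of 'info', vlen stays 0, and
 * 'type' points at the BTF id of the base type (e.g. "int"). */
static struct btf_type make_atomic_type(unsigned int base_type_id) {
  struct btf_type t = {
      .name_off = 0,
      .info = (unsigned int)BTF_KIND_ATOMIC << 24,
      .type = base_type_id,
  };
  return t;
}
```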
1 parent ef1ef03

2 files changed: +183 −21

llvm/lib/Target/BPF/BPFInstrInfo.td

Lines changed: 103 additions & 10 deletions
```
@@ -864,26 +864,119 @@ class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
 
 let Constraints = "$dst = $val" in {
   let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
-    def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add", atomic_load_add_i32>;
-    def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and", atomic_load_and_i32>;
-    def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or", atomic_load_or_i32>;
-    def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor", atomic_load_xor_i32>;
+    def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add", atomic_load_add_i32_seq_cst>;
+    def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and", atomic_load_and_i32_seq_cst>;
+    def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or", atomic_load_or_i32_seq_cst>;
+    def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor", atomic_load_xor_i32_seq_cst>;
   }
 
   let Predicates = [BPFHasALU32] in {
-    def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add", atomic_load_add_i64>;
+    def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add", atomic_load_add_i64_seq_cst>;
   }
-  def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and", atomic_load_and_i64>;
-  def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or", atomic_load_or_i64>;
-  def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor", atomic_load_xor_i64>;
+  def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and", atomic_load_and_i64_seq_cst>;
+  def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or", atomic_load_or_i64_seq_cst>;
+  def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor", atomic_load_xor_i64_seq_cst>;
+}
+
+let Predicates = [BPFHasALU32] in {
+  def : Pat<(atomic_load_add_i32_monotonic ADDRri:$addr, GPR32:$val),
+            (XADDW32 ADDRri:$addr, GPR32:$val)>;
+  def : Pat<(atomic_load_add_i32_acquire ADDRri:$addr, GPR32:$val),
+            (XFADDW32 ADDRri:$addr, GPR32:$val)>;
+  def : Pat<(atomic_load_add_i32_release ADDRri:$addr, GPR32:$val),
+            (XFADDW32 ADDRri:$addr, GPR32:$val)>;
+  def : Pat<(atomic_load_add_i32_acq_rel ADDRri:$addr, GPR32:$val),
+            (XFADDW32 ADDRri:$addr, GPR32:$val)>;
+
+  def : Pat<(atomic_load_add_i64_monotonic ADDRri:$addr, GPR:$val),
+            (XADDD ADDRri:$addr, GPR:$val)>;
+  def : Pat<(atomic_load_add_i64_acquire ADDRri:$addr, GPR:$val),
+            (XFADDD ADDRri:$addr, GPR:$val)>;
+  def : Pat<(atomic_load_add_i64_release ADDRri:$addr, GPR:$val),
+            (XFADDD ADDRri:$addr, GPR:$val)>;
+  def : Pat<(atomic_load_add_i64_acq_rel ADDRri:$addr, GPR:$val),
+            (XFADDD ADDRri:$addr, GPR:$val)>;
 }
 
 // atomic_load_sub can be represented as a neg followed
 // by an atomic_load_add.
-def : Pat<(atomic_load_sub_i32 ADDRri:$addr, GPR32:$val),
+// FIXME: the below can probably be simplified.
+def : Pat<(atomic_load_sub_i32_monotonic ADDRri:$addr, GPR32:$val),
+          (XADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
+def : Pat<(atomic_load_sub_i32_acquire ADDRri:$addr, GPR32:$val),
+          (XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
+def : Pat<(atomic_load_sub_i32_release ADDRri:$addr, GPR32:$val),
+          (XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
+def : Pat<(atomic_load_sub_i32_acq_rel ADDRri:$addr, GPR32:$val),
+          (XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
+def : Pat<(atomic_load_sub_i32_seq_cst ADDRri:$addr, GPR32:$val),
           (XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
-def : Pat<(atomic_load_sub_i64 ADDRri:$addr, GPR:$val),
+
+def : Pat<(atomic_load_sub_i64_monotonic ADDRri:$addr, GPR:$val),
+          (XADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
+def : Pat<(atomic_load_sub_i64_acquire ADDRri:$addr, GPR:$val),
+          (XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
+def : Pat<(atomic_load_sub_i64_release ADDRri:$addr, GPR:$val),
+          (XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
+def : Pat<(atomic_load_sub_i64_acq_rel ADDRri:$addr, GPR:$val),
           (XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
+def : Pat<(atomic_load_sub_i64_seq_cst ADDRri:$addr, GPR:$val),
+          (XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
+
+def : Pat<(atomic_load_and_i32_monotonic ADDRri:$addr, GPR32:$val),
+          (XANDW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_and_i32_acquire ADDRri:$addr, GPR32:$val),
+          (XFANDW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_and_i32_release ADDRri:$addr, GPR32:$val),
+          (XFANDW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_and_i32_acq_rel ADDRri:$addr, GPR32:$val),
+          (XFANDW32 ADDRri:$addr, GPR32:$val)>;
+
+
+def : Pat<(atomic_load_and_i64_monotonic ADDRri:$addr, GPR:$val),
+          (XANDD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_and_i64_acquire ADDRri:$addr, GPR:$val),
+          (XFANDD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_and_i64_release ADDRri:$addr, GPR:$val),
+          (XFANDD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_and_i64_acq_rel ADDRri:$addr, GPR:$val),
+          (XFANDD ADDRri:$addr, GPR:$val)>;
+
+def : Pat<(atomic_load_or_i32_monotonic ADDRri:$addr, GPR32:$val),
+          (XORW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_or_i32_acquire ADDRri:$addr, GPR32:$val),
+          (XFORW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_or_i32_release ADDRri:$addr, GPR32:$val),
+          (XFORW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_or_i32_acq_rel ADDRri:$addr, GPR32:$val),
+          (XFORW32 ADDRri:$addr, GPR32:$val)>;
+
+def : Pat<(atomic_load_or_i64_monotonic ADDRri:$addr, GPR:$val),
+          (XORD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_or_i64_acquire ADDRri:$addr, GPR:$val),
+          (XFORD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_or_i64_release ADDRri:$addr, GPR:$val),
+          (XFORD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_or_i64_acq_rel ADDRri:$addr, GPR:$val),
+          (XFORD ADDRri:$addr, GPR:$val)>;
+
+def : Pat<(atomic_load_xor_i32_monotonic ADDRri:$addr, GPR32:$val),
+          (XXORW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_xor_i32_acquire ADDRri:$addr, GPR32:$val),
+          (XFXORW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_xor_i32_release ADDRri:$addr, GPR32:$val),
+          (XFXORW32 ADDRri:$addr, GPR32:$val)>;
+def : Pat<(atomic_load_xor_i32_acq_rel ADDRri:$addr, GPR32:$val),
+          (XFXORW32 ADDRri:$addr, GPR32:$val)>;
+
+def : Pat<(atomic_load_xor_i64_monotonic ADDRri:$addr, GPR:$val),
+          (XXORD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_xor_i64_acquire ADDRri:$addr, GPR:$val),
+          (XFXORD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_xor_i64_release ADDRri:$addr, GPR:$val),
+          (XFXORD ADDRri:$addr, GPR:$val)>;
+def : Pat<(atomic_load_xor_i64_acq_rel ADDRri:$addr, GPR:$val),
+          (XFXORD ADDRri:$addr, GPR:$val)>;
 
 // Atomic Exchange
 class XCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
```
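One consequence of the neg-plus-add comment above, sketched here for illustration (the expected lowering is inferred from these patterns, not output taken from the commit): since BPF has no native atomic subtract, a fetch_sub selects a register negate feeding an atomic fetch-add.

```
#include <stdatomic.h>

int f_sub(_Atomic int *i) {
  /* expected to select roughly:
   *   w0 = 0xa
   *   w0 = -w0                                       ; NEG_32
   *   w0 = atomic_fetch_add((u32 *)(r1 + 0x0), w0)
   */
  return __c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
}
```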

llvm/lib/Target/BPF/BPFMIChecking.cpp

Lines changed: 80 additions & 11 deletions
```
@@ -43,14 +43,14 @@ struct BPFMIPreEmitChecking : public MachineFunctionPass {
   // Initialize class variables.
   void initialize(MachineFunction &MFParm);
 
-  void processAtomicInsts();
+  bool processAtomicInsts();
 
 public:
   // Main entry point for this pass.
   bool runOnMachineFunction(MachineFunction &MF) override {
     if (!skipFunction(MF.getFunction())) {
       initialize(MF);
-      processAtomicInsts();
+      return processAtomicInsts();
     }
     return false;
   }
@@ -152,22 +152,91 @@ static bool hasLiveDefs(const MachineInstr &MI, const TargetRegisterInfo *TRI) {
   return false;
 }
 
-void BPFMIPreEmitChecking::processAtomicInsts() {
+bool BPFMIPreEmitChecking::processAtomicInsts() {
+  if (!MF->getSubtarget<BPFSubtarget>().getHasJmp32()) {
+    // Only check for cpu version 1 and 2.
+    for (MachineBasicBlock &MBB : *MF) {
+      for (MachineInstr &MI : MBB) {
+        if (MI.getOpcode() != BPF::XADDW && MI.getOpcode() != BPF::XADDD)
+          continue;
+
+        LLVM_DEBUG(MI.dump());
+        if (hasLiveDefs(MI, TRI)) {
+          DebugLoc Empty;
+          const DebugLoc &DL = MI.getDebugLoc();
+          const Function &F = MF->getFunction();
+          F.getContext().diagnose(DiagnosticInfoUnsupported{
+              F, "Invalid usage of the XADD return value", DL});
+        }
+      }
+    }
+  }
+
+  // Check return values of the atomic fetch-and-{add,and,or,xor} candidates.
+  // If the return value is used, the atomic_<op> instruction is replaced
+  // with the corresponding atomic_fetch_<op> instruction.
+  MachineInstr *ToErase = nullptr;
+  bool Changed = false;
+  const BPFInstrInfo *TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
   for (MachineBasicBlock &MBB : *MF) {
     for (MachineInstr &MI : MBB) {
-      if (MI.getOpcode() != BPF::XADDW && MI.getOpcode() != BPF::XADDD)
+      if (ToErase) {
+        ToErase->eraseFromParent();
+        ToErase = nullptr;
+      }
+
+      if (MI.getOpcode() != BPF::XADDW32 && MI.getOpcode() != BPF::XADDD &&
+          MI.getOpcode() != BPF::XANDW32 && MI.getOpcode() != BPF::XANDD &&
+          MI.getOpcode() != BPF::XXORW32 && MI.getOpcode() != BPF::XXORD &&
+          MI.getOpcode() != BPF::XORW32 && MI.getOpcode() != BPF::XORD)
         continue;
 
-      LLVM_DEBUG(MI.dump());
-      if (hasLiveDefs(MI, TRI)) {
-        DebugLoc Empty;
-        const DebugLoc &DL = MI.getDebugLoc();
-        const Function &F = MF->getFunction();
-        F.getContext().diagnose(DiagnosticInfoUnsupported{
-            F, "Invalid usage of the XADD return value", DL});
+      if (!hasLiveDefs(MI, TRI))
+        continue;
+
+      LLVM_DEBUG(dbgs() << "Transforming "; MI.dump());
+      unsigned newOpcode;
+      switch (MI.getOpcode()) {
+      case BPF::XADDW32:
+        newOpcode = BPF::XFADDW32;
+        break;
+      case BPF::XADDD:
+        newOpcode = BPF::XFADDD;
+        break;
+      case BPF::XANDW32:
+        newOpcode = BPF::XFANDW32;
+        break;
+      case BPF::XANDD:
+        newOpcode = BPF::XFANDD;
+        break;
+      case BPF::XXORW32:
+        newOpcode = BPF::XFXORW32;
+        break;
+      case BPF::XXORD:
+        newOpcode = BPF::XFXORD;
+        break;
+      case BPF::XORW32:
+        newOpcode = BPF::XFORW32;
+        break;
+      case BPF::XORD:
+        newOpcode = BPF::XFORD;
+        break;
+      default:
+        llvm_unreachable("Incorrect Atomic Instruction Opcode");
       }
+
+      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(newOpcode))
+          .add(MI.getOperand(0))
+          .add(MI.getOperand(1))
+          .add(MI.getOperand(2))
+          .add(MI.getOperand(3));
+
+      ToErase = &MI;
+      Changed = true;
     }
   }
+
+  return Changed;
 }
 
 } // namespace
```
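The first loop keeps the pre-existing cpu v1/v2 diagnostic, since those cpus lack the atomic_fetch_*() instructions. A hedged illustration (not from the commit) of code that should still be rejected there:

```
int f_old_cpu(int *i) {
  /* with -mcpu=v1 or -mcpu=v2 there is no fetch form to fall back to,
   * so using the XADD return value should trigger
   * "Invalid usage of the XADD return value" */
  return __sync_fetch_and_add(i, 10);
}
```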
