Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and malloc parameters bound" #117020
Conversation
…alloca a…" This reverts commit 02b8ee2.
@llvm/pr-subscribers-llvm-transforms

Author: Florian Mayer (fmayer)

Changes

Reverts llvm/llvm-project#115522. This caused UBSan errors in a multi-stage clang build.

Full diff: https://github.com/llvm/llvm-project/pull/117020.diff

3 Files Affected:
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index d444a768a65436..945081b77e9536 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -723,16 +723,12 @@ class Value {
bool AllowInvariantGroup = false,
function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
nullptr) const;
-
- Value *stripAndAccumulateConstantOffsets(
- const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
- bool AllowInvariantGroup = false,
- function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
- nullptr) {
+ Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
+ bool AllowNonInbounds,
+ bool AllowInvariantGroup = false) {
return const_cast<Value *>(
static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
- DL, Offset, AllowNonInbounds, AllowInvariantGroup,
- ExternalAnalysis));
+ DL, Offset, AllowNonInbounds, AllowInvariantGroup));
}
/// This is a wrapper around stripAndAccumulateConstantOffsets with the
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 6c0940c4c81ebe..cd8594d670502d 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -673,69 +673,6 @@ STATISTIC(ObjectVisitorArgument,
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");
-static std::optional<APInt>
-combinePossibleConstantValues(std::optional<APInt> LHS,
- std::optional<APInt> RHS,
- ObjectSizeOpts::Mode EvalMode) {
- if (!LHS || !RHS)
- return std::nullopt;
- if (EvalMode == ObjectSizeOpts::Mode::Max)
- return LHS->sge(*RHS) ? *LHS : *RHS;
- else
- return LHS->sle(*RHS) ? *LHS : *RHS;
-}
-
-static std::optional<APInt> aggregatePossibleConstantValuesImpl(
- const Value *V, ObjectSizeOpts::Mode EvalMode, unsigned recursionDepth) {
- constexpr unsigned maxRecursionDepth = 4;
- if (recursionDepth == maxRecursionDepth)
- return std::nullopt;
-
- if (const auto *CI = dyn_cast<ConstantInt>(V)) {
- return CI->getValue();
- }
-
- else if (const auto *SI = dyn_cast<SelectInst>(V)) {
- return combinePossibleConstantValues(
- aggregatePossibleConstantValuesImpl(SI->getTrueValue(), EvalMode,
- recursionDepth + 1),
- aggregatePossibleConstantValuesImpl(SI->getFalseValue(), EvalMode,
- recursionDepth + 1),
- EvalMode);
- }
-
- else if (const auto *PN = dyn_cast<PHINode>(V)) {
- unsigned Count = PN->getNumIncomingValues();
- if (Count == 0)
- return std::nullopt;
- auto Acc = aggregatePossibleConstantValuesImpl(
- PN->getIncomingValue(0), EvalMode, recursionDepth + 1);
- for (unsigned I = 1; Acc && I < Count; ++I) {
- auto Tmp = aggregatePossibleConstantValuesImpl(
- PN->getIncomingValue(1), EvalMode, recursionDepth + 1);
- Acc = combinePossibleConstantValues(Acc, Tmp, EvalMode);
- }
- return Acc;
- }
-
- return std::nullopt;
-}
-
-static std::optional<APInt>
-aggregatePossibleConstantValues(const Value *V, ObjectSizeOpts::Mode EvalMode) {
- if (auto *CI = dyn_cast<ConstantInt>(V))
- return CI->getValue();
-
- if (EvalMode != ObjectSizeOpts::Mode::Min &&
- EvalMode != ObjectSizeOpts::Mode::Max)
- return std::nullopt;
-
- // Not using computeConstantRange here because we cannot guarantee it's not
- // doing optimization based on UB which we want to avoid when expanding
- // __builtin_object_size.
- return aggregatePossibleConstantValuesImpl(V, EvalMode, 0u);
-}
-
/// Align \p Size according to \p Alignment. If \p Size is greater than
/// getSignedMaxValue(), set it as unknown as we can only represent signed value
/// in OffsetSpan.
@@ -783,36 +720,11 @@ OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) {
V = V->stripAndAccumulateConstantOffsets(
DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);
- // Give it another try with approximated analysis. We don't start with this
- // one because stripAndAccumulateConstantOffsets behaves differently wrt.
- // overflows if we provide an external Analysis.
- if ((Options.EvalMode == ObjectSizeOpts::Mode::Min ||
- Options.EvalMode == ObjectSizeOpts::Mode::Max) &&
- isa<GEPOperator>(V)) {
- // External Analysis used to compute the Min/Max value of individual Offsets
- // within a GEP.
- ObjectSizeOpts::Mode EvalMode =
- Options.EvalMode == ObjectSizeOpts::Mode::Min
- ? ObjectSizeOpts::Mode::Max
- : ObjectSizeOpts::Mode::Min;
- auto OffsetRangeAnalysis = [EvalMode](Value &VOffset, APInt &Offset) {
- if (auto PossibleOffset =
- aggregatePossibleConstantValues(&VOffset, EvalMode)) {
- Offset = *PossibleOffset;
- return true;
- }
- return false;
- };
-
- V = V->stripAndAccumulateConstantOffsets(
- DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true,
- /*ExternalAnalysis=*/OffsetRangeAnalysis);
- }
-
// Later we use the index type size and zero but it will match the type of the
// value that is passed to computeImpl.
IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
Zero = APInt::getZero(IntTyBits);
+
OffsetSpan ORT = computeValue(V);
bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
@@ -900,9 +812,8 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
return OffsetSpan(Zero, align(Size, I.getAlign()));
Value *ArraySize = I.getArraySize();
- if (auto PossibleSize =
- aggregatePossibleConstantValues(ArraySize, Options.EvalMode)) {
- APInt NumElems = *PossibleSize;
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
+ APInt NumElems = C->getValue();
if (!CheckedZextOrTrunc(NumElems))
return ObjectSizeOffsetVisitor::unknown();
@@ -928,18 +839,7 @@ OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
- auto Mapper = [this](const Value *V) -> const Value * {
- if (!V->getType()->isIntegerTy())
- return V;
-
- if (auto PossibleBound =
- aggregatePossibleConstantValues(V, Options.EvalMode))
- return ConstantInt::get(V->getType(), *PossibleBound);
-
- return V;
- };
-
- if (std::optional<APInt> Size = getAllocSize(&CB, TLI, Mapper)) {
+ if (std::optional<APInt> Size = getAllocSize(&CB, TLI)) {
// Very large unsigned value cannot be represented as OffsetSpan.
if (Size->isNegative())
return ObjectSizeOffsetVisitor::unknown();
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
deleted file mode 100644
index f84ebee1442893..00000000000000
--- a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
+++ /dev/null
@@ -1,109 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=lower-constant-intrinsics -S < %s | FileCheck %s
-
-target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)
-declare noalias ptr @malloc(i64 noundef) #0
-
-define i64 @select_alloc_size(i1 %cond) {
-; CHECK-LABEL: @select_alloc_size(
-; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
-; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 [[SIZE]], align 1
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
-; CHECK-NEXT: ret i64 [[RES]]
-;
- %size = select i1 %cond, i64 3, i64 4
- %ptr = alloca i8, i64 %size
- %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
- %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
- %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
- ret i64 %res
-}
-
-define i64 @select_malloc_size(i1 %cond) {
-; CHECK-LABEL: @select_malloc_size(
-; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
-; CHECK-NEXT: [[PTR:%.*]] = call noalias ptr @malloc(i64 noundef [[SIZE]])
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
-; CHECK-NEXT: ret i64 [[RES]]
-;
- %size = select i1 %cond, i64 3, i64 4
- %ptr = call noalias ptr @malloc(i64 noundef %size)
- %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
- %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
- %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
- ret i64 %res
-}
-
-define i64 @select_gep_offset(i1 %cond) {
-; CHECK-LABEL: @select_gep_offset(
-; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
-; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
-; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 7, i64 6
-; CHECK-NEXT: ret i64 [[RES]]
-;
- %ptr = alloca i8, i64 10
- %offset = select i1 %cond, i64 3, i64 4
- %ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
- %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
- %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
- %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
- ret i64 %res
-}
-
-define i64 @select_gep_neg_offset(i1 %c0, i1 %c1) {
-; CHECK-LABEL: @select_gep_neg_offset(
-; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
-; CHECK-NEXT: [[PTR_SLIDE_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 5
-; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 -3, i64 -4
-; CHECK-NEXT: [[PTR_SLIDE_2:%.*]] = getelementptr inbounds i8, ptr [[PTR_SLIDE_1]], i64 [[OFFSET]]
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[C1:%.*]], i64 9, i64 8
-; CHECK-NEXT: ret i64 [[RES]]
-;
- %ptr = alloca i8, i64 10
- %ptr.slide.1 = getelementptr inbounds i8, ptr %ptr, i64 5
- %offset = select i1 %c0, i64 -3, i64 -4
- %ptr.slide.2 = getelementptr inbounds i8, ptr %ptr.slide.1, i64 %offset
- %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 false, i1 true, i1 false)
- %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 true, i1 true, i1 false)
- %res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
- ret i64 %res
-}
-
-define i64 @select_neg_oob_offset(i1 %c0, i1 %c1) {
-; CHECK-LABEL: @select_neg_oob_offset(
-; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
-; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[C0:%.*]], i64 -3, i64 -4
-; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
-; CHECK-NEXT: ret i64 0
-;
- %ptr = alloca i8, i64 10
- %offset = select i1 %c0, i64 -3, i64 -4
- %ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
- %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
- %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
- %res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
- ret i64 %res
-}
-
-define i64 @select_gep_offsets(i1 %cond) {
-; CHECK-LABEL: @select_gep_offsets(
-; CHECK-NEXT: [[PTR:%.*]] = alloca [10 x i8], i64 2, align 1
-; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i32 0, i32 1
-; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds [10 x i8], ptr [[PTR]], i32 [[OFFSET]], i32 5
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 15, i64 5
-; CHECK-NEXT: ret i64 [[RES]]
-;
- %ptr = alloca [10 x i8], i64 2
- %offset = select i1 %cond, i32 0, i32 1
- %ptr.slide = getelementptr inbounds [10 x i8], ptr %ptr, i32 %offset, i32 5
- %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
- %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
- %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
- ret i64 %res
-}
-
-attributes #0 = { nounwind allocsize(0) }
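The deleted test above exercises llvm.objectsize on allocations and GEPs whose size or offset flows through a select. As a source-level illustration only (not part of this PR), here is a minimal C++ sketch of the behaviour the reverted patch enabled; the folded values assume Clang with optimization enabled (e.g. -O2) and mirror the select_malloc_size test.

```cpp
// Minimal sketch, assuming clang -O2; __builtin_object_size is a Clang/GCC
// builtin that lowers to llvm.objectsize. With the reverted change, the
// max/min queries below could fold to 4 and 3 even though the allocation
// size is not a single constant; after this revert they fall back to the
// "unknown" answers ((size_t)-1 for max mode, 0 for min mode).
#include <cstdio>
#include <cstdlib>

int main(int argc, char **argv) {
  (void)argv;
  bool cond = argc > 1;              // runtime condition, mirrors the i1 %cond
  std::size_t size = cond ? 3 : 4;   // mirrors "select i1 %cond, i64 3, i64 4"
  char *p = static_cast<char *>(std::malloc(size));
  // Type 0 queries the maximum object size, type 2 the minimum.
  std::printf("max bound: %zu\n", __builtin_object_size(p, 0));
  std::printf("min bound: %zu\n", __builtin_object_size(p, 2));
  std::free(p);
  return 0;
}
```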
Does #116955 fix the issue instead?
Let's see: https://lab.llvm.org/buildbot/#/buildrequests/2114124?redirect_to_build=true
Looks like the ref wasn't found by the bot - and it's bedtime in my TZ.
(Of course it didn't find it, I didn't see that the change is in your repo, not in a …)
I will try again and revert if it doesn't fix, SGTY?
I ran it locally and it still happened. Submitting this for now.
Reverts #115522
This caused UBSan errors in a multi-stage clang build:
https://lab.llvm.org/buildbot/#/builders/25/builds/4241/steps/10/logs/stdio
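For reference, the analysis this revert touches sits behind llvm::getObjectSize / ObjectSizeOffsetVisitor in llvm/Analysis/MemoryBuiltins.h, so downstream callers see the difference only through that query. Below is a rough, hedged sketch of such a caller; Ptr, DL and TLI are assumed to come from the surrounding pass and none of it is taken from this PR.

```cpp
// Hedged sketch of a caller of the analysis this PR reverts changes to.
// getObjectSize and ObjectSizeOpts are the existing public entry points in
// llvm/Analysis/MemoryBuiltins.h; Ptr, DL and TLI are assumed inputs.
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
#include <cstdint>

static bool hasKnownUpperBound(const llvm::Value *Ptr,
                               const llvm::DataLayout &DL,
                               const llvm::TargetLibraryInfo &TLI,
                               uint64_t &Size) {
  llvm::ObjectSizeOpts Opts;
  // Mode::Max asks for an upper bound; Mode::Min would ask for a lower bound.
  Opts.EvalMode = llvm::ObjectSizeOpts::Mode::Max;
  // With the reverted change, select/phi-fed alloca, malloc and GEP operands
  // could still produce a bound here; after the revert such cases generally
  // report failure (return false) again.
  return llvm::getObjectSize(Ptr, Size, DL, &TLI, Opts);
}
```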