Commit e5859af

[sanitizer] Remove use_count from StackDepotNode
This is msan/dfsan data which should not waste the cache of the other sanitizers. Depends on D111614.

Differential Revision: https://reviews.llvm.org/D111615
Parent: 54405a4
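
Not part of the commit, but a minimal standalone C++ sketch of the idea (the names NodeBefore, NodeAfter, use_counts, and kMaxNodes are hypothetical; the real code uses StackDepotNode and a TwoLevelMap indexed by the depot id): the use count is only needed by msan/dfsan, so moving it out of the node lets the hot hash-lookup path touch a smaller, read-mostly struct.

#include <atomic>
#include <cstdint>

// Before: the mutable counter lives inside every node, so each lookup pulls
// an atomic that most sanitizers never touch into the cache.
struct NodeBefore {
  uint64_t stack_hash;
  uint32_t link;
  std::atomic<uint32_t> tag_and_use_count;  // tag : 12 high bits; use_count : 20
};

// After: the node keeps only read-mostly fields ...
struct NodeAfter {
  uint64_t stack_hash;
  uint32_t link;
  uint32_t tag;
};

// ... and the counters move to a side table indexed by node id, touched only
// by the sanitizers that actually track use counts.
constexpr uint32_t kMaxNodes = 1 << 20;       // hypothetical capacity
std::atomic<uint32_t> use_counts[kMaxNodes];  // static storage, starts at zero

uint32_t use_count(uint32_t id) {
  return use_counts[id].load(std::memory_order_relaxed);
}

void inc_use_count_unsafe(uint32_t id) {
  use_counts[id].fetch_add(1, std::memory_order_relaxed);
}

int main() {
  inc_use_count_unsafe(7);
  return use_count(7) == 1 ? 0 : 1;
}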

compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp

Lines changed: 17 additions & 23 deletions
@@ -25,12 +25,9 @@ struct StackDepotNode {
   using hash_type = u64;
   hash_type stack_hash;
   u32 link;
-  atomic_uint32_t tag_and_use_count;  // tag : 12 high bits; use_count : 20;
+  u32 tag;
 
   static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
-  static const u32 kUseCountBits = 20;
-  static const u32 kMaxUseCount = 1 << kUseCountBits;
-  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
 
   typedef StackTrace args_type;
   bool eq(hash_type hash, const args_type &args) const {
@@ -53,19 +50,6 @@ struct StackDepotNode {
   typedef StackDepotHandle handle_type;
 };
 
-COMPILER_CHECK(StackDepotNode::kMaxUseCount >= (u32)kStackDepotMaxUseCount);
-
-int StackDepotHandle::use_count() const {
-  return atomic_load(&node_->tag_and_use_count, memory_order_relaxed) &
-         StackDepotNode::kUseCountMask;
-}
-void StackDepotHandle::inc_use_count_unsafe() {
-  u32 prev =
-      atomic_fetch_add(&node_->tag_and_use_count, 1, memory_order_relaxed) &
-      StackDepotNode::kUseCountMask;
-  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
-}
-
 // FIXME(dvyukov): this single reserved bit is used in TSan.
 typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
     StackDepot;
@@ -74,15 +58,27 @@ static StackDepot theDepot;
 // caching efficiency.
 static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2>
     tracePtrs;
+// Keep mutable data out of frequently access nodes to improve caching
+// efficiency.
+static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
+                   StackDepot::kNodesSize2>
+    useCounts;
+
+int StackDepotHandle::use_count() const {
+  return atomic_load_relaxed(&useCounts[id_]);
+}
+
+void StackDepotHandle::inc_use_count_unsafe() {
+  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
+}
 
 uptr StackDepotNode::allocated() {
-  return traceAllocator.allocated() + tracePtrs.MemoryUsage();
+  return traceAllocator.allocated() + tracePtrs.MemoryUsage() +
+         useCounts.MemoryUsage();
 }
 
 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
-  CHECK_EQ(args.tag & (~kUseCountMask >> kUseCountBits), args.tag);
-  atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
-               memory_order_relaxed);
+  tag = args.tag;
   stack_hash = hash;
   uptr *stack_trace = traceAllocator.alloc(args.size + 1);
   *stack_trace = args.size;
@@ -94,8 +90,6 @@ StackDepotNode::args_type StackDepotNode::load(u32 id) const {
   const uptr *stack_trace = tracePtrs[id];
   if (!stack_trace)
     return {};
-  u32 tag =
-      atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
   return args_type(stack_trace + 1, *stack_trace, tag);
 }
 
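
For context, a rough, hypothetical stand-in for the TwoLevelMap used above (SimpleTwoLevelMap and its members are invented for illustration and are not the sanitizer_common API): second-level blocks are created lazily on first access, so a sanitizer that never touches useCounts pays only for the first-level pointer array, and MemoryUsage() mirrors the accounting added to StackDepotNode::allocated(). This is a single-threaded sketch; the real container also handles concurrent access.

#include <atomic>
#include <cstddef>
#include <cstdint>

template <typename T, size_t kSize1, size_t kSize2>
class SimpleTwoLevelMap {
 public:
  // Returns a reference to element idx, allocating its second-level block on
  // first use. Not thread-safe, and blocks are never freed; fine for a sketch.
  T &operator[](size_t idx) {
    size_t i1 = idx / kSize2, i2 = idx % kSize2;
    if (!blocks_[i1])
      blocks_[i1] = new T[kSize2]();  // value-initialized block
    return blocks_[i1][i2];
  }

  // Bytes committed so far -- only blocks that were actually touched count.
  size_t MemoryUsage() const {
    size_t bytes = 0;
    for (size_t i = 0; i < kSize1; ++i)
      if (blocks_[i]) bytes += kSize2 * sizeof(T);
    return bytes;
  }

 private:
  T *blocks_[kSize1] = {};  // first level: one pointer per block
};

int main() {
  SimpleTwoLevelMap<std::atomic<uint32_t>, 1 << 12, 1 << 8> useCounts;
  useCounts[42].fetch_add(1, std::memory_order_relaxed);  // commits one block
  // Only the single touched block contributes to MemoryUsage().
  return useCounts[42].load(std::memory_order_relaxed) == 1 ? 0 : 1;
}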
