
Commit b5b1b3a

[sanitizer] Switch StackDepotNode to 64bit hash
Now we can avoid scanning the stack on the fast path. The price is a false stack trace with the probability of a hash collision. This increases lsan performance by 6% and is a prerequisite for stack compression.

Depends on D111182.

Reviewed By: morehouse, dvyukov

Differential Revision: https://reviews.llvm.org/D111183
1 parent 4c48f7e commit b5b1b3a
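
For context on the trade-off described above, here is a rough estimate (not from the commit itself) of how likely a false stack trace actually is: two distinct traces are only confused when their 64-bit hashes collide, so with N distinct stacks the birthday bound puts the chance of any collision at roughly N*(N-1)/2^65. The small program below just evaluates that approximation for a few values of N.

// Back-of-the-envelope estimate (illustrative, not part of the commit):
// probability that at least two of N distinct stack traces share the same
// 64-bit hash, using the birthday-bound approximation N*(N-1)/2^65.
#include <cmath>
#include <cstdio>

int main() {
  for (double n : {1e4, 1e6, 1e8}) {
    double p = n * (n - 1) / std::ldexp(1.0, 65);  // ~N^2 / 2^65
    std::printf("N = %.0e distinct stacks -> P(collision) ~ %.3g\n", n, p);
  }
  return 0;
}

Even at a hundred million distinct stacks the approximation stays around 3e-4, which suggests why hash-only equality is considered an acceptable trade for the 6% lsan speedup.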


compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp

Lines changed: 5 additions & 12 deletions
@@ -19,10 +19,10 @@
 namespace __sanitizer {
 
 struct StackDepotNode {
-  using hash_type = u32;
+  using hash_type = u64;
+  hash_type stack_hash;
   StackDepotNode *link;
   u32 id;
-  hash_type stack_hash;
   u32 size;
   atomic_uint32_t tag_and_use_count;  // tag : 12 high bits; use_count : 20;
   uptr stack[1];  // [size]
@@ -34,22 +34,15 @@ struct StackDepotNode {
 
   typedef StackTrace args_type;
   bool eq(hash_type hash, const args_type &args) const {
-    u32 tag =
-        atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
-    if (stack_hash != hash || args.size != size || args.tag != tag)
-      return false;
-    uptr i = 0;
-    for (; i < size; i++) {
-      if (stack[i] != args.trace[i]) return false;
-    }
-    return true;
+    return hash == stack_hash;
   }
   static uptr storage_size(const args_type &args) {
     return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
   }
   static hash_type hash(const args_type &args) {
-    MurMur2HashBuilder H(args.size * sizeof(uptr));
+    MurMur2Hash64Builder H(args.size * sizeof(uptr));
     for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
+    H.add(args.tag);
     return H.get();
   }
   static bool is_valid(const args_type &args) {
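
To make the fast path concrete, the following is a minimal standalone sketch, not the actual StackDepot code, of the lookup pattern this change enables; the Node struct and find() helper are hypothetical names introduced only for illustration. It shows that once the full 64-bit hash is stored in the node, a bucket scan can treat a hash match as equality without re-reading the stored frames.

// Minimal standalone sketch (illustrative only) of hash-only equality in a
// chained hash table, mirroring the new eq(): return hash == stack_hash;
#include <cstdint>

struct Node {              // hypothetical stand-in for StackDepotNode
  uint64_t stack_hash;     // full 64-bit hash, as in the new field layout
  Node *link;              // next node in the same bucket
  // ... frames would live here; eq() no longer needs to touch them
};

// With the old 32-bit hash, the code also had to compare sizes, tags and
// every frame; with 64 bits, a hash match alone is treated as equality,
// at the cost of a ~2^-64 false-positive rate per pair of distinct stacks.
inline Node *find(Node *bucket_head, uint64_t hash) {
  for (Node *n = bucket_head; n; n = n->link)
    if (n->stack_hash == hash)
      return n;
  return nullptr;
}

Note also that the tag is now folded into the hash itself (H.add(args.tag)) rather than being unpacked from tag_and_use_count and compared separately in eq().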
