Commit 8e7ea9d

[sanitizer] re-apply r279572 and r279595 reverted in r279643: change the 64-bit allocator to use a single array for freed chunks instead of a lock-free linked list of transfer batches. This change simplifies the code, makes the allocator more 'hardened', and will allow simpler code to release RAM to the OS. This may also slow down malloc stress tests due to lock contention, but I did not observe noticeable slowdown on various real multi-threaded benchmarks.
llvm-svn: 279664
1 parent: b09db22
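
The core idea of the change, in miniature: each size class keeps its freed chunks as 32-bit "compact pointers" (offsets from the region base) in a plain array, and the thread-local cache moves chunks to and from a single locked central array in bulk, rather than packaging them into TransferBatch nodes on a lock-free list. The sketch below only illustrates that layout and is not the patch itself; every name in it (CentralFreeArray, LocalCache, kMaxCached) is invented here.

// Toy model of the new design (not the sanitizer code): freed chunks live in
// one mutex-protected array per size class, and the thread-local cache moves
// them in and out in bulk as 32-bit offsets ("compact pointers").
#include <algorithm>
#include <cstdint>
#include <mutex>
#include <vector>

typedef uint32_t CompactPtr;  // offset from the region base, not a raw pointer

// Central per-size-class store: a single growable array guarded by a mutex,
// standing in for the lock-free linked list of TransferBatch objects that the
// patch removes.
struct CentralFreeArray {
  std::mutex mu;
  std::vector<CompactPtr> free_chunks;

  // Pop up to n chunks under one lock acquisition; returns how many were taken.
  size_t GetChunks(CompactPtr *out, size_t n) {
    std::lock_guard<std::mutex> l(mu);
    size_t got = std::min(n, free_chunks.size());
    for (size_t i = 0; i < got; i++) {
      out[i] = free_chunks.back();
      free_chunks.pop_back();
    }
    return got;
  }

  // Push n chunks back, again with a single lock acquisition.
  void ReturnChunks(const CompactPtr *in, size_t n) {
    std::lock_guard<std::mutex> l(mu);
    free_chunks.insert(free_chunks.end(), in, in + n);
  }
};

// Thread-local cache for one size class: a plain array of compact pointers,
// refilled when empty and drained by half when full, mirroring the shape of
// Refill()/Drain() in the patch.
struct LocalCache {
  static const size_t kMaxCached = 64;  // invented limit for the sketch
  CompactPtr chunks[2 * kMaxCached];
  size_t count = 0;

  void *Allocate(CentralFreeArray *central, uintptr_t region_beg) {
    if (count == 0)  // Refill: grab a batch of chunks in one central call.
      count = central->GetChunks(chunks, kMaxCached);
    if (count == 0) return nullptr;  // central store exhausted in this toy model
    CompactPtr c = chunks[--count];
    return reinterpret_cast<void *>(region_beg + c);
  }

  void Deallocate(CentralFreeArray *central, uintptr_t region_beg, void *p) {
    if (count == 2 * kMaxCached) {  // Drain: hand back the newest half in bulk.
      central->ReturnChunks(&chunks[kMaxCached], kMaxCached);
      count = kMaxCached;
    }
    chunks[count++] =
        static_cast<CompactPtr>(reinterpret_cast<uintptr_t>(p) - region_beg);
  }
};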

File tree
3 files changed: +144, -182 lines

compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h

Lines changed: 30 additions & 75 deletions
@@ -26,8 +26,9 @@ struct SizeClassAllocatorLocalCache
 template <class SizeClassAllocator>
 struct SizeClassAllocator64LocalCache {
   typedef SizeClassAllocator Allocator;
-  typedef typename Allocator::TransferBatch TransferBatch;
   static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+  typedef typename Allocator::SizeClassMapT SizeClassMap;
+  typedef typename Allocator::CompactPtrT CompactPtrT;

   void Init(AllocatorGlobalStats *s) {
     stats_.Init();
@@ -47,9 +48,11 @@ struct SizeClassAllocator64LocalCache {
     stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
     PerClass *c = &per_class_[class_id];
     if (UNLIKELY(c->count == 0))
-      Refill(allocator, class_id);
-    void *res = c->batch[--c->count];
-    PREFETCH(c->batch[c->count - 1]);
+      Refill(c, allocator, class_id);
+    CHECK_GT(c->count, 0);
+    CompactPtrT chunk = c->chunks[--c->count];
+    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+        allocator->GetRegionBeginBySizeClass(class_id), chunk));
     return res;
   }

@@ -63,24 +66,26 @@ struct SizeClassAllocator64LocalCache {
     PerClass *c = &per_class_[class_id];
     CHECK_NE(c->max_count, 0UL);
     if (UNLIKELY(c->count == c->max_count))
-      Drain(allocator, class_id);
-    c->batch[c->count++] = p;
+      Drain(c, allocator, class_id, c->max_count / 2);
+    CompactPtrT chunk = allocator->PointerToCompactPtr(
+        allocator->GetRegionBeginBySizeClass(class_id),
+        reinterpret_cast<uptr>(p));
+    c->chunks[c->count++] = chunk;
   }

   void Drain(SizeClassAllocator *allocator) {
     for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
       PerClass *c = &per_class_[class_id];
       while (c->count > 0)
-        Drain(allocator, class_id);
+        Drain(c, allocator, class_id, c->count);
     }
   }

  // private:
-  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
   struct PerClass {
-    uptr count;
-    uptr max_count;
-    void *batch[2 * TransferBatch::kMaxNumCached];
+    u32 count;
+    u32 max_count;
+    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
   };
   PerClass per_class_[kNumClasses];
   AllocatorStats stats_;
@@ -90,77 +95,27 @@ struct SizeClassAllocator64LocalCache {
       return;
     for (uptr i = 0; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
-      c->max_count = 2 * TransferBatch::MaxCached(i);
+      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
     }
   }

-  // TransferBatch class is declared in SizeClassAllocator.
-  // We transfer chunks between central and thread-local free lists in batches.
-  // For small size classes we allocate batches separately.
-  // For large size classes we may use one of the chunks to store the batch.
-  // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
-
-  // If kUseSeparateSizeClassForBatch is true,
-  // all TransferBatch objects are allocated from kBatchClassID
-  // size class (except for those that are needed for kBatchClassID itself).
-  // The goal is to have TransferBatches in a totally different region of RAM
-  // to improve security and allow more efficient RAM reclamation.
-  // This is experimental and may currently increase memory usage by up to 3%
-  // in extreme cases.
-  static const bool kUseSeparateSizeClassForBatch = false;
-
-  static uptr SizeClassForTransferBatch(uptr class_id) {
-    if (kUseSeparateSizeClassForBatch)
-      return class_id == SizeClassMap::kBatchClassID
-                 ? 0
-                 : SizeClassMap::kBatchClassID;
-    if (Allocator::ClassIdToSize(class_id) <
-        TransferBatch::AllocationSizeRequiredForNElements(
-            TransferBatch::MaxCached(class_id)))
-      return SizeClassMap::ClassID(sizeof(TransferBatch));
-    return 0;
-  }
-
-  // Returns a TransferBatch suitable for class_id.
-  // For small size classes allocates the batch from the allocator.
-  // For large size classes simply returns b.
-  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
-                             TransferBatch *b) {
-    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
-      return (TransferBatch*)Allocate(allocator, batch_class_id);
-    return b;
-  }
-
-  // Destroys TransferBatch b.
-  // For small size classes deallocates b to the allocator.
-  // Does notthing for large size classes.
-  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
-                    TransferBatch *b) {
-    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
-      Deallocate(allocator, batch_class_id, b);
-  }
-
-  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+  NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+                       uptr class_id) {
     InitCache();
-    PerClass *c = &per_class_[class_id];
-    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
-    CHECK_GT(b->Count(), 0);
-    b->CopyToArray(c->batch);
-    c->count = b->Count();
-    DestroyBatch(class_id, allocator, b);
+    uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
+    allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+                                num_requested_chunks);
+    c->count = num_requested_chunks;
   }

-  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
+  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+                      uptr count) {
     InitCache();
-    PerClass *c = &per_class_[class_id];
-    uptr cnt = Min(c->max_count / 2, c->count);
-    uptr first_idx_to_drain = c->count - cnt;
-    TransferBatch *b = CreateBatch(
-        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
-    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
-                    &c->batch[first_idx_to_drain], cnt);
-    c->count -= cnt;
-    allocator->DeallocateBatch(&stats_, class_id, b);
+    CHECK_GE(c->count, count);
+    uptr first_idx_to_drain = c->count - count;
+    c->count -= count;
+    allocator->ReturnToAllocator(&stats_, class_id,
+                                 &c->chunks[first_idx_to_drain], count);
   }
 };

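Only one of the three changed files is shown above; the allocator-side half of the interface (the GetFromAllocator / ReturnToAllocator and compact-pointer helpers that the cache now calls) lives elsewhere in the 64-bit primary allocator. The declarations below are a sketch of what the cache code expects, with signatures inferred from the call sites in this diff; the stand-in typedefs and exact parameter types are assumptions, not the upstream definitions.

// Stand-ins for the sanitizer_common typedefs, so the sketch is self-contained.
#include <cstdint>
typedef uintptr_t uptr;
typedef uint32_t u32;
class AllocatorStats;

// Sketch of the allocator-side interface used by the new cache code.
// Signatures are inferred from the call sites above and may differ from the
// real SizeClassAllocator64.
struct SizeClassAllocator64Sketch {
  typedef u32 CompactPtrT;  // chunks are stored as compact 32-bit values

  // Called from Refill(): pop n_chunks freed chunks of class_id into chunks[],
  // taking the per-class lock once for the whole batch.
  void GetFromAllocator(AllocatorStats *stat, uptr class_id,
                        CompactPtrT *chunks, uptr n_chunks);

  // Called from Drain(): push n_chunks freed chunks back onto the per-class
  // free array, again under a single lock acquisition.
  void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
                         CompactPtrT *chunks, uptr n_chunks);

  // Conversions between raw pointers and per-region compact offsets.
  uptr GetRegionBeginBySizeClass(uptr class_id);
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr);
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32);
};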