Skip to content

[scudo] Remove unused field in BatchGroup #109322

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Sep 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 2 additions & 5 deletions compiler-rt/lib/scudo/standalone/allocator_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,16 +75,13 @@ template <class SizeClassAllocator> struct BatchGroup {
BatchGroup *Next;
// The compact base address of each group
uptr CompactPtrGroupBase;
// Cache value of SizeClassAllocatorLocalCache::getMaxCached()
u16 MaxCachedPerBatch;
// Number of blocks pushed into this group. This is an increment-only
// counter.
uptr PushedBlocks;
// This is used to track how many bytes are not in-use since last time we
// tried to release pages.
uptr BytesInBGAtLastCheckpoint;
// Blocks are managed by TransferBatch in a list.
SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
// Cache value of SizeClassAllocatorLocalCache::getMaxCached()
u16 MaxCachedPerBatch;
};

} // namespace scudo
Expand Down
16 changes: 1 addition & 15 deletions compiler-rt/lib/scudo/standalone/primary32.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,12 +56,9 @@ template <typename Config> class SizeClassAllocator32 {
typedef TransferBatch<ThisT> TransferBatchT;
typedef BatchGroup<ThisT> BatchGroupT;

static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
"BatchGroupT uses the same class size as TransferBatchT");

static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
? sizeof(TransferBatchT)
? Max(sizeof(BatchGroupT), sizeof(TransferBatchT))
: SizeClassMap::getSizeByClassId(ClassId);
}

Expand Down Expand Up @@ -531,9 +528,6 @@ template <typename Config> class SizeClassAllocator32 {
// BatchClass hasn't enabled memory group. Use `0` to indicate there's no
// memory group here.
BG->CompactPtrGroupBase = 0;
// `BG` is also the block of BatchClassId. Note that this is different
// from `CreateGroup` in `pushBlocksImpl`
BG->PushedBlocks = 1;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
Expand All @@ -558,9 +552,6 @@ template <typename Config> class SizeClassAllocator32 {
TB->add(
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
--Size;
DCHECK_EQ(BG->PushedBlocks, 1U);
// `TB` is also the block of BatchClassId.
BG->PushedBlocks += 1;
BG->Batches.push_front(TB);
}

Expand All @@ -587,8 +578,6 @@ template <typename Config> class SizeClassAllocator32 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}

BG->PushedBlocks += Size;
}
// Push the blocks to their batch group. The layout will be like,
//
Expand Down Expand Up @@ -624,7 +613,6 @@ template <typename Config> class SizeClassAllocator32 {

BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;

Expand Down Expand Up @@ -652,8 +640,6 @@ template <typename Config> class SizeClassAllocator32 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}

BG->PushedBlocks += Size;
};

Sci->FreeListInfo.PushedBlocks += Size;
Expand Down
34 changes: 11 additions & 23 deletions compiler-rt/lib/scudo/standalone/primary64.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,12 +61,12 @@ template <typename Config> class SizeClassAllocator64 {
typedef TransferBatch<ThisT> TransferBatchT;
typedef BatchGroup<ThisT> BatchGroupT;

static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
"BatchGroupT uses the same class size as TransferBatchT");

// BatchClass is used to store internal metadata so it needs to be at least as
// large as the largest data structure.
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
? roundUp(Max(sizeof(TransferBatchT), sizeof(BatchGroupT)),
1U << CompactPtrScale)
: SizeClassMap::getSizeByClassId(ClassId);
}

Expand Down Expand Up @@ -236,7 +236,7 @@ template <typename Config> class SizeClassAllocator64 {
} else {
while (true) {
// When two threads compete for `Region->MMLock`, we only want one of
// them to call populateFreeListAndPopBatch(). To avoid both of them
// them to call populateFreeListAndPopBlocks(). To avoid both of them
// doing that, always check the freelist before mapping new pages.
ScopedLock ML(Region->MMLock);
{
Expand Down Expand Up @@ -690,9 +690,6 @@ template <typename Config> class SizeClassAllocator64 {
// BatchClass hasn't enabled memory group. Use `0` to indicate there's no
// memory group here.
BG->CompactPtrGroupBase = 0;
// `BG` is also the block of BatchClassId. Note that this is different
// from `CreateGroup` in `pushBlocksImpl`
BG->PushedBlocks = 1;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
Expand All @@ -717,9 +714,6 @@ template <typename Config> class SizeClassAllocator64 {
TB->add(
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
--Size;
DCHECK_EQ(BG->PushedBlocks, 1U);
// `TB` is also the block of BatchClassId.
BG->PushedBlocks += 1;
BG->Batches.push_front(TB);
}

Expand All @@ -746,8 +740,6 @@ template <typename Config> class SizeClassAllocator64 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}

BG->PushedBlocks += Size;
}

// Push the blocks to their batch group. The layout will be like,
Expand Down Expand Up @@ -782,7 +774,6 @@ template <typename Config> class SizeClassAllocator64 {

BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;

Expand Down Expand Up @@ -810,8 +801,6 @@ template <typename Config> class SizeClassAllocator64 {
CurBatch->appendFromArray(&Array[I], AppendSize);
I += AppendSize;
}

BG->PushedBlocks += Size;
};

Region->FreeListInfo.PushedBlocks += Size;
Expand Down Expand Up @@ -884,7 +873,7 @@ template <typename Config> class SizeClassAllocator64 {
while (true) {
// We only expect one thread to be refilling the freelist, while the other
// threads wait for either the completion of the
// `populateFreeListAndPopBatch()` or `pushBlocks()` called by other
// `populateFreeListAndPopBlocks()` or `pushBlocks()` called by other
// threads.
bool PopulateFreeList = false;
{
Expand Down Expand Up @@ -922,7 +911,7 @@ template <typename Config> class SizeClassAllocator64 {
// Here, two preconditions must be met before waiting,
// 1. The freelist is empty.
// 2. Region->isPopulatingFreeList == true, i.e, someone is still doing
// `populateFreeListAndPopBatch()`.
// `populateFreeListAndPopBlocks()`.
//
// Note that it has the chance that freelist is empty but
// Region->isPopulatingFreeList == false because all the new populated
Expand All @@ -938,8 +927,8 @@ template <typename Config> class SizeClassAllocator64 {

// Now the freelist is empty and someone is refilling it. We will
// wait until anyone refills the freelist or someone finishes doing
// `populateFreeListAndPopBatch()`. The refillment can be done by
// `populateFreeListAndPopBatch()`, `pushBlocks()`,
// `populateFreeListAndPopBlocks()`. The refillment can be done by
// `populateFreeListAndPopBlocks()`, `pushBlocks()`,
// `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
Region->FLLockCV.wait(Region->FLLock);

Expand Down Expand Up @@ -1119,8 +1108,8 @@ template <typename Config> class SizeClassAllocator64 {

// Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
// the requests from `PushBlocks` and `PopBatch` which are external
// interfaces. `populateFreeListAndPopBatch` is the internal interface so we
// should set the values back to avoid incorrectly setting the stats.
// interfaces. `populateFreeListAndPopBlocks` is the internal interface so
// we should set the values back to avoid incorrectly setting the stats.
Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;

const uptr AllocatedUser = Size * NumberOfBlocks;
Expand Down Expand Up @@ -1689,7 +1678,6 @@ template <typename Config> class SizeClassAllocator64 {
GroupsToRelease.pop_front();

if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
BG->PushedBlocks += Cur->PushedBlocks;
// We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
// collecting the `GroupsToRelease`.
BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
Expand Down
Loading