Skip to content

Commit fe0cb7b

Browse files
committed
[scudo] Move the blocks marking logic into markFreeBlocks()
This change is only in SizeClassAllocator32; SizeClassAllocator64 already has it implemented. Reviewed By: cferris Differential Revision: https://reviews.llvm.org/D158455
1 parent 56241b6 commit fe0cb7b

File tree

1 file changed

+98
-80
lines changed

1 file changed

+98
-80
lines changed

compiler-rt/lib/scudo/standalone/primary32.h

Lines changed: 98 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -866,7 +866,6 @@ template <typename Config> class SizeClassAllocator32 {
866866
ReleaseToOS ReleaseType = ReleaseToOS::Normal)
867867
REQUIRES(Sci->Mutex) {
868868
const uptr BlockSize = getSizeByClassId(ClassId);
869-
const uptr PageSize = getPageSizeCached();
870869

871870
DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
872871
const uptr BytesInFreeList =
@@ -878,7 +877,7 @@ template <typename Config> class SizeClassAllocator32 {
878877
return 0;
879878

880879
// ====================================================================== //
881-
// Check if we have enough free blocks and if it's worth doing a page
880+
// 1. Check if we have enough free blocks and if it's worth doing a page
882881
// release.
883882
// ====================================================================== //
884883
if (ReleaseType != ReleaseToOS::ForceAll &&
@@ -894,88 +893,20 @@ template <typename Config> class SizeClassAllocator32 {
894893
uptr TotalReleasedBytes = 0;
895894
const uptr Base = First * RegionSize;
896895
const uptr NumberOfRegions = Last - First + 1U;
897-
const uptr GroupSize = (1UL << GroupSizeLog);
898-
const uptr CurGroupBase =
899-
compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
900-
901-
ReleaseRecorder Recorder(Base);
902-
PageReleaseContext Context(BlockSize, NumberOfRegions,
903-
/*ReleaseSize=*/RegionSize);
904-
905-
auto DecompactPtr = [](CompactPtrT CompactPtr) {
906-
return reinterpret_cast<uptr>(CompactPtr);
907-
};
908-
for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
909-
const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
910-
// The `GroupSize` may not be divisible by `BlockSize`, which means there is
911-
// an unused space at the end of Region. Exclude that space to avoid
912-
// unused page map entry.
913-
uptr AllocatedGroupSize = GroupBase == CurGroupBase
914-
? Sci->CurrentRegionAllocated
915-
: roundDownSlow(GroupSize, BlockSize);
916-
if (AllocatedGroupSize == 0)
917-
continue;
918-
919-
// TransferBatches are pushed in front of BG.Batches. The first one may
920-
// not have all caches used.
921-
const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
922-
BG.Batches.front()->getCount();
923-
const uptr BytesInBG = NumBlocks * BlockSize;
924-
925-
if (ReleaseType != ReleaseToOS::ForceAll) {
926-
if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
927-
BG.BytesInBGAtLastCheckpoint = BytesInBG;
928-
continue;
929-
}
930-
931-
const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
932-
if (PushedBytesDelta < PageSize)
933-
continue;
934-
935-
// Given the randomness property, we try to release the pages only if
936-
// the bytes used by free blocks exceed certain proportion of allocated
937-
// spaces.
938-
if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
939-
(100U - 1U - BlockSize / 16U)) {
940-
continue;
941-
}
942-
}
943-
944-
// TODO: Consider updating this after page release if `ReleaseRecorder`
945-
// can tell the released bytes in each group.
946-
BG.BytesInBGAtLastCheckpoint = BytesInBG;
947-
948-
const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
949-
const uptr RegionIndex = (GroupBase - Base) / RegionSize;
950-
951-
if (NumBlocks == MaxContainedBlocks) {
952-
for (const auto &It : BG.Batches)
953-
for (u16 I = 0; I < It.getCount(); ++I)
954-
DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
955-
956-
const uptr To = GroupBase + AllocatedGroupSize;
957-
Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
958-
AllocatedGroupSize);
959-
} else {
960-
DCHECK_LT(NumBlocks, MaxContainedBlocks);
961-
962-
// Note that we don't always visit blocks in each BatchGroup so that we
963-
// may miss the chance of releasing certain pages that cross
964-
// BatchGroups.
965-
Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
966-
RegionIndex, AllocatedGroupSize,
967-
/*MayContainLastBlockInRegion=*/true);
968-
}
969-
970-
// We may not be able to do the page release in a rare case that we may
971-
// fail on PageMap allocation.
972-
if (UNLIKELY(!Context.hasBlockMarked()))
973-
return 0;
974-
}
975896

897+
// ==================================================================== //
898+
// 2. Mark the free blocks and we can tell which pages are in-use by
899+
// querying `PageReleaseContext`.
900+
// ==================================================================== //
901+
PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
902+
NumberOfRegions, ReleaseType);
976903
if (!Context.hasBlockMarked())
977904
return 0;
978905

906+
// ==================================================================== //
907+
// 3. Release the unused physical pages back to the OS.
908+
// ==================================================================== //
909+
ReleaseRecorder Recorder(Base);
979910
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
980911
ScopedLock L(ByteMapMutex);
981912
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
@@ -1054,6 +985,93 @@ template <typename Config> class SizeClassAllocator32 {
1054985
return true;
1055986
}
1056987

988+
PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
989+
const uptr BlockSize, const uptr Base,
990+
const uptr NumberOfRegions,
991+
ReleaseToOS ReleaseType)
992+
REQUIRES(Sci->Mutex) {
993+
const uptr PageSize = getPageSizeCached();
994+
const uptr GroupSize = (1UL << GroupSizeLog);
995+
const uptr CurGroupBase =
996+
compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
997+
998+
PageReleaseContext Context(BlockSize, NumberOfRegions,
999+
/*ReleaseSize=*/RegionSize);
1000+
1001+
auto DecompactPtr = [](CompactPtrT CompactPtr) {
1002+
return reinterpret_cast<uptr>(CompactPtr);
1003+
};
1004+
for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
1005+
const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
1006+
// The `GroupSize` may not be divisible by `BlockSize`, which means there is
1007+
// an unused space at the end of Region. Exclude that space to avoid
1008+
// unused page map entry.
1009+
uptr AllocatedGroupSize = GroupBase == CurGroupBase
1010+
? Sci->CurrentRegionAllocated
1011+
: roundDownSlow(GroupSize, BlockSize);
1012+
if (AllocatedGroupSize == 0)
1013+
continue;
1014+
1015+
// TransferBatches are pushed in front of BG.Batches. The first one may
1016+
// not have all caches used.
1017+
const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
1018+
BG.Batches.front()->getCount();
1019+
const uptr BytesInBG = NumBlocks * BlockSize;
1020+
1021+
if (ReleaseType != ReleaseToOS::ForceAll) {
1022+
if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
1023+
BG.BytesInBGAtLastCheckpoint = BytesInBG;
1024+
continue;
1025+
}
1026+
1027+
const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
1028+
if (PushedBytesDelta < PageSize)
1029+
continue;
1030+
1031+
// Given the randomness property, we try to release the pages only if
1032+
// the bytes used by free blocks exceed certain proportion of allocated
1033+
// spaces.
1034+
if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
1035+
(100U - 1U - BlockSize / 16U)) {
1036+
continue;
1037+
}
1038+
}
1039+
1040+
// TODO: Consider updating this after page release if `ReleaseRecorder`
1041+
// can tell the released bytes in each group.
1042+
BG.BytesInBGAtLastCheckpoint = BytesInBG;
1043+
1044+
const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
1045+
const uptr RegionIndex = (GroupBase - Base) / RegionSize;
1046+
1047+
if (NumBlocks == MaxContainedBlocks) {
1048+
for (const auto &It : BG.Batches)
1049+
for (u16 I = 0; I < It.getCount(); ++I)
1050+
DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
1051+
1052+
const uptr To = GroupBase + AllocatedGroupSize;
1053+
Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
1054+
AllocatedGroupSize);
1055+
} else {
1056+
DCHECK_LT(NumBlocks, MaxContainedBlocks);
1057+
1058+
// Note that we don't always visit blocks in each BatchGroup so that we
1059+
// may miss the chance of releasing certain pages that cross
1060+
// BatchGroups.
1061+
Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
1062+
RegionIndex, AllocatedGroupSize,
1063+
/*MayContainLastBlockInRegion=*/true);
1064+
}
1065+
1066+
// We may not be able to do the page release in a rare case that we may
1067+
// fail on PageMap allocation.
1068+
if (UNLIKELY(!Context.hasBlockMarked()))
1069+
break;
1070+
}
1071+
1072+
return Context;
1073+
}
1074+
10571075
SizeClassInfo SizeClassInfoArray[NumClasses] = {};
10581076

10591077
HybridMutex ByteMapMutex;

0 commit comments

Comments
 (0)