@@ -866,7 +866,6 @@ template <typename Config> class SizeClassAllocator32 {
                         ReleaseToOS ReleaseType = ReleaseToOS::Normal)
       REQUIRES(Sci->Mutex) {
     const uptr BlockSize = getSizeByClassId(ClassId);
-    const uptr PageSize = getPageSizeCached();
 
     DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
     const uptr BytesInFreeList =
@@ -878,7 +877,7 @@ template <typename Config> class SizeClassAllocator32 {
       return 0;
 
     // ====================================================================== //
-    // Check if we have enough free blocks and if it's worth doing a page
+    // 1. Check if we have enough free blocks and if it's worth doing a page
     // release.
     // ====================================================================== //
     if (ReleaseType != ReleaseToOS::ForceAll &&
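[Annotation: the step-1 gate above only applies heuristics to Normal and Force releases; a ForceAll request bypasses them entirely. A minimal standalone sketch of that shape, assuming scudo's three ReleaseToOS modes; MinBytesWorthReleasing is a hypothetical threshold, not the real check, which also looks at release intervals and pushed-bytes deltas.]

#include <cstdint>

enum class ReleaseToOS { Normal, Force, ForceAll };

// Simplified stand-in for the step-1 gate: heuristics may veto a Normal or
// Force release, but ForceAll always proceeds to the marking phase.
bool shouldAttemptRelease(ReleaseToOS Type, uint64_t BytesInFreeList,
                          uint64_t MinBytesWorthReleasing /*hypothetical*/) {
  if (Type == ReleaseToOS::ForceAll)
    return true; // Release everything releasable, no questions asked.
  return BytesInFreeList >= MinBytesWorthReleasing;
}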
@@ -894,88 +893,20 @@ template <typename Config> class SizeClassAllocator32 {
     uptr TotalReleasedBytes = 0;
     const uptr Base = First * RegionSize;
     const uptr NumberOfRegions = Last - First + 1U;
-    const uptr GroupSize = (1UL << GroupSizeLog);
-    const uptr CurGroupBase =
-        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
-
-    ReleaseRecorder Recorder(Base);
-    PageReleaseContext Context(BlockSize, NumberOfRegions,
-                               /*ReleaseSize=*/RegionSize);
-
-    auto DecompactPtr = [](CompactPtrT CompactPtr) {
-      return reinterpret_cast<uptr>(CompactPtr);
-    };
-    for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
-      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
-      // The `GroupSize` may not be divided by `BlockSize`, which means there is
-      // an unused space at the end of Region. Exclude that space to avoid
-      // unused page map entry.
-      uptr AllocatedGroupSize = GroupBase == CurGroupBase
-                                    ? Sci->CurrentRegionAllocated
-                                    : roundDownSlow(GroupSize, BlockSize);
-      if (AllocatedGroupSize == 0)
-        continue;
-
-      // TransferBatches are pushed in front of BG.Batches. The first one may
-      // not have all caches used.
-      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
-                             BG.Batches.front()->getCount();
-      const uptr BytesInBG = NumBlocks * BlockSize;
-
-      if (ReleaseType != ReleaseToOS::ForceAll) {
-        if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
-          BG.BytesInBGAtLastCheckpoint = BytesInBG;
-          continue;
-        }
-
-        const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
-        if (PushedBytesDelta < PageSize)
-          continue;
-
-        // Given the randomness property, we try to release the pages only if
-        // the bytes used by free blocks exceed certain proportion of allocated
-        // spaces.
-        if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
-                                           (100U - 1U - BlockSize / 16U)) {
-          continue;
-        }
-      }
-
-      // TODO: Consider updating this after page release if `ReleaseRecorder`
-      // can tell the releasd bytes in each group.
-      BG.BytesInBGAtLastCheckpoint = BytesInBG;
-
-      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
-      const uptr RegionIndex = (GroupBase - Base) / RegionSize;
-
-      if (NumBlocks == MaxContainedBlocks) {
-        for (const auto &It : BG.Batches)
-          for (u16 I = 0; I < It.getCount(); ++I)
-            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
-
-        const uptr To = GroupBase + AllocatedGroupSize;
-        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
-                                      AllocatedGroupSize);
-      } else {
-        DCHECK_LT(NumBlocks, MaxContainedBlocks);
-
-        // Note that we don't always visit blocks in each BatchGroup so that we
-        // may miss the chance of releasing certain pages that cross
-        // BatchGroups.
-        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
-                                       RegionIndex, AllocatedGroupSize,
-                                       /*MayContainLastBlockInRegion=*/true);
-      }
-
-      // We may not be able to do the page release In a rare case that we may
-      // fail on PageMap allocation.
-      if (UNLIKELY(!Context.hasBlockMarked()))
-        return 0;
-    }
 
+    // ==================================================================== //
+    // 2. Mark the free blocks so we can tell which pages are in use by
+    //    querying `PageReleaseContext`.
+    // ==================================================================== //
+    PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
+                                                NumberOfRegions, ReleaseType);
     if (!Context.hasBlockMarked())
       return 0;
 
+    // ==================================================================== //
+    // 3. Release the unused physical pages back to the OS.
+    // ==================================================================== //
+    ReleaseRecorder Recorder(Base);
     auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
       ScopedLock L(ByteMapMutex);
       return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
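[Annotation: after this hunk, releaseToOSMaybe reads as a three-stage pipeline: gate, mark, release. A toy sketch (not scudo code) of the resulting shape, showing why returning the marking context by value lets the caller bail out before constructing a recorder at all.]

#include <cstdio>
#include <vector>

// Toy stand-ins illustrating the shape of the refactor: the marking phase
// builds a context object that is returned by value, and the caller bails
// out cheaply when nothing was marked.
struct PageReleaseContext {
  std::vector<unsigned> MarkedPages;
  bool hasBlockMarked() const { return !MarkedPages.empty(); }
};

PageReleaseContext markFreeBlocks(const std::vector<unsigned> &FreePages) {
  PageReleaseContext Context;
  for (unsigned P : FreePages)
    Context.MarkedPages.push_back(P); // Step 2: record which pages are free.
  return Context; // NRVO/move keeps the by-value return cheap.
}

int main() {
  // Step 1 (gating) elided; assume the release is worth attempting.
  PageReleaseContext Context = markFreeBlocks({4, 5, 9});
  if (!Context.hasBlockMarked())
    return 0; // Nothing marked: skip creating the recorder entirely.
  // Step 3: only now pay for the recorder and release the pages.
  std::printf("releasing %zu pages\n", Context.MarkedPages.size());
}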
@@ -1054,6 +985,93 @@ template <typename Config> class SizeClassAllocator32 {
     return true;
   }
 
+  PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
+                                    const uptr BlockSize, const uptr Base,
+                                    const uptr NumberOfRegions,
+                                    ReleaseToOS ReleaseType)
+      REQUIRES(Sci->Mutex) {
+    const uptr PageSize = getPageSizeCached();
+    const uptr GroupSize = (1UL << GroupSizeLog);
+    const uptr CurGroupBase =
+        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
+
+    PageReleaseContext Context(BlockSize, NumberOfRegions,
+                               /*ReleaseSize=*/RegionSize);
+
+    auto DecompactPtr = [](CompactPtrT CompactPtr) {
+      return reinterpret_cast<uptr>(CompactPtr);
+    };
+    for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
+      // `GroupSize` may not be divisible by `BlockSize`, which means there may
+      // be unused space at the end of the Region. Exclude that space to avoid
+      // an unused page map entry.
+      uptr AllocatedGroupSize = GroupBase == CurGroupBase
+                                    ? Sci->CurrentRegionAllocated
+                                    : roundDownSlow(GroupSize, BlockSize);
+      if (AllocatedGroupSize == 0)
+        continue;
+
+      // TransferBatches are pushed in front of BG.Batches. The first one may
+      // not have all caches used.
+      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+                             BG.Batches.front()->getCount();
+      const uptr BytesInBG = NumBlocks * BlockSize;
+
+      if (ReleaseType != ReleaseToOS::ForceAll) {
+        if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
+          BG.BytesInBGAtLastCheckpoint = BytesInBG;
+          continue;
+        }
+
+        const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
+        if (PushedBytesDelta < PageSize)
+          continue;
+
+        // Given the randomness property, we try to release the pages only if
+        // the bytes used by free blocks exceed a certain proportion of the
+        // allocated space.
+        if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
+                                           (100U - 1U - BlockSize / 16U)) {
+          continue;
+        }
+      }
+
+      // TODO: Consider updating this after page release if `ReleaseRecorder`
+      // can tell the released bytes in each group.
+      BG.BytesInBGAtLastCheckpoint = BytesInBG;
+
+      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
+      const uptr RegionIndex = (GroupBase - Base) / RegionSize;
+
+      if (NumBlocks == MaxContainedBlocks) {
+        for (const auto &It : BG.Batches)
+          for (u16 I = 0; I < It.getCount(); ++I)
+            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
+
+        const uptr To = GroupBase + AllocatedGroupSize;
+        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
+                                      AllocatedGroupSize);
+      } else {
+        DCHECK_LT(NumBlocks, MaxContainedBlocks);
+
+        // Note that we don't always visit blocks in each BatchGroup, so we
+        // may miss the chance of releasing certain pages that cross
+        // BatchGroups.
+        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
+                                       RegionIndex, AllocatedGroupSize,
+                                       /*MayContainLastBlockInRegion=*/true);
+      }
+
+      // We may not be able to do the page release in the rare case where the
+      // PageMap allocation fails.
+      if (UNLIKELY(!Context.hasBlockMarked()))
+        break;
+    }
+
+    return Context;
+  }
+
   SizeClassInfo SizeClassInfoArray[NumClasses] = {};
 
   HybridMutex ByteMapMutex;
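[Annotation: to make the proportion heuristic in markFreeBlocks concrete, here is the check pulled out into a standalone runnable form with worked values. The isSmallBlock cutoff below is a hypothetical stand-in; scudo derives it from the cached page size.]

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Reproduces the proportion check from markFreeBlocks in isolation so the
// numbers are easy to inspect.
bool worthReleasing(uptr BlockSize, uptr BytesInBG, uptr AllocatedGroupSize) {
  const bool IsSmallBlock = BlockSize < 4096; // hypothetical cutoff
  if (!IsSmallBlock)
    return true; // Larger blocks skip the proportion heuristic entirely.
  // Skip the group unless free bytes make up a large enough share of the
  // allocated bytes; smaller blocks demand a higher share.
  return (BytesInBG * 100U) / AllocatedGroupSize >=
         (100U - 1U - BlockSize / 16U);
}

int main() {
  // 32-byte blocks: threshold is 100 - 1 - 32/16 = 97%.
  std::printf("%d\n", worthReleasing(32, 96 * 1024, 100 * 1024));  // 0: 96% < 97%
  std::printf("%d\n", worthReleasing(32, 98 * 1024, 100 * 1024));  // 1: 98% >= 97%
  // 512-byte blocks: threshold relaxes to 100 - 1 - 512/16 = 67%.
  std::printf("%d\n", worthReleasing(512, 70 * 1024, 100 * 1024)); // 1: 70% >= 67%
}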