@@ -64,32 +64,8 @@ template <typename Config> class SizeClassAllocator64 {
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
-    DCHECK_EQ(PrimaryBase, 0U);
-
-    // Reserve the space required for the Primary.
-    PrimaryBase = reinterpret_cast<uptr>(map(
-        nullptr, PrimarySize, "scudo:primary_reserve", MAP_NOACCESS, &Data));
 
-    u32 Seed;
-    const u64 Time = getMonotonicTimeFast();
-    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
-      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
     const uptr PageSize = getPageSizeCached();
-    for (uptr I = 0; I < NumClasses; I++) {
-      RegionInfo *Region = getRegionInfo(I);
-      // The actual start of a region is offset by a random number of pages
-      // when PrimaryEnableRandomOffset is set.
-      Region->RegionBeg = (PrimaryBase + (I << Config::PrimaryRegionSizeLog)) +
-                          (Config::PrimaryEnableRandomOffset
-                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
-                               : 0);
-      Region->RandState = getRandomU32(&Seed);
-      Region->ReleaseInfo.LastReleaseAtNs = Time;
-    }
-    shuffle(RegionInfoArray, NumClasses, &Seed);
-
-    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
-
     const uptr GroupSize = (1U << GroupSizeLog);
     const uptr PagesInGroup = GroupSize / PageSize;
     const uptr MinSizeClass = getSizeByClassId(1);
@@ -126,6 +102,37 @@ template <typename Config> class SizeClassAllocator64 {
     // use its size of in-use blocks as a heuristic.
     SmallerBlockReleasePageDelta =
         PagesInGroup * (1 + MinSizeClass / 16U) / 100;
+
+    DCHECK_EQ(PrimaryBase, 0U);
+    // Reserve the space required for the Primary.
+    PrimaryBase = reinterpret_cast<uptr>(map(
+        nullptr, PrimarySize, "scudo:primary_reserve", MAP_NOACCESS, &Data));
+
+    u32 Seed;
+    const u64 Time = getMonotonicTimeFast();
+    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
+      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
+
+    for (uptr I = 0; I < NumClasses; I++) {
+      RegionInfo *Region = getRegionInfo(I);
+      // The actual start of a region is offset by a random number of pages
+      // when PrimaryEnableRandomOffset is set.
+      Region->RegionBeg = (PrimaryBase + (I << Config::PrimaryRegionSizeLog)) +
+                          (Config::PrimaryEnableRandomOffset
+                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
+                               : 0);
+      Region->RandState = getRandomU32(&Seed);
+      // Releasing small blocks is expensive, set a higher threshold to avoid
+      // frequent page releases.
+      if (isSmallBlock(getSizeByClassId(I)))
+        Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+      else
+        Region->TryReleaseThreshold = PageSize;
+      Region->ReleaseInfo.LastReleaseAtNs = Time;
+    }
+    shuffle(RegionInfoArray, NumClasses, &Seed);
+
+    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
   }
 
   void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
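The hunk above moves the region setup after SmallerBlockReleasePageDelta is computed so the new per-region TryReleaseThreshold can be seeded from it. Below is a standalone sketch of that seeding arithmetic using plain integers and assumed values (4 KiB pages, a 1 MiB group, a hypothetical 32-byte smallest class and 64-byte block size); it mirrors the formulas in the diff but is not scudo code:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed values for illustration only.
  const uint64_t PageSize = 4096;         // getPageSizeCached()
  const uint64_t GroupSize = 1024 * 1024; // 1U << GroupSizeLog
  const uint64_t MinSizeClass = 32;       // getSizeByClassId(1), hypothetical
  const uint64_t BlockSize = 64;          // hypothetical small size class

  // Mirrors: PagesInGroup * (1 + MinSizeClass / 16U) / 100
  const uint64_t PagesInGroup = GroupSize / PageSize;
  const uint64_t SmallerBlockReleasePageDelta =
      PagesInGroup * (1 + MinSizeClass / 16) / 100;

  // Mirrors the per-region seeding in init(): small blocks get a higher
  // initial threshold, everything else releases after one page of pushes.
  const bool Small = BlockSize < PageSize / 16;
  const uint64_t TryReleaseThreshold =
      Small ? PageSize * SmallerBlockReleasePageDelta : PageSize;

  std::printf("delta=%llu pages, initial threshold=%llu bytes\n",
              (unsigned long long)SmallerBlockReleasePageDelta,
              (unsigned long long)TryReleaseThreshold);
  return 0;
}

With these assumed numbers SmallerBlockReleasePageDelta comes out to 7 pages, so a small-block region starts with a 28672-byte threshold while every other region starts at one page.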
@@ -440,6 +447,8 @@ template <typename Config> class SizeClassAllocator64 {
     uptr MappedUser GUARDED_BY(Mutex) = 0;
     // Bytes allocated for user memory.
     uptr AllocatedUser GUARDED_BY(Mutex) = 0;
+    // The minimum size of pushed blocks to trigger page release.
+    uptr TryReleaseThreshold GUARDED_BY(Mutex) = 0;
     MapPlatformData Data GUARDED_BY(Mutex) = {};
     ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex) = {};
     bool Exhausted GUARDED_BY(Mutex) = false;
@@ -486,6 +495,11 @@ template <typename Config> class SizeClassAllocator64 {
     return Base + (CompactPtrGroupBase << CompactPtrScale);
   }
 
+  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+    const uptr PageSize = getPageSizeCached();
+    return BlockSize < PageSize / 16U;
+  }
+
   // Push the blocks to their batch group. The layout will be like,
   //
   // FreeList -> BG -> BG -> BG
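For a sense of scale, with the common 4 KiB page size the new predicate treats anything under 256 bytes as a small block. A minimal standalone mirror of it, with the page size hard-coded as an assumption instead of calling getPageSizeCached():

#include <cassert>
#include <cstdint>

// Standalone stand-in for isSmallBlock(); 4096 is an assumed page size.
static bool isSmallBlockExample(uint64_t BlockSize) {
  const uint64_t PageSize = 4096;
  return BlockSize < PageSize / 16; // i.e. BlockSize < 256
}

int main() {
  assert(isSmallBlockExample(32));    // tiny size class: small
  assert(isSmallBlockExample(240));   // still below 256: small
  assert(!isSmallBlockExample(256));  // 256 bytes and above: not small
  assert(!isSmallBlockExample(4096)); // page-sized blocks: not small
  return 0;
}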
@@ -823,14 +837,15 @@ template <typename Config> class SizeClassAllocator64 {
       return 0; // Nothing new to release.
 
     const bool CheckDensity =
-        BlockSize < PageSize / 16U && ReleaseType != ReleaseToOS::ForceAll;
+        isSmallBlock(BlockSize) && ReleaseType != ReleaseToOS::ForceAll;
     // Releasing smaller blocks is expensive, so we want to make sure that a
     // significant amount of bytes are free, and that there has been a good
     // amount of batches pushed to the freelist before attempting to release.
     if (CheckDensity) {
       if (ReleaseType == ReleaseToOS::Normal &&
-          BytesPushed < Region->AllocatedUser / 16U)
+          BytesPushed < Region->TryReleaseThreshold) {
         return 0;
+      }
     }
 
     if (ReleaseType == ReleaseToOS::Normal) {
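In other words, a Normal (interval-driven) release of a small-block region now bails out until the bytes pushed since the last checkpoint reach the region's adaptive threshold, instead of a fixed AllocatedUser / 16. A simplified, hypothetical stand-in for that gate, shown only to illustrate the control flow (the real check lives in releaseToOSMaybe() and reads the region state under its mutex):

#include <cstdint>
#include <cstdio>

enum class ReleaseToOS { Normal, Force, ForceAll };

// Hypothetical simplification of the early-exit above.
bool shouldAttemptRelease(uint64_t BlockSize, uint64_t PageSize,
                          uint64_t BytesPushed, uint64_t TryReleaseThreshold,
                          ReleaseToOS Type) {
  const bool CheckDensity =
      BlockSize < PageSize / 16 && Type != ReleaseToOS::ForceAll;
  if (CheckDensity && Type == ReleaseToOS::Normal &&
      BytesPushed < TryReleaseThreshold)
    return false; // Not enough freed bytes accumulated since last checkpoint.
  return true;
}

int main() {
  // With the 28672-byte threshold from the earlier example, 8 KiB of pushed
  // bytes is not yet enough to attempt a Normal release.
  std::printf("%d\n", shouldAttemptRelease(64, 4096, 8192, 28672,
                                           ReleaseToOS::Normal)); // prints 0
  return 0;
}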
@@ -865,11 +880,18 @@ template <typename Config> class SizeClassAllocator64 {
     // of groups.
     uptr NumberOfBatchGroups = Region->FreeList.size();
 
+    // We are examining each group and will take the minimum distance to the
+    // release threshold as the next Region::TryReleaseThreshold(). Note that if
+    // the size of free blocks has reached the release threshold, the distance
+    // to the next release will be PageSize * SmallerBlockReleasePageDelta. See
+    // the comment on `SmallerBlockReleasePageDelta` for more details.
+    uptr MinDistToThreshold = GroupSize;
+
     for (BatchGroup *BG = Region->FreeList.front(), *Prev = nullptr;
          BG != nullptr;) {
       const uptr PushedBytesDelta =
-          BG->PushedBlocks - BG->PushedBlocksAtLastCheckpoint;
-      if (PushedBytesDelta * BlockSize < PageSize) {
+          (BG->PushedBlocks - BG->PushedBlocksAtLastCheckpoint) * BlockSize;
+      if (PushedBytesDelta < PageSize) {
         Prev = BG;
         BG = BG->Next;
         continue;
@@ -913,16 +935,38 @@ template <typename Config> class SizeClassAllocator64 {
       // that this heuristic only applies when all the spaces in a BatchGroup
       // are allocated.
       if (CheckDensity) {
-        const bool HighDensity = (BytesInBG * 100U) / AllocatedGroupSize >=
-                                 (100U - 1U - BlockSize / 16U);
+        const uptr ReleaseThreshold =
+            (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
+        const bool HighDensity = BytesInBG >= ReleaseThreshold;
         const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
         // If all blocks in the group are released, we will do range marking
         // which is fast. Otherwise, we will wait until we have accumulated
         // a certain amount of free memory.
         const bool ReachReleaseDelta =
-            MayHaveReleasedAll ? true
-                               : PushedBytesDelta * BlockSize >=
-                                     PageSize * SmallerBlockReleasePageDelta;
+            MayHaveReleasedAll
+                ? true
+                : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;
+
+        if (!HighDensity) {
+          DCHECK_LE(BytesInBG, ReleaseThreshold);
+          // The following is the usage of a memory group,
+          //
+          //     BytesInBG             ReleaseThreshold
+          //  /             \                 v
+          //  +---+---------------------------+-----+
+          //  |   |         |                 |     |
+          //  +---+---------------------------+-----+
+          //       \                     /      ^
+          //        PushedBytesDelta        GroupEnd
+          MinDistToThreshold =
+              Min(MinDistToThreshold,
+                  ReleaseThreshold - BytesInBG + PushedBytesDelta);
+        } else {
+          // If it reaches high density at this round, the next time we will try
+          // to release is based on SmallerBlockReleasePageDelta
+          MinDistToThreshold =
+              Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
+        }
 
         if (!HighDensity || !ReachReleaseDelta) {
           Prev = BG;
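The density check above is easier to follow with concrete numbers. The snippet below reruns the two expressions with assumed values (64-byte blocks, 4 KiB pages, a fully allocated 256 KiB group); it is a worked example, not the allocator's code:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed for illustration.
  const uint64_t BlockSize = 64;
  const uint64_t PageSize = 4096;
  const uint64_t AllocatedGroupSize = 256 * 1024;

  // ReleaseThreshold = AllocatedGroupSize * (100 - 1 - BlockSize / 16) / 100:
  // with 64-byte blocks that is 95% of the group, i.e. 249036 bytes here.
  const uint64_t ReleaseThreshold =
      (AllocatedGroupSize * (100 - 1 - BlockSize / 16)) / 100;

  // Suppose 230000 bytes of the group are currently free (BytesInBG) and
  // 16384 of them were pushed since the last checkpoint (PushedBytesDelta).
  const uint64_t BytesInBG = 230000;
  const uint64_t PushedBytesDelta = 16384;
  const bool HighDensity = BytesInBG >= ReleaseThreshold; // false

  // The group still needs ReleaseThreshold - BytesInBG more free bytes, and
  // PushedBytesDelta of the current free bytes are "new", so its candidate
  // for the next TryReleaseThreshold is 249036 - 230000 + 16384 = 35420.
  const uint64_t DistToThreshold =
      ReleaseThreshold - BytesInBG + PushedBytesDelta;

  std::printf("HighDensity=%d ReleaseThreshold=%llu DistToThreshold=%llu\n",
              HighDensity, (unsigned long long)ReleaseThreshold,
              (unsigned long long)DistToThreshold);
  return 0;
}

So this hypothetical group is not yet at high density and proposes 35420 bytes as its distance; the region's next TryReleaseThreshold is the minimum of such distances over all groups that were not skipped.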
@@ -976,6 +1020,16 @@ template <typename Config> class SizeClassAllocator64 {
       GroupToRelease.push_back(Cur);
     }
 
+    // Only small blocks have the adaptive `TryReleaseThreshold`.
+    if (isSmallBlock(BlockSize)) {
+      // If the MinDistToThreshold is not updated, that means each memory group
+      // may have only pushed less than a page size. In that case, just set it
+      // back to normal.
+      if (MinDistToThreshold == GroupSize)
+        MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
+      Region->TryReleaseThreshold = MinDistToThreshold;
+    }
+
     if (GroupToRelease.empty())
       return 0;
 
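Taking the minimum distance across the examined groups appears intended to make the region retry a Normal release as soon as the group closest to its density threshold could plausibly cross it, while the GroupSize sentinel covers the case where no group pushed even a page of bytes since the last checkpoint, in which case the threshold falls back to PageSize * SmallerBlockReleasePageDelta as the in-code comments describe.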