@@ -312,9 +312,15 @@ template <typename Config> class SizeClassAllocator32 {
   }
 
   void getFragmentationInfo(ScopedString *Str) {
-    // TODO(chiahungduan): Organize the steps in releaseToOSMaybe() into
-    // functions which make the collection of fragmentation data easier.
-    Str->append("Fragmentation Stats: SizeClassAllocator32: Unsupported yet\n");
+    Str->append(
+        "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
+        getPageSizeCached());
+
+    for (uptr I = 1; I < NumClasses; I++) {
+      SizeClassInfo *Sci = getSizeClassInfo(I);
+      ScopedLock L(Sci->Mutex);
+      getSizeClassFragmentationInfo(Sci, I, Str);
+    }
   }
 
   bool setOption(Option O, sptr Value) {
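For context, a hedged sketch of how this entry point might be driven from elsewhere in the Scudo tree (illustrative only, not part of this patch; it assumes the ScopedString buffer from string_utils.h and its output() method, the same pattern other Scudo stats dumps follow):

template <typename Config>
void printFragmentation(SizeClassAllocator32<Config> &Primary) {
  // Collect the report into a growable buffer, then emit it through the
  // logging sink. The helper name and free-standing form are hypothetical;
  // only getFragmentationInfo() itself comes from the patch.
  ScopedString Str;
  Primary.getFragmentationInfo(&Str);
  Str.output();
}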
@@ -862,6 +868,52 @@ template <typename Config> class SizeClassAllocator32 {
                 PushedBytesDelta >> 10);
   }
 
+  void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
+                                     ScopedString *Str) REQUIRES(Sci->Mutex) {
+    const uptr BlockSize = getSizeByClassId(ClassId);
+    const uptr First = Sci->MinRegionIndex;
+    const uptr Last = Sci->MaxRegionIndex;
+    const uptr Base = First * RegionSize;
+    const uptr NumberOfRegions = Last - First + 1U;
+    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+      ScopedLock L(ByteMapMutex);
+      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+    };
+
+    FragmentationRecorder Recorder;
+    if (!Sci->FreeListInfo.BlockList.empty()) {
+      PageReleaseContext Context =
+          markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
+                         ReleaseToOS::ForceAll);
+      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+    }
+
+    const uptr PageSize = getPageSizeCached();
+    const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
+    const uptr InUseBlocks =
+        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+    uptr AllocatedPagesCount = 0;
+    if (TotalBlocks != 0U) {
+      for (uptr I = 0; I < NumberOfRegions; ++I) {
+        if (SkipRegion(I))
+          continue;
+        AllocatedPagesCount += RegionSize / PageSize;
+      }
+
+      DCHECK_NE(AllocatedPagesCount, 0U);
+    }
+
+    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
+    const uptr InUsePages =
+        AllocatedPagesCount - Recorder.getReleasedPagesCount();
+    const uptr InUseBytes = InUsePages * PageSize;
+
+    Str->append("%02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
+                "pages: %6zu/%6zu inuse bytes: %6zuK\n",
+                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
+                AllocatedPagesCount, InUseBytes >> 10);
+  }
+
   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                  ReleaseToOS ReleaseType = ReleaseToOS::Normal)
       REQUIRES(Sci->Mutex) {
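To make the per-class arithmetic concrete, here is a minimal standalone sketch with made-up numbers (RegionSize, page size, block size, and the counters are hypothetical; only the derivation and the format string mirror getSizeClassFragmentationInfo() above):

#include <cstdio>

int main() {
  // Hypothetical per-class inputs; in the allocator these come from
  // SizeClassInfo and the region map.
  const size_t RegionSize = 256 * 1024;
  const size_t PageSize = 4096;
  const size_t BlockSize = 32;           // getSizeByClassId(ClassId)
  const size_t NumberOfRegions = 2;      // regions owned by this class
  const size_t AllocatedUser = NumberOfRegions * RegionSize;
  const size_t PoppedBlocks = 9000, PushedBlocks = 2500;
  const size_t ReleasedPages = 40;       // what FragmentationRecorder counted

  // Same derivation as the patch: total blocks from the bytes handed to this
  // class, in-use pages = mapped pages minus pages the recorder saw released.
  const size_t TotalBlocks = AllocatedUser / BlockSize;
  const size_t InUseBlocks = PoppedBlocks - PushedBlocks;
  const size_t AllocatedPagesCount = NumberOfRegions * (RegionSize / PageSize);
  const size_t InUsePages = AllocatedPagesCount - ReleasedPages;
  const size_t InUseBytes = InUsePages * PageSize;

  std::printf("%02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
              "pages: %6zu/%6zu inuse bytes: %6zuK\n",
              (size_t)3 /* ClassId */, BlockSize, InUseBlocks, TotalBlocks,
              InUsePages, AllocatedPagesCount, InUseBytes >> 10);
  return 0;
}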