@@ -72,13 +72,20 @@ static inline void unmap(LargeBlock::Header *H) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
+namespace {
+struct CachedBlock {
+  uptr CommitBase = 0;
+  uptr CommitSize = 0;
+  uptr BlockBegin = 0;
+  MemMapT MemMap = {};
+  u64 Time = 0;
+};
+} // namespace
+
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
-    return false;
-  }
+  bool retrieve(UNUSED uptr Size, UNUSED CachedBlock &Entry) { return false; }
   void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
   bool canCache(UNUSED uptr Size) { return false; }
   void disable() {}
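The CachedBlock struct is hoisted out of MapAllocatorCache (its removal from the class body appears further down) into an anonymous namespace, apparently so that MapAllocatorNoCache can name it in the new retrieve() signature. As a minimal illustration of why the hoist is needed (hypothetical Cache/NoCache names, not the Scudo classes): a type nested privately inside one class cannot appear in another class's interface.

struct Cache {
private:
  struct CachedBlock {};        // nested, private
public:
  bool retrieve(CachedBlock &); // fine: members can name it
};

struct NoCache {
  // bool retrieve(Cache::CachedBlock &); // ill-formed: 'CachedBlock' is private
};

// Hoisting CachedBlock to namespace scope gives both cache variants the
// same retrieve(uptr, CachedBlock &) signature.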
@@ -248,62 +255,26 @@ template <typename Config> class MapAllocatorCache {
     Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
-    const uptr PageSize = getPageSizeCached();
+  bool retrieve(uptr Size, CachedBlock &Entry) EXCLUDES(Mutex) {
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     bool Found = false;
-    CachedBlock Entry;
-    uptr HeaderPos = 0;
     {
       ScopedLock L(Mutex);
       if (EntriesCount == 0)
         return false;
       for (u32 I = 0; I < MaxCount; I++) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        if (!CommitBase)
-          continue;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        HeaderPos =
-            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
-        if (HeaderPos > CommitBase + CommitSize)
+        if (!Entries[I].CommitBase)
           continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+        if (Size > Entries[I].CommitSize)
           continue;
-        }
         Found = true;
         Entry = Entries[I];
         Entries[I].CommitBase = 0;
         EntriesCount--;
         break;
       }
     }
-    if (!Found)
-      return false;
-
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(HeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
+    return Found;
   }
 
   bool canCache(uptr Size) {
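retrieve() now does strictly less work under the Mutex: it skips empty slots, takes the first entry whose CommitSize can hold the (rounded) request, copies it out, and frees the slot; the header-placement math and tag management move to the caller. A stand-alone sketch of the simplified first-fit scan (stand-in types, no locking; not the Scudo source):

#include <cstdint>

using uptr = std::uintptr_t;
using u32 = std::uint32_t;

struct CachedBlock {
  uptr CommitBase = 0; // 0 doubles as the empty-slot sentinel
  uptr CommitSize = 0;
};

// Copies out the first entry big enough for Size and zeroes its
// CommitBase to mark the slot free; returns false on a miss.
bool retrieveFirstFit(CachedBlock *Entries, u32 MaxCount, uptr Size,
                      CachedBlock &Out) {
  for (u32 I = 0; I < MaxCount; I++) {
    if (!Entries[I].CommitBase)
      continue; // empty slot
    if (Size > Entries[I].CommitSize)
      continue; // too small for this request
    Out = Entries[I];
    Entries[I].CommitBase = 0;
    return true;
  }
  return false;
}

Note that the old per-entry geometry checks (the HeaderPos bounds and the PageSize * MaxUnusedCachePages cap) are replaced by the single Size > CommitSize comparison.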
@@ -383,14 +354,6 @@ template <typename Config> class MapAllocatorCache {
     }
   }
 
-  struct CachedBlock {
-    uptr CommitBase = 0;
-    uptr CommitSize = 0;
-    uptr BlockBegin = 0;
-    MemMapT MemMap = {};
-    u64 Time = 0;
-  };
-
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
     if (!Entry.CommitBase || !Entry.Time)
       return;
@@ -476,6 +439,27 @@ template <typename Config> class MapAllocator {
     }
   }
 
+  inline void setHeader(Options Options, CachedBlock &Entry,
+                        LargeBlock::Header *H, bool &Zeroed) {
+    Zeroed = Entry.Time == 0;
+    if (useMemoryTagging<Config>(Options)) {
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+      // Block begins after the LargeBlock::Header
+      uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+      if (Zeroed) {
+        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+                  NewBlockBegin);
+      } else if (Entry.BlockBegin < NewBlockBegin) {
+        storeTags(Entry.BlockBegin, NewBlockBegin);
+      } else {
+        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+      }
+    }
+    H->CommitBase = Entry.CommitBase;
+    H->CommitSize = Entry.CommitSize;
+    H->MemMap = Entry.MemMap;
+  }
+
   bool canCache(uptr Size) { return Cache.canCache(Size); }
 
   bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
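When memory tagging is enabled, setHeader() has three distinct retagging cases, depending on whether the cached pages were zeroed and on where the previous user block began relative to the new one. A compact restatement of that branch logic as a pure function (illustrative only; the real code passes the actual address ranges to storeTags()):

#include <cstdint>

using uptr = std::uintptr_t;

enum class Retag {
  FromCommitBase, // zeroed block: retag [CommitBase, NewBlockBegin)
  GapOnly,        // old begin below new: retag [OldBlockBegin, NewBlockBegin)
  ClearStale      // old begin at/above new: retag [NewBlockBegin, OldBlockBegin)
};

Retag pickRetag(bool Zeroed, uptr OldBlockBegin, uptr NewBlockBegin) {
  if (Zeroed)
    return Retag::FromCommitBase;
  if (OldBlockBegin < NewBlockBegin)
    return Retag::GapOnly;
  return Retag::ClearStale; // done through untagged pointers in setHeader()
}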
@@ -530,7 +514,15 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
   if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
     LargeBlock::Header *H;
     bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+    CachedBlock Entry;
+    if (Cache.retrieve(RoundedSize, Entry)) {
+      const uptr AllocPos =
+          roundDown(Entry.CommitBase + Entry.CommitSize - Size, Alignment);
+      const uptr HeaderPos =
+          AllocPos - LargeBlock::getHeaderSize() - Chunk::getHeaderSize();
+      H = reinterpret_cast<LargeBlock::Header *>(
+          LargeBlock::addHeaderTag<Config>(HeaderPos));
+      setHeader(Options, Entry, H, Zeroed);
       const uptr BlockEnd = H->CommitBase + H->CommitSize;
       if (BlockEndPtr)
         *BlockEndPtr = BlockEnd;
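The placement arithmetic that used to live in retrieve() now runs on a cache hit in allocate(): the user block is aligned down from the end of the committed region, and the two headers sit immediately below it. A worked, checkable example with stand-in sizes (hypothetical values; the real header sizes come from Chunk::getHeaderSize() and LargeBlock::getHeaderSize(), and RoundedSize is computed earlier in allocate(), outside this diff):

#include <cassert>
#include <cstdint>

using uptr = std::uintptr_t;

// Power-of-two round-down/round-up, as used in the diff.
constexpr uptr roundDown(uptr X, uptr B) { return X & ~(B - 1); }
constexpr uptr roundUp(uptr X, uptr B) { return (X + B - 1) & ~(B - 1); }

int main() {
  const uptr PageSize = 0x1000, Alignment = 0x10;
  const uptr Size = 0x2345, Headers = 48; // combined header bytes (stand-in)
  // Querying the cache with a rounded size that covers the headers...
  const uptr RoundedSize = roundUp(Size + Headers, PageSize); // 0x3000
  // ...means any hit satisfies CommitSize >= RoundedSize:
  const uptr CommitBase = 0x40000, CommitSize = RoundedSize;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - Headers;
  assert(HeaderPos >= CommitBase && "headers fit inside the cached region");
  return 0;
}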