@@ -288,23 +288,33 @@ static inline bool MaybeUserPointer(uptr p) {
 #  endif
 }
 
+namespace {
+struct DirectMemoryAccessor {
+  void Init(uptr begin, uptr end) {};
+  void *LoadPtr(uptr p) const { return *reinterpret_cast<void **>(p); }
+};
+}  // namespace
+
 // Scans the memory range, looking for byte patterns that point into allocator
 // chunks. Marks those chunks with |tag| and adds them to |frontier|.
 // There are two usage modes for this function: finding reachable chunks
 // (|tag| = kReachable) and finding indirectly leaked chunks
 // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
 // so |frontier| = 0.
-void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
-                          const char *region_type, ChunkTag tag) {
+template <class Accessor>
+void ScanForPointers(uptr begin, uptr end, Frontier *frontier,
+                     const char *region_type, ChunkTag tag,
+                     Accessor &accessor) {
   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
   const uptr alignment = flags()->pointer_alignment();
   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
                (void *)end);
+  accessor.Init(begin, end);
   uptr pp = begin;
   if (pp % alignment)
     pp = pp + alignment - pp % alignment;
   for (; pp + sizeof(void *) <= end; pp += alignment) {
-    void *p = *reinterpret_cast<void **>(pp);
+    void *p = accessor.LoadPtr(pp);
 #  if SANITIZER_APPLE
     p = TransformPointer(p);
 #  endif
@@ -339,6 +349,12 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
   }
 }
 
+void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
+                          const char *region_type, ChunkTag tag) {
+  DirectMemoryAccessor accessor;
+  ScanForPointers(begin, end, frontier, region_type, tag, accessor);
+}
+
 // Scans a global range for pointers
 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
   uptr allocator_begin = 0, allocator_end = 0;
@@ -356,14 +372,21 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
   }
 }
 
-void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
-                          Frontier *frontier) {
+template <class Accessor>
+void ScanExtraStack(const InternalMmapVector<Range> &ranges, Frontier *frontier,
+                    Accessor &accessor) {
   for (uptr i = 0; i < ranges.size(); i++) {
-    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
-                         kReachable);
+    ScanForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
+                    kReachable, accessor);
   }
 }
 
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+                          Frontier *frontier) {
+  DirectMemoryAccessor accessor;
+  ScanExtraStack(ranges, frontier, accessor);
+}
+
 #  if SANITIZER_FUCHSIA
 
 // Fuchsia handles all threads together with its own callback.
@@ -399,10 +422,11 @@ static void ProcessThreadRegistry(Frontier *frontier) {
 }
 
 // Scans thread data (stacks and TLS) for heap pointers.
+template <class Accessor>
 static void ProcessThread(tid_t os_id, uptr sp,
                           const InternalMmapVector<uptr> &registers,
                           InternalMmapVector<Range> &extra_ranges,
-                          Frontier *frontier) {
+                          Frontier *frontier, Accessor &accessor) {
   // `extra_ranges` is outside of the function and the loop to reuse mapped
   // memory.
   CHECK(extra_ranges.empty());
@@ -426,8 +450,8 @@ static void ProcessThread(tid_t os_id, uptr sp,
     uptr registers_begin = reinterpret_cast<uptr>(registers.data());
     uptr registers_end =
         reinterpret_cast<uptr>(registers.data() + registers.size());
-    ScanRangeForPointers(registers_begin, registers_end, frontier, "REGISTERS",
-                         kReachable);
+    ScanForPointers(registers_begin, registers_end, frontier, "REGISTERS",
+                    kReachable, accessor);
   }
 
   if (flags()->use_stacks) {
@@ -451,9 +475,10 @@ static void ProcessThread(tid_t os_id, uptr sp,
       // Shrink the stack range to ignore out-of-scope values.
       stack_begin = sp;
     }
-    ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", kReachable);
+    ScanForPointers(stack_begin, stack_end, frontier, "STACK", kReachable,
+                    accessor);
     GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
-    ScanExtraStackRanges(extra_ranges, frontier);
+    ScanExtraStack(extra_ranges, frontier, accessor);
   }
 
   if (flags()->use_tls) {
@@ -463,21 +488,23 @@ static void ProcessThread(tid_t os_id, uptr sp,
       // otherwise, only scan the non-overlapping portions
       if (cache_begin == cache_end || tls_end < cache_begin ||
           tls_begin > cache_end) {
-        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+        ScanForPointers(tls_begin, tls_end, frontier, "TLS", kReachable,
+                        accessor);
       } else {
         if (tls_begin < cache_begin)
-          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
-                               kReachable);
+          ScanForPointers(tls_begin, cache_begin, frontier, "TLS", kReachable,
+                          accessor);
         if (tls_end > cache_end)
-          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
+          ScanForPointers(cache_end, tls_end, frontier, "TLS", kReachable,
+                          accessor);
       }
     }
 #  if SANITIZER_ANDROID
     auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                    void *arg) -> void {
-      ScanRangeForPointers(
+      ScanForPointers(
           reinterpret_cast<uptr>(dtls_begin), reinterpret_cast<uptr>(dtls_end),
-          reinterpret_cast<Frontier *>(arg), "DTLS", kReachable);
+          reinterpret_cast<Frontier *>(arg), "DTLS", kReachable, accessor);
     };
 
     // FIXME: There might be a race-condition here (and in Bionic) if the
@@ -492,8 +519,8 @@ static void ProcessThread(tid_t os_id, uptr sp,
         if (dtls_beg < dtls_end) {
           LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                       (void *)dtls_end);
-          ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
-                               kReachable);
+          ScanForPointers(dtls_beg, dtls_end, frontier, "DTLS", kReachable,
+                          accessor);
         }
       });
     } else {
@@ -530,7 +557,8 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
     if (os_id == caller_tid)
       sp = caller_sp;
 
-    ProcessThread(os_id, sp, registers, extra_ranges, frontier);
+    DirectMemoryAccessor accessor;
+    ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
   }
 
   // Add pointers reachable from ThreadContexts
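
For orientation, here is a minimal sketch, not part of the patch above, of how the new Accessor template parameter could be satisfied by something other than DirectMemoryAccessor. Only ScanForPointers, DirectMemoryAccessor, Frontier, kReachable, uptr, InternalMmapVector, and internal_memcpy come from the patch or from sanitizer_common; the CopyMemoryAccessor name and its buffer-based implementation are illustrative assumptions.

// Sketch only: an accessor that snapshots the scanned range into a local
// buffer in Init() and serves LoadPtr() from that copy instead of
// dereferencing the range in place. The CopyMemoryAccessor name and the
// memcpy-based snapshot are assumptions for illustration, not upstream code.
struct CopyMemoryAccessor {
  void Init(uptr begin, uptr end) {
    begin_ = begin;
    buffer_.clear();
    buffer_.resize(end - begin);
    // In-process copy for illustration; an accessor for another process
    // would read the target's memory here instead.
    internal_memcpy(buffer_.data(), reinterpret_cast<const void *>(begin),
                    end - begin);
  }
  void *LoadPtr(uptr p) const {
    // ScanForPointers only calls LoadPtr with pointer-aligned p such that
    // p + sizeof(void *) <= end, so the offset stays inside the buffer.
    return *reinterpret_cast<void *const *>(&buffer_[p - begin_]);
  }

 private:
  uptr begin_ = 0;
  InternalMmapVector<char> buffer_;
};

// Usage mirrors the DirectMemoryAccessor wrappers introduced in the patch:
//   CopyMemoryAccessor accessor;
//   ScanForPointers(begin, end, frontier, "REGION", kReachable, accessor);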
0 commit comments