@@ -84,7 +84,6 @@ static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
 // ---------------------|
 // M -- magic value kAllocBegMagic
 // B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
 
 class ChunkHeader {
  public:
@@ -161,13 +160,47 @@ class AsanChunk : public ChunkBase {
   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
 };
 
+class LargeChunkHeader {
+  static constexpr uptr kAllocBegMagic = 0xCC6E96B9;
+  atomic_uint64_t magic;
+  AsanChunk *chunk_header;
+
+ public:
+  AsanChunk *Get() {
+    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
+               ? chunk_header
+               : reinterpret_cast<AsanChunk *>(this);
+  }
+
+  void Set(AsanChunk *p) {
+    if (p) {
+      chunk_header = p;
+      atomic_store(&magic, kAllocBegMagic, memory_order_release);
+      return;
+    }
+
+    u64 old = kAllocBegMagic;
+    if (!atomic_compare_exchange_strong(&magic, &old, 0,
+                                        memory_order_release)) {
+      CHECK_EQ(old, kAllocBegMagic);
+    }
+  }
+};
+
 struct QuarantineCallback {
   QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
       : cache_(cache),
         stack_(stack) {
   }
 
   void Recycle(AsanChunk *m) {
+    void *p = get_allocator().GetBlockBegin(m);
+    if (p != m) {
+      // Clear the magic value, as allocator internals may overwrite the
+      // contents of deallocated chunk, confusing GetAsanChunk lookup.
+      reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
+    }
+
     u8 old_chunk_state = CHUNK_QUARANTINE;
     if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                         CHUNK_INVALID, memory_order_acquire)) {
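
`LargeChunkHeader` replaces the old pair of raw `uptr` writes with a release/acquire handshake: `Set()` writes `chunk_header` before publishing `kAllocBegMagic` with a release store, and `Get()` acquire-loads the magic, so any reader that observes the magic also observes a valid back-pointer, while a reader racing with `Set(nullptr)` falls back to treating the header address itself as the chunk. Below is a minimal standalone mirror of this handshake using `std::atomic` (the names `PublishedHeader` and `Chunk` are hypothetical; the real class uses the `sanitizer_common` atomics shown above):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

struct Chunk {};  // stand-in for AsanChunk

class PublishedHeader {  // hypothetical mirror of LargeChunkHeader
  static constexpr uint64_t kMagic = 0xCC6E96B9;
  std::atomic<uint64_t> magic{0};
  Chunk *chunk = nullptr;

 public:
  Chunk *Get() {
    // Acquire pairs with the release in Set(): a thread that sees the
    // magic is guaranteed to also see the write to 'chunk'.
    return magic.load(std::memory_order_acquire) == kMagic
               ? chunk
               : reinterpret_cast<Chunk *>(this);
  }

  void Set(Chunk *p) {
    if (p) {
      chunk = p;  // publish the pointer first...
      magic.store(kMagic, std::memory_order_release);  // ...then the magic
      return;
    }
    // Clearing: the magic must have been set exactly once before.
    uint64_t expected = kMagic;
    bool was_set = magic.compare_exchange_strong(expected, 0,
                                                 std::memory_order_release);
    (void)was_set;
    assert(was_set && "magic should have been set before clearing");
  }
};

int main() {
  PublishedHeader h;
  Chunk c;
  assert(h.Get() == reinterpret_cast<Chunk *>(&h));  // not yet published
  h.Set(&c);
  assert(h.Get() == &c);                             // published
  h.Set(nullptr);
  assert(h.Get() == reinterpret_cast<Chunk *>(&h));  // cleared again
}
```

The `CHECK_EQ` in the failed-CAS branch of the real `Set(nullptr)` serves the same purpose as the assert here: clearing a magic that was never set indicates corrupted metadata.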
@@ -177,15 +210,6 @@ struct QuarantineCallback {
     PoisonShadow(m->Beg(),
                  RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                  kAsanHeapLeftRedzoneMagic);
-    void *p = get_allocator().GetBlockBegin(m);
-    if (p != m) {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
-      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
-      // Clear the magic value, as allocator internals may overwrite the
-      // contents of deallocated chunk, confusing GetAsanChunk lookup.
-      alloc_magic[0] = 0;
-      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
-    }
 
     // Statistics.
     AsanStats &thread_stats = GetCurrentThreadStats();
@@ -541,11 +565,6 @@ struct Allocator {
     uptr chunk_beg = user_beg - kChunkHeaderSize;
     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
     m->alloc_type = alloc_type;
-    if (alloc_beg != chunk_beg) {
-      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
-      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
-      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
-    }
     CHECK(size);
     m->SetUsedSize(size);
     if (using_primary_allocator) {
@@ -591,6 +610,10 @@ struct Allocator {
 #endif
     // Must be the last mutation of metadata in this function.
     atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
+    if (alloc_beg != chunk_beg) {
+      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
+      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
+    }
     ASAN_MALLOC_HOOK(res, size);
     return res;
   }
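
Note the reordering relative to the hunk deleted at old line 541: the back-pointer used to be written before the chunk was fully initialized and marked `CHUNK_ALLOCATED`; now `Set(m)` runs only after the release store to `chunk_state`, so a concurrent `GetAsanChunk()` that follows the freshly published pointer finds a completely constructed chunk. A runnable toy of that publication order in `std::atomic` terms (all names hypothetical, not the sanitizer API):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

// Mirror of the Allocate()/GetAsanChunk() race: chunk fields are written,
// the state is release-published, and only then is the pointer published,
// so a concurrent reader never dereferences a half-initialized chunk.
struct MiniChunk {
  uint64_t used_size = 0;         // plain metadata field
  std::atomic<uint8_t> state{0};  // 0 = invalid, 1 = allocated
};

std::atomic<MiniChunk *> published{nullptr};  // stands in for LargeChunkHeader

void allocate(MiniChunk *m) {
  m->used_size = 42;                              // initialize metadata...
  m->state.store(1, std::memory_order_release);   // ...mark allocated...
  published.store(m, std::memory_order_release);  // ...then publish
}

void lookup() {
  MiniChunk *m = published.load(std::memory_order_acquire);
  if (m && m->state.load(std::memory_order_acquire) == 1)
    assert(m->used_size == 42);  // guaranteed by the release/acquire chain
}

int main() {
  MiniChunk c;
  std::thread writer(allocate, &c);
  std::thread reader(lookup);
  writer.join();
  reader.join();
}
```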
@@ -763,11 +786,7 @@ struct Allocator {
       uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
       p = reinterpret_cast<AsanChunk *>(meta[1]);
     } else {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
-      if (alloc_magic[0] == kAllocBegMagic)
-        p = reinterpret_cast<AsanChunk *>(alloc_magic[1]);
-      else
-        p = reinterpret_cast<AsanChunk *>(alloc_beg);
+      p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
     }
     if (!p)
       return nullptr;
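
On the lookup side, the single `Get()` call collapses the old open-coded branch because its fallback already encodes the layout invariant. A sketch of the two layouts involved (a hypothetical diagram; the file's own header comment gives the exact byte-level picture):

```cpp
// alloc_beg == chunk_beg:
//   [ ChunkHeader | user memory ... ]
//   -> no magic at alloc_beg; Get() returns alloc_beg itself, which
//      already *is* the ChunkHeader.
//
// alloc_beg != chunk_beg (e.g. over-aligned allocation):
//   [ magic | back-ptr | padding | ChunkHeader | user memory ... ]
//   -> a LargeChunkHeader sits at alloc_beg; Get() follows the
//      published back-pointer to the ChunkHeader.
```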