Skip to content

Commit 3c87126

Browse files
committed
Use memory tracking annotations in base alloc, disjoint_pool and jemalloc_pool
1 parent 1e3b8dc commit 3c87126

File tree

6 files changed

+102
-4
lines changed

6 files changed

+102
-4
lines changed

src/base_alloc/base_alloc.c

Lines changed: 42 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include "base_alloc_internal.h"
1212
#include "utils_common.h"
1313
#include "utils_concurrency.h"
14+
#include "utils_sanitizers.h"
1415

1516
// minimum size of a single pool of the base allocator
1617
#define MINIMUM_POOL_SIZE (ba_os_get_page_size())
@@ -80,7 +81,10 @@ static void ba_debug_checks(umf_ba_pool_t *pool) {
8081
umf_ba_chunk_t *next_chunk = pool->metadata.free_list;
8182
while (next_chunk) {
8283
n_free_chunks++;
84+
utils_annotate_memory_defined(next_chunk, sizeof(umf_ba_chunk_t));
85+
umf_ba_chunk_t *tmp = next_chunk;
8386
next_chunk = next_chunk->next;
87+
utils_annotate_memory_inaccessible(tmp, sizeof(umf_ba_chunk_t));
8488
}
8589
assert(n_free_chunks == pool->metadata.n_chunks - pool->metadata.n_allocs);
8690
}
@@ -89,6 +93,9 @@ static void ba_debug_checks(umf_ba_pool_t *pool) {
8993
// ba_divide_memory_into_chunks - divide given memory into chunks of chunk_size and add them to the free_list
9094
static void ba_divide_memory_into_chunks(umf_ba_pool_t *pool, void *ptr,
9195
size_t size) {
96+
// mark the memory temporarily accessible to perform the division
97+
utils_annotate_memory_undefined(ptr, size);
98+
9299
assert(pool->metadata.free_list == NULL);
93100
assert(size > pool->metadata.chunk_size);
94101

@@ -112,6 +119,17 @@ static void ba_divide_memory_into_chunks(umf_ba_pool_t *pool, void *ptr,
112119

113120
current_chunk->next = NULL;
114121
pool->metadata.free_list = ptr; // address of the first chunk
122+
123+
// mark the memory as inaccessible again
124+
utils_annotate_memory_inaccessible(ptr, size);
125+
}
126+
127+
static void *ba_os_alloc_annotated(size_t pool_size) {
128+
void *ptr = ba_os_alloc(pool_size);
129+
if (ptr) {
130+
utils_annotate_memory_inaccessible(ptr, pool_size);
131+
}
132+
return ptr;
115133
}
116134

117135
umf_ba_pool_t *umf_ba_create(size_t size) {
@@ -127,11 +145,14 @@ umf_ba_pool_t *umf_ba_create(size_t size) {
127145

128146
pool_size = ALIGN_UP(pool_size, ba_os_get_page_size());
129147

130-
umf_ba_pool_t *pool = (umf_ba_pool_t *)ba_os_alloc(pool_size);
148+
umf_ba_pool_t *pool = (umf_ba_pool_t *)ba_os_alloc_annotated(pool_size);
131149
if (!pool) {
132150
return NULL;
133151
}
134152

153+
// annotate metadata region as accessible
154+
utils_annotate_memory_undefined(pool, offsetof(umf_ba_pool_t, data));
155+
135156
pool->metadata.pool_size = pool_size;
136157
pool->metadata.chunk_size = chunk_size;
137158
pool->next_pool = NULL; // this is the only pool now
@@ -141,6 +162,8 @@ umf_ba_pool_t *umf_ba_create(size_t size) {
141162
pool->metadata.n_chunks = 0;
142163
#endif /* NDEBUG */
143164

165+
utils_annotate_memory_defined(pool, offsetof(umf_ba_pool_t, data));
166+
144167
char *data_ptr = (char *)&pool->data;
145168
size_t size_left = pool_size - offsetof(umf_ba_pool_t, data);
146169

@@ -163,12 +186,16 @@ void *umf_ba_alloc(umf_ba_pool_t *pool) {
163186
util_mutex_lock(&pool->metadata.free_lock);
164187
if (pool->metadata.free_list == NULL) {
165188
umf_ba_next_pool_t *new_pool =
166-
(umf_ba_next_pool_t *)ba_os_alloc(pool->metadata.pool_size);
189+
(umf_ba_next_pool_t *)ba_os_alloc_annotated(
190+
pool->metadata.pool_size);
167191
if (!new_pool) {
168192
util_mutex_unlock(&pool->metadata.free_lock);
169193
return NULL;
170194
}
171195

196+
// annotate metadata region as accessible
197+
utils_annotate_memory_undefined(new_pool, sizeof(umf_ba_next_pool_t));
198+
172199
// add the new pool to the list of pools
173200
new_pool->next_pool = pool->next_pool;
174201
pool->next_pool = new_pool;
@@ -186,11 +213,20 @@ void *umf_ba_alloc(umf_ba_pool_t *pool) {
186213
}
187214

188215
umf_ba_chunk_t *chunk = pool->metadata.free_list;
216+
217+
// mark the memory defined to read the next ptr, after this is done
218+
// we'll mark the memory as undefined
219+
utils_annotate_memory_defined(chunk, sizeof(chunk));
220+
189221
pool->metadata.free_list = pool->metadata.free_list->next;
190222
pool->metadata.n_allocs++;
191223
#ifndef NDEBUG
192224
ba_debug_checks(pool);
193225
#endif /* NDEBUG */
226+
227+
VALGRIND_DO_MALLOCLIKE_BLOCK(chunk, pool->metadata.chunk_size, 0, 0);
228+
utils_annotate_memory_undefined(chunk, pool->metadata.chunk_size);
229+
194230
util_mutex_unlock(&pool->metadata.free_lock);
195231

196232
return chunk;
@@ -234,6 +270,10 @@ void umf_ba_free(umf_ba_pool_t *pool, void *ptr) {
234270
#ifndef NDEBUG
235271
ba_debug_checks(pool);
236272
#endif /* NDEBUG */
273+
274+
VALGRIND_DO_FREELIKE_BLOCK(chunk, 0);
275+
utils_annotate_memory_inaccessible(chunk, pool->metadata.chunk_size);
276+
237277
util_mutex_unlock(&pool->metadata.free_lock);
238278
}
239279

src/pool/pool_disjoint.cpp

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#include "pool_disjoint.h"
2727
#include "umf.h"
2828
#include "utils_math.h"
29+
#include "utils_sanitizers.h"
2930

3031
typedef struct umf_disjoint_pool_shared_limits_t {
3132
size_t MaxSize;
@@ -329,6 +330,8 @@ class DisjointPool::AllocImpl {
329330
umf_disjoint_pool_params_t *params)
330331
: MemHandle{hProvider}, params(*params) {
331332

333+
VALGRIND_DO_CREATE_MEMPOOL(this, 0, 0);
334+
332335
// Generate buckets sized such as: 64, 96, 128, 192, ..., CutOff.
333336
// Powers of 2 and the value halfway between the powers of 2.
334337
auto Size1 = this->params.MinBucketSize;
@@ -352,6 +355,8 @@ class DisjointPool::AllocImpl {
352355
}
353356
}
354357

358+
~AllocImpl() { VALGRIND_DO_DESTROY_MEMPOOL(this); }
359+
355360
void *allocate(size_t Size, size_t Alignment, bool &FromPool);
356361
void *allocate(size_t Size, bool &FromPool);
357362
void deallocate(void *Ptr, bool &ToPool);
@@ -392,6 +397,7 @@ static void *memoryProviderAlloc(umf_memory_provider_handle_t hProvider,
392397
if (ret != UMF_RESULT_SUCCESS) {
393398
throw MemoryProviderError{ret};
394399
}
400+
utils_annotate_memory_inaccessible(ptr, size);
395401
return ptr;
396402
}
397403

@@ -798,7 +804,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, bool &FromPool) try {
798804

799805
FromPool = false;
800806
if (Size > getParams().MaxPoolableSize) {
801-
return memoryProviderAlloc(getMemHandle(), Size);
807+
Ptr = memoryProviderAlloc(getMemHandle(), Size);
808+
utils_annotate_memory_undefined(Ptr, Size);
809+
return Ptr;
802810
}
803811

804812
auto &Bucket = findBucket(Size);
@@ -813,6 +821,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, bool &FromPool) try {
813821
Bucket.countAlloc(FromPool);
814822
}
815823

824+
VALGRIND_DO_MEMPOOL_ALLOC(this, Ptr, Size);
825+
utils_annotate_memory_undefined(Ptr, Bucket.getSize());
826+
816827
return Ptr;
817828
} catch (MemoryProviderError &e) {
818829
umf::getPoolLastStatusRef<DisjointPool>() = e.code;
@@ -848,7 +859,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, size_t Alignment,
848859
// If not, just request aligned pointer from the system.
849860
FromPool = false;
850861
if (AlignedSize > getParams().MaxPoolableSize) {
851-
return memoryProviderAlloc(getMemHandle(), Size, Alignment);
862+
Ptr = memoryProviderAlloc(getMemHandle(), Size, Alignment);
863+
utils_annotate_memory_undefined(Ptr, Size);
864+
return Ptr;
852865
}
853866

854867
auto &Bucket = findBucket(AlignedSize);
@@ -863,6 +876,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, size_t Alignment,
863876
Bucket.countAlloc(FromPool);
864877
}
865878

879+
VALGRIND_DO_MEMPOOL_ALLOC(this, AlignPtrUp(Ptr, Alignment), Size);
880+
utils_annotate_memory_undefined(AlignPtrUp(Ptr, Alignment), Size);
881+
866882
return AlignPtrUp(Ptr, Alignment);
867883
} catch (MemoryProviderError &e) {
868884
umf::getPoolLastStatusRef<DisjointPool>() = e.code;
@@ -929,6 +945,9 @@ void DisjointPool::AllocImpl::deallocate(void *Ptr, bool &ToPool) {
929945
Bucket.countFree();
930946
}
931947

948+
VALGRIND_DO_MEMPOOL_FREE(this, Ptr);
949+
utils_annotate_memory_inaccessible(Ptr, Bucket.getSize());
950+
932951
if (Bucket.getSize() <= Bucket.ChunkCutOff()) {
933952
Bucket.freeChunk(Ptr, Slab, ToPool);
934953
} else {

src/pool/pool_jemalloc.c

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@
1616
#include "base_alloc_global.h"
1717
#include "utils_common.h"
1818
#include "utils_concurrency.h"
19+
#include "utils_sanitizers.h"
20+
1921
#include <umf/memory_pool.h>
2022
#include <umf/memory_pool_ops.h>
2123
#include <umf/pools/pool_jemalloc.h>
@@ -72,7 +74,14 @@ static void *arena_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr,
7274
return NULL;
7375
}
7476

77+
#ifndef __SANITIZE_ADDRESS__
78+
// jemalloc might write to new extents in realloc, so we cannot
79+
// mark them as inaccessible under asan
80+
utils_annotate_memory_inaccessible(ptr, size);
81+
#endif
82+
7583
if (*zero) {
84+
utils_annotate_memory_defined(ptr, size);
7685
memset(ptr, 0, size); // TODO: device memory is not accessible by host
7786
}
7887

@@ -282,6 +291,8 @@ static void *je_malloc(void *pool, size_t size) {
282291
return NULL;
283292
}
284293

294+
VALGRIND_DO_MEMPOOL_ALLOC(pool, ptr, size);
295+
285296
return ptr;
286297
}
287298

@@ -290,6 +301,7 @@ static umf_result_t je_free(void *pool, void *ptr) {
290301
assert(pool);
291302

292303
if (ptr != NULL) {
304+
VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
293305
dallocx(ptr, MALLOCX_TCACHE_NONE);
294306
}
295307

@@ -305,6 +317,8 @@ static void *je_calloc(void *pool, size_t num, size_t size) {
305317
return NULL;
306318
}
307319

320+
utils_annotate_memory_defined(ptr, num * size);
321+
308322
memset(ptr, 0, csize); // TODO: device memory is not accessible by host
309323
return ptr;
310324
}
@@ -314,6 +328,7 @@ static void *je_realloc(void *pool, void *ptr, size_t size) {
314328
if (size == 0 && ptr != NULL) {
315329
dallocx(ptr, MALLOCX_TCACHE_NONE);
316330
TLS_last_allocation_error = UMF_RESULT_SUCCESS;
331+
VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
317332
return NULL;
318333
} else if (ptr == NULL) {
319334
return je_malloc(pool, size);
@@ -329,6 +344,14 @@ static void *je_realloc(void *pool, void *ptr, size_t size) {
329344
return NULL;
330345
}
331346

347+
if (new_ptr != ptr) {
348+
VALGRIND_DO_MEMPOOL_ALLOC(pool, new_ptr, size);
349+
VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
350+
351+
// memory was copied from old ptr so it's now defined
352+
utils_annotate_memory_defined(new_ptr, size);
353+
}
354+
332355
return new_ptr;
333356
}
334357

@@ -346,6 +369,8 @@ static void *je_aligned_alloc(void *pool, size_t size, size_t alignment) {
346369
return NULL;
347370
}
348371

372+
VALGRIND_DO_MEMPOOL_ALLOC(pool, ptr, size);
373+
349374
return ptr;
350375
}
351376

@@ -392,6 +417,8 @@ static umf_result_t je_initialize(umf_memory_provider_handle_t provider,
392417

393418
*out_pool = (umf_memory_pool_handle_t)pool;
394419

420+
VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0);
421+
395422
return UMF_RESULT_SUCCESS;
396423

397424
err_free_pool:
@@ -407,6 +434,8 @@ static void je_finalize(void *pool) {
407434
mallctl(cmd, NULL, 0, NULL, 0);
408435
pool_by_arena_index[je_pool->arena_index] = NULL;
409436
umf_ba_global_free(je_pool);
437+
438+
VALGRIND_DO_DESTROY_MEMPOOL(pool);
410439
}
411440

412441
static size_t je_malloc_usable_size(void *pool, void *ptr) {

test/common/pool.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ bool isReallocSupported(umf_memory_pool_handle_t hPool) {
4545
static constexpr size_t allocSize = 8;
4646
bool supported = false;
4747
auto *ptr = umfPoolMalloc(hPool, allocSize);
48+
memset(ptr, 0, allocSize);
4849
auto *new_ptr = umfPoolRealloc(hPool, ptr, allocSize * 2);
4950

5051
if (new_ptr) {

test/poolFixtures.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ TEST_P(umfPoolTest, reallocFree) {
115115
static constexpr size_t multiplier = 3;
116116
auto *ptr = umfPoolMalloc(pool.get(), allocSize);
117117
ASSERT_NE(ptr, nullptr);
118+
memset(ptr, 0, allocSize);
118119
auto *new_ptr = umfPoolRealloc(pool.get(), ptr, allocSize * multiplier);
119120
ASSERT_NE(new_ptr, nullptr);
120121
std::memset(new_ptr, 0, allocSize * multiplier);

test/supp/umf_test-jemalloc_pool.supp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
{
2+
realloc false-positive suppression - see pool_jemalloc.c for more details
3+
Memcheck:Addr8
4+
fun:memmove
5+
...
6+
fun:do_rallocx
7+
fun:je_realloc
8+
}

0 commit comments

Comments
 (0)