Skip to content

Add memcheck and asan annotations #275

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Mar 7, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/nightly.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ jobs:
-DUMF_BUILD_LIBUMF_POOL_DISJOINT=ON
-DUMF_BUILD_LIBUMF_POOL_JEMALLOC=ON
-DUMF_BUILD_LEVEL_ZERO_PROVIDER=OFF
-DUSE_VALGRIND=1

- name: Build
run: cmake --build ${{github.workspace}}/build --config Debug -j$(nproc)
Expand Down
1 change: 1 addition & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ option(USE_ASAN "Enable AddressSanitizer checks" OFF)
option(USE_UBSAN "Enable UndefinedBehaviorSanitizer checks" OFF)
option(USE_TSAN "Enable ThreadSanitizer checks" OFF)
option(USE_MSAN "Enable MemorySanitizer checks" OFF)
option(USE_VALGRIND "Enable Valgrind instrumentation" OFF)

# For using the options listed in the OPTIONS_REQUIRING_CXX variable
# a C++17 compiler is required. Moreover, if these options are not set,
Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@ List of options provided by CMake:
| USE_UBSAN | Enable UndefinedBehaviorSanitizer checks | ON/OFF | OFF |
| USE_TSAN | Enable ThreadSanitizer checks | ON/OFF | OFF |
| USE_MSAN | Enable MemorySanitizer checks | ON/OFF | OFF |
| USE_VALGRIND | Enable Valgrind instrumentation | ON/OFF | OFF |

## Architecture: memory pools and providers

Expand Down
44 changes: 42 additions & 2 deletions src/base_alloc/base_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include "base_alloc_internal.h"
#include "utils_common.h"
#include "utils_concurrency.h"
#include "utils_sanitizers.h"

// minimum size of a single pool of the base allocator
#define MINIMUM_POOL_SIZE (ba_os_get_page_size())
Expand Down Expand Up @@ -80,7 +81,10 @@ static void ba_debug_checks(umf_ba_pool_t *pool) {
umf_ba_chunk_t *next_chunk = pool->metadata.free_list;
while (next_chunk) {
n_free_chunks++;
utils_annotate_memory_defined(next_chunk, sizeof(umf_ba_chunk_t));
umf_ba_chunk_t *tmp = next_chunk;
next_chunk = next_chunk->next;
utils_annotate_memory_inaccessible(tmp, sizeof(umf_ba_chunk_t));
}
assert(n_free_chunks == pool->metadata.n_chunks - pool->metadata.n_allocs);
}
Expand All @@ -89,6 +93,9 @@ static void ba_debug_checks(umf_ba_pool_t *pool) {
// ba_divide_memory_into_chunks - divide given memory into chunks of chunk_size and add them to the free_list
static void ba_divide_memory_into_chunks(umf_ba_pool_t *pool, void *ptr,
size_t size) {
// mark the memory temporarily accessible to perform the division
utils_annotate_memory_undefined(ptr, size);

assert(pool->metadata.free_list == NULL);
assert(size > pool->metadata.chunk_size);

Expand All @@ -112,6 +119,17 @@ static void ba_divide_memory_into_chunks(umf_ba_pool_t *pool, void *ptr,

current_chunk->next = NULL;
pool->metadata.free_list = ptr; // address of the first chunk

// mark the memory as inaccessible again
utils_annotate_memory_inaccessible(ptr, size);
}

// Allocate a pool-sized region from the OS and immediately mark it
// inaccessible for the memory-checker tooling (valgrind/ASan annotations),
// so that any touch before a proper allocation is reported.
// Returns NULL when the underlying OS allocation fails.
static void *ba_os_alloc_annotated(size_t pool_size) {
    void *mem = ba_os_alloc(pool_size);
    if (mem == NULL) {
        return NULL;
    }

    utils_annotate_memory_inaccessible(mem, pool_size);
    return mem;
}

umf_ba_pool_t *umf_ba_create(size_t size) {
Expand All @@ -127,11 +145,14 @@ umf_ba_pool_t *umf_ba_create(size_t size) {

pool_size = ALIGN_UP(pool_size, ba_os_get_page_size());

umf_ba_pool_t *pool = (umf_ba_pool_t *)ba_os_alloc(pool_size);
umf_ba_pool_t *pool = (umf_ba_pool_t *)ba_os_alloc_annotated(pool_size);
if (!pool) {
return NULL;
}

// annotate metadata region as accessible
utils_annotate_memory_undefined(pool, offsetof(umf_ba_pool_t, data));

pool->metadata.pool_size = pool_size;
pool->metadata.chunk_size = chunk_size;
pool->next_pool = NULL; // this is the only pool now
Expand All @@ -141,6 +162,8 @@ umf_ba_pool_t *umf_ba_create(size_t size) {
pool->metadata.n_chunks = 0;
#endif /* NDEBUG */

utils_annotate_memory_defined(pool, offsetof(umf_ba_pool_t, data));

char *data_ptr = (char *)&pool->data;
size_t size_left = pool_size - offsetof(umf_ba_pool_t, data);

Expand All @@ -163,12 +186,16 @@ void *umf_ba_alloc(umf_ba_pool_t *pool) {
util_mutex_lock(&pool->metadata.free_lock);
if (pool->metadata.free_list == NULL) {
umf_ba_next_pool_t *new_pool =
(umf_ba_next_pool_t *)ba_os_alloc(pool->metadata.pool_size);
(umf_ba_next_pool_t *)ba_os_alloc_annotated(
pool->metadata.pool_size);
if (!new_pool) {
util_mutex_unlock(&pool->metadata.free_lock);
return NULL;
}

// annotate metadata region as accessible
utils_annotate_memory_undefined(new_pool, sizeof(umf_ba_next_pool_t));

// add the new pool to the list of pools
new_pool->next_pool = pool->next_pool;
pool->next_pool = new_pool;
Expand All @@ -186,11 +213,20 @@ void *umf_ba_alloc(umf_ba_pool_t *pool) {
}

umf_ba_chunk_t *chunk = pool->metadata.free_list;

// Temporarily mark the chunk's memory as defined so the free-list 'next'
// pointer can be read; it is re-marked undefined below before being handed
// out. NOTE(review): the annotation below uses sizeof(chunk) — the size of
// the pointer, not the chunk. ba_debug_checks uses sizeof(umf_ba_chunk_t)
// for the same purpose; verify whether sizeof(*chunk) was intended here.
utils_annotate_memory_defined(chunk, sizeof(chunk));

pool->metadata.free_list = pool->metadata.free_list->next;
pool->metadata.n_allocs++;
#ifndef NDEBUG
ba_debug_checks(pool);
#endif /* NDEBUG */

VALGRIND_DO_MALLOCLIKE_BLOCK(chunk, pool->metadata.chunk_size, 0, 0);
utils_annotate_memory_undefined(chunk, pool->metadata.chunk_size);

util_mutex_unlock(&pool->metadata.free_lock);

return chunk;
Expand Down Expand Up @@ -234,6 +270,10 @@ void umf_ba_free(umf_ba_pool_t *pool, void *ptr) {
#ifndef NDEBUG
ba_debug_checks(pool);
#endif /* NDEBUG */

VALGRIND_DO_FREELIKE_BLOCK(chunk, 0);
utils_annotate_memory_inaccessible(chunk, pool->metadata.chunk_size);

util_mutex_unlock(&pool->metadata.free_lock);
}

Expand Down
23 changes: 21 additions & 2 deletions src/pool/pool_disjoint.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
#include "pool_disjoint.h"
#include "umf.h"
#include "utils_math.h"
#include "utils_sanitizers.h"

typedef struct umf_disjoint_pool_shared_limits_t {
size_t MaxSize;
Expand Down Expand Up @@ -329,6 +330,8 @@ class DisjointPool::AllocImpl {
umf_disjoint_pool_params_t *params)
: MemHandle{hProvider}, params(*params) {

VALGRIND_DO_CREATE_MEMPOOL(this, 0, 0);

// Generate buckets sized such as: 64, 96, 128, 192, ..., CutOff.
// Powers of 2 and the value halfway between the powers of 2.
auto Size1 = this->params.MinBucketSize;
Expand All @@ -352,6 +355,8 @@ class DisjointPool::AllocImpl {
}
}

~AllocImpl() { VALGRIND_DO_DESTROY_MEMPOOL(this); }

void *allocate(size_t Size, size_t Alignment, bool &FromPool);
void *allocate(size_t Size, bool &FromPool);
void deallocate(void *Ptr, bool &ToPool);
Expand Down Expand Up @@ -392,6 +397,7 @@ static void *memoryProviderAlloc(umf_memory_provider_handle_t hProvider,
if (ret != UMF_RESULT_SUCCESS) {
throw MemoryProviderError{ret};
}
utils_annotate_memory_inaccessible(ptr, size);
return ptr;
}

Expand Down Expand Up @@ -798,7 +804,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, bool &FromPool) try {

FromPool = false;
if (Size > getParams().MaxPoolableSize) {
return memoryProviderAlloc(getMemHandle(), Size);
Ptr = memoryProviderAlloc(getMemHandle(), Size);
utils_annotate_memory_undefined(Ptr, Size);
return Ptr;
}

auto &Bucket = findBucket(Size);
Expand All @@ -813,6 +821,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, bool &FromPool) try {
Bucket.countAlloc(FromPool);
}

VALGRIND_DO_MEMPOOL_ALLOC(this, Ptr, Size);
utils_annotate_memory_undefined(Ptr, Bucket.getSize());

return Ptr;
} catch (MemoryProviderError &e) {
umf::getPoolLastStatusRef<DisjointPool>() = e.code;
Expand Down Expand Up @@ -848,7 +859,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, size_t Alignment,
// If not, just request aligned pointer from the system.
FromPool = false;
if (AlignedSize > getParams().MaxPoolableSize) {
return memoryProviderAlloc(getMemHandle(), Size, Alignment);
Ptr = memoryProviderAlloc(getMemHandle(), Size, Alignment);
utils_annotate_memory_undefined(Ptr, Size);
return Ptr;
}

auto &Bucket = findBucket(AlignedSize);
Expand All @@ -863,6 +876,9 @@ void *DisjointPool::AllocImpl::allocate(size_t Size, size_t Alignment,
Bucket.countAlloc(FromPool);
}

VALGRIND_DO_MEMPOOL_ALLOC(this, AlignPtrUp(Ptr, Alignment), Size);
utils_annotate_memory_undefined(AlignPtrUp(Ptr, Alignment), Size);

return AlignPtrUp(Ptr, Alignment);
} catch (MemoryProviderError &e) {
umf::getPoolLastStatusRef<DisjointPool>() = e.code;
Expand Down Expand Up @@ -929,6 +945,9 @@ void DisjointPool::AllocImpl::deallocate(void *Ptr, bool &ToPool) {
Bucket.countFree();
}

VALGRIND_DO_MEMPOOL_FREE(this, Ptr);
utils_annotate_memory_inaccessible(Ptr, Bucket.getSize());

if (Bucket.getSize() <= Bucket.ChunkCutOff()) {
Bucket.freeChunk(Ptr, Slab, ToPool);
} else {
Expand Down
29 changes: 29 additions & 0 deletions src/pool/pool_jemalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@
#include "base_alloc_global.h"
#include "utils_common.h"
#include "utils_concurrency.h"
#include "utils_sanitizers.h"

#include <umf/memory_pool.h>
#include <umf/memory_pool_ops.h>
#include <umf/pools/pool_jemalloc.h>
Expand Down Expand Up @@ -72,7 +74,14 @@ static void *arena_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr,
return NULL;
}

#ifndef __SANITIZE_ADDRESS__
// jemalloc might write to new extents in realloc, so we cannot
// mark them as inaccessible under ASan
utils_annotate_memory_inaccessible(ptr, size);
#endif

if (*zero) {
utils_annotate_memory_defined(ptr, size);
memset(ptr, 0, size); // TODO: device memory is not accessible by host
}

Expand Down Expand Up @@ -282,6 +291,8 @@ static void *je_malloc(void *pool, size_t size) {
return NULL;
}

VALGRIND_DO_MEMPOOL_ALLOC(pool, ptr, size);

return ptr;
}

Expand All @@ -290,6 +301,7 @@ static umf_result_t je_free(void *pool, void *ptr) {
assert(pool);

if (ptr != NULL) {
VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
dallocx(ptr, MALLOCX_TCACHE_NONE);
}

Expand All @@ -305,6 +317,8 @@ static void *je_calloc(void *pool, size_t num, size_t size) {
return NULL;
}

utils_annotate_memory_defined(ptr, num * size);

memset(ptr, 0, csize); // TODO: device memory is not accessible by host
return ptr;
}
Expand All @@ -314,6 +328,7 @@ static void *je_realloc(void *pool, void *ptr, size_t size) {
if (size == 0 && ptr != NULL) {
dallocx(ptr, MALLOCX_TCACHE_NONE);
TLS_last_allocation_error = UMF_RESULT_SUCCESS;
VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
return NULL;
} else if (ptr == NULL) {
return je_malloc(pool, size);
Expand All @@ -329,6 +344,14 @@ static void *je_realloc(void *pool, void *ptr, size_t size) {
return NULL;
}

if (new_ptr != ptr) {
VALGRIND_DO_MEMPOOL_ALLOC(pool, new_ptr, size);
VALGRIND_DO_MEMPOOL_FREE(pool, ptr);

// memory was copied from old ptr so it's now defined
utils_annotate_memory_defined(new_ptr, size);
}

return new_ptr;
}

Expand All @@ -346,6 +369,8 @@ static void *je_aligned_alloc(void *pool, size_t size, size_t alignment) {
return NULL;
}

VALGRIND_DO_MEMPOOL_ALLOC(pool, ptr, size);

return ptr;
}

Expand Down Expand Up @@ -392,6 +417,8 @@ static umf_result_t je_initialize(umf_memory_provider_handle_t provider,

*out_pool = (umf_memory_pool_handle_t)pool;

VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0);

return UMF_RESULT_SUCCESS;

err_free_pool:
Expand All @@ -407,6 +434,8 @@ static void je_finalize(void *pool) {
mallctl(cmd, NULL, 0, NULL, 0);
pool_by_arena_index[je_pool->arena_index] = NULL;
umf_ba_global_free(je_pool);

VALGRIND_DO_DESTROY_MEMPOOL(pool);
}

static size_t je_malloc_usable_size(void *pool, void *ptr) {
Expand Down
20 changes: 19 additions & 1 deletion src/utils/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,19 @@ set(UMF_UTILS_SOURCES_WINDOWS
utils_windows_math.c
)

# Valgrind instrumentation cannot be combined with compiler sanitizers:
# both instrument the same memory accesses and would conflict at runtime.
if(USE_VALGRIND)
    if (USE_ASAN OR USE_TSAN OR USE_UBSAN OR USE_MSAN)
        message(FATAL_ERROR "Cannot use valgrind and sanitizers together")
    endif()

    # Prefer pkg-config for locating the valgrind headers; fall back to
    # find_package when pkg-config is unavailable or did not find it.
    if(PkgConfig_FOUND)
        pkg_check_modules(VALGRIND valgrind)
    endif()
    if(NOT VALGRIND_FOUND)
        find_package(VALGRIND REQUIRED valgrind)
    endif()
endif()

if(LINUX OR MACOSX)
set(UMF_UTILS_SOURCES ${UMF_UTILS_SOURCES_POSIX})
elseif(WINDOWS)
Expand All @@ -30,11 +43,16 @@ add_umf_library(NAME umf_utils

add_library(${PROJECT_NAME}::utils ALIAS umf_utils)

target_include_directories(umf_utils PUBLIC
target_include_directories(umf_utils PUBLIC
${VALGRIND_INCLUDE_DIRS}
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
)

# PUBLIC so the definition propagates to every target linking umf_utils;
# NOTE(review): UMF_VG_ENABLED presumably gates the valgrind annotation
# macros in utils_sanitizers.h — confirm against that header.
if(USE_VALGRIND)
    target_compile_definitions(umf_utils PUBLIC UMF_VG_ENABLED=1)
endif()

install(TARGETS umf_utils
EXPORT ${PROJECT_NAME}-targets)
Loading