@@ -10,6 +10,8 @@
 // Temporary solution for disabling memory poisoning. This is needed because
 // AddressSanitizer does not support memory poisoning for GPU allocations.
 // More info: https://github.com/oneapi-src/unified-memory-framework/issues/634
+
+// TODO - add a param to disjoint pool to disable memory poisoning
 #ifndef POISON_MEMORY
 #undef __SANITIZE_ADDRESS__
 #endif
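
For context, the #undef works because annotation helpers are typically compiled to no-ops when __SANITIZE_ADDRESS__ is not defined. A minimal sketch of that gating, using the real <sanitizer/asan_interface.h> macros but illustrative helper names, not the actual UMF implementation:

// Illustrative gating (not the UMF source): with __SANITIZE_ADDRESS__
// undefined, the poisoning helpers compile away to no-ops.
#ifdef __SANITIZE_ADDRESS__
#include <sanitizer/asan_interface.h>
#define annotate_memory_inaccessible(p, n) ASAN_POISON_MEMORY_REGION((p), (n))
#define annotate_memory_undefined(p, n) ASAN_UNPOISON_MEMORY_REGION((p), (n))
#else
#define annotate_memory_inaccessible(p, n) ((void)(p), (void)(n))
#define annotate_memory_undefined(p, n) ((void)(p), (void)(n))
#endif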
@@ -94,9 +96,6 @@ static slab_t *create_slab(bucket_t *bucket) {
         goto free_slab_chunks;
     }
 
-    // TODO
-    // ASSERT_IS_ALIGNED((uintptr_t)slab->mem_ptr, bucket->size);
-
     // raw allocation is not available for user so mark it as inaccessible
     utils_annotate_memory_inaccessible(slab->mem_ptr, slab->slab_size);
 
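
The dropped assertion matches the NOTE added below in disjoint_pool_free: slab->mem_ptr is not guaranteed to be a multiple of the bucket size. A standalone illustration with made-up values:

// Standalone illustration (made-up values) of why the assertion was too
// strong: a provider may hand back a slab base that is not a multiple of
// the bucket size, yet chunk addressing still works from that base.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void) {
    uintptr_t mem_ptr = 0x1004; // hypothetical slab base from the provider
    size_t bucket_size = 64;    // hypothetical bucket (chunk) size
    // The removed ASSERT_IS_ALIGNED required mem_ptr % bucket_size == 0,
    // which does not hold for this base:
    assert(mem_ptr % bucket_size != 0);
    return 0;
}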
@@ -178,7 +177,8 @@ static void slab_free_chunk(slab_t *slab, void *ptr) {
     // Even if the pointer p was previously aligned, it's still inside the
     // corresponding chunk, so we get the correct index here.
     size_t chunk_idx =
-        ((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) / slab->bucket->size;
+        floor((double)((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) /
+              slab->bucket->size);
 
     // Make sure that the chunk was allocated
     assert(slab->chunks[chunk_idx] && "double free detected");
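
A standalone illustration of the index math, with made-up values: any pointer inside chunk i maps back to index i, even when the slab base is unaligned. The sketch uses plain integer division, which already floors the non-negative offsets involved:

// Worked example of the chunk-index computation (values are made up).
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void) {
    uintptr_t mem_ptr = 0x1004;                     // slab base (unaligned)
    size_t bucket_size = 64;                        // chunk size
    uintptr_t ptr = mem_ptr + 3 * bucket_size + 17; // somewhere inside chunk 3
    size_t chunk_idx = (ptr - mem_ptr) / bucket_size;
    assert(chunk_idx == 3);
    return 0;
}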
@@ -738,8 +738,6 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
         }
     }
 
-    utils_mutex_unlock(&bucket->bucket_lock);
-
     if (disjoint_pool->params.pool_trace > 2) {
         LOG_DEBUG("Allocated %8zu %s bytes aligned at %zu from %s -> %p", size,
                   disjoint_pool->params.name, alignment,
@@ -749,6 +747,8 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
     void *aligned_ptr = (void *)ALIGN_UP_SAFE((size_t)ptr, alignment);
     VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, aligned_ptr, size);
     utils_annotate_memory_undefined(aligned_ptr, size);
+
+    utils_mutex_unlock(&bucket->bucket_lock);
     return aligned_ptr;
 }
 
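
Taken together with the previous hunk, this moves the unlock so that the Valgrind and sanitizer annotations on the fresh chunk run while the bucket lock is still held, presumably to order them against concurrent operations on the same bucket. A self-contained sketch of the resulting pattern, using pthreads and illustrative stand-ins rather than the UMF API:

// Sketch of the lock ordering after the change (illustrative names only).
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

// Stand-ins for the pool's chunk selection and sanitizer annotation.
static void *get_free_chunk(void) { static char chunk[64]; return chunk; }
static void annotate_memory_undefined(void *ptr, size_t size) {
    (void)ptr;
    (void)size;
}

void *alloc_chunk(size_t size) {
    pthread_mutex_lock(&bucket_lock);
    void *ptr = get_free_chunk();
    // Annotate before releasing the lock, mirroring the hunk above.
    annotate_memory_undefined(ptr, size);
    pthread_mutex_unlock(&bucket_lock);
    return ptr;
}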
@@ -804,10 +804,20 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
 
     bucket_t *bucket = slab->bucket;
 
-    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
     utils_mutex_lock(&bucket->bucket_lock);
 
-    utils_annotate_memory_inaccessible(ptr, bucket->size);
+    // TODO valgrind
+    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
+
+    // Get the unaligned pointer
+    // NOTE: the base pointer slab->mem_ptr need not be aligned to the bucket size
+    size_t chunk_idx =
+        floor((double)((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) /
+              slab->bucket->size);
+    void *unaligned_ptr =
+        (void *)((uintptr_t)slab->mem_ptr + chunk_idx * slab->bucket->size);
+
+    utils_annotate_memory_inaccessible(unaligned_ptr, bucket->size);
     bucket_free_chunk(bucket, ptr, slab, &to_pool);
 
     if (disjoint_pool->params.pool_trace > 1) {
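
A worked example of the fix, with made-up values: ptr may have been aligned up inside its chunk, so poisoning bucket->size bytes starting at ptr could spill into the next chunk; poisoning from the recomputed chunk base keeps the range in bounds:

// Worked example of the free-path range fix (values are made up).
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void) {
    uintptr_t mem_ptr = 0x1000;                     // slab base
    size_t bucket_size = 64;                        // chunk size
    uintptr_t ptr = mem_ptr + 2 * bucket_size + 32; // aligned-up user pointer
    size_t chunk_idx = (ptr - mem_ptr) / bucket_size;
    uintptr_t unaligned_ptr = mem_ptr + chunk_idx * bucket_size;
    // The recomputed range stays inside chunk 2 ...
    assert(unaligned_ptr + bucket_size <= mem_ptr + 3 * bucket_size);
    // ... while poisoning from ptr would reach into chunk 3:
    assert(ptr + bucket_size > mem_ptr + 3 * bucket_size);
    return 0;
}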