@@ -5,7 +5,23 @@
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  */
 
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <umf/memory_pool.h>
+#include <umf/memory_pool_ops.h>
+#include <umf/memory_provider.h>
+
+#include "base_alloc_global.h"
 #include "pool_disjoint_internal.h"
+#include "provider/provider_tracking.h"
+#include "uthash/utlist.h"
+#include "utils_common.h"
+#include "utils_log.h"
+#include "utils_math.h"
 
 // Temporary solution for disabling memory poisoning. This is needed because
 // AddressSanitizer does not support memory poisoning for GPU allocations.
@@ -94,9 +110,6 @@ static slab_t *create_slab(bucket_t *bucket) {
         goto free_slab_chunks;
     }
 
-    // TODO
-    // ASSERT_IS_ALIGNED((uintptr_t)slab->mem_ptr, bucket->size);
-
     // raw allocation is not available for user so mark it as inaccessible
     utils_annotate_memory_inaccessible(slab->mem_ptr, slab->slab_size);
 
@@ -175,10 +188,10 @@ static void slab_free_chunk(slab_t *slab, void *ptr) {
     // Make sure that we're in the right slab
     assert(ptr >= slab_get(slab) && ptr < slab_get_end(slab));
 
-    // Even if the pointer p was previously aligned, it's still inside the
-    // corresponding chunk, so we get the correct index here.
-    size_t chunk_idx =
-        ((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) / slab->bucket->size;
+    // Get the chunk index
+    uintptr_t ptr_diff = (uintptr_t)ptr - (uintptr_t)slab->mem_ptr;
+    assert((ptr_diff % slab->bucket->size) == 0);
+    size_t chunk_idx = ptr_diff / slab->bucket->size;
 
     // Make sure that the chunk was allocated
     assert(slab->chunks[chunk_idx] && "double free detected");
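
Review note: after this change, slab_free_chunk can assume it receives the chunk's base pointer (disjoint_pool_free now rounds the user pointer back down before calling in, see the last hunk), and the new assert encodes exactly that invariant. A minimal standalone sketch of the index math, not part of the patch, with a simplified stand-in for slab_t:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Simplified stand-in for slab_t; chunk_size plays the role of bucket->size.
struct mini_slab {
    char *mem_ptr;     // start of the slab's memory
    size_t chunk_size; // size of each chunk in the slab
};

static size_t chunk_index(const struct mini_slab *slab, const void *ptr) {
    uintptr_t ptr_diff = (uintptr_t)ptr - (uintptr_t)slab->mem_ptr;
    // Callers now pass the chunk base, so the offset from mem_ptr must be
    // an exact multiple of the chunk size.
    assert(ptr_diff % slab->chunk_size == 0);
    return ptr_diff / slab->chunk_size;
}

int main(void) {
    char backing[256];
    struct mini_slab slab = {backing, 64};
    printf("%zu\n", chunk_index(&slab, backing + 128)); // prints 2
    return 0;
}
```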
@@ -738,6 +751,10 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
         }
     }
 
+    void *aligned_ptr = (void *)ALIGN_UP_SAFE((size_t)ptr, alignment);
+    VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, aligned_ptr, size);
+    utils_annotate_memory_undefined(aligned_ptr, size);
+
     utils_mutex_unlock(&bucket->bucket_lock);
 
     if (disjoint_pool->params.pool_trace > 2) {
@@ -746,9 +763,6 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
                (from_pool ? "pool" : "provider"), ptr);
     }
 
-    void *aligned_ptr = (void *)ALIGN_UP_SAFE((size_t)ptr, alignment);
-    VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, aligned_ptr, size);
-    utils_annotate_memory_undefined(aligned_ptr, size);
     return aligned_ptr;
 }
 
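Review note: these two hunks only move the aligned_ptr computation and its Valgrind/ASan annotations earlier, so they run while the bucket lock is still held; the arithmetic itself is unchanged. For reference, a sketch of what an overflow-checked round-up like ALIGN_UP_SAFE can look like (an assumption for illustration; the real macro is defined in utils_math.h and may differ in detail):

```c
#include <stdint.h>
#include <stdio.h>

// Round value up to the next multiple of alignment (a power of two),
// returning 0 if the addition would overflow uintptr_t.
static uintptr_t align_up_safe(uintptr_t value, uintptr_t alignment) {
    if (value > UINTPTR_MAX - (alignment - 1)) {
        return 0; // overflow; caller must check
    }
    return (value + alignment - 1) & ~(alignment - 1);
}

int main(void) {
    printf("0x%lx\n", (unsigned long)align_up_safe(0x1003, 0x10)); // 0x1010
    return 0;
}
```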
@@ -804,11 +818,18 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
 
     bucket_t *bucket = slab->bucket;
 
-    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
     utils_mutex_lock(&bucket->bucket_lock);
+    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
+
+    // Get the unaligned pointer
+    // NOTE: the base pointer slab->mem_ptr need not be aligned to the bucket size
+    size_t chunk_idx =
+        ((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) / slab->bucket->size;
+    void *unaligned_ptr =
+        (void *)((uintptr_t)slab->mem_ptr + chunk_idx * slab->bucket->size);
 
-    utils_annotate_memory_inaccessible(ptr, bucket->size);
-    bucket_free_chunk(bucket, ptr, slab, &to_pool);
+    utils_annotate_memory_inaccessible(unaligned_ptr, bucket->size);
+    bucket_free_chunk(bucket, unaligned_ptr, slab, &to_pool);
 
     if (disjoint_pool->params.pool_trace > 1) {
         bucket->free_count++;
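
Review note: this hunk is the core of the fix. aligned_malloc hands out aligned_ptr, which may sit a few bytes past the chunk base, so free has to recover the base before annotating the chunk and returning it to the bucket. The rounding is done relative to slab->mem_ptr via integer division rather than a plain align-down, because mem_ptr itself need not sit on a bucket-size boundary in absolute address terms. A standalone sketch of that recovery (hypothetical names, not part of the patch):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Map a (possibly aligned-up) user pointer back to the base of its chunk,
// computing the chunk index relative to the slab's base pointer mem_ptr.
static void *chunk_base(void *mem_ptr, size_t chunk_size, void *user_ptr) {
    size_t chunk_idx =
        ((uintptr_t)user_ptr - (uintptr_t)mem_ptr) / chunk_size;
    return (void *)((uintptr_t)mem_ptr + chunk_idx * chunk_size);
}

int main(void) {
    char backing[256];
    // user pointer 24 bytes into chunk 1 (chunks are 64 bytes here)
    void *user_ptr = backing + 64 + 24;
    void *base = chunk_base(backing, 64, user_ptr);
    assert(base == backing + 64); // rounded back to the chunk start
    printf("offset of base: %td\n", (char *)base - backing); // prints 64
    return 0;
}
```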