  * The following locks and mutexes are used by kmemleak:
  *
  * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
- *   accesses to the object_tree_root. The object_list is the main list
- *   holding the metadata (struct kmemleak_object) for the allocated memory
- *   blocks. The object_tree_root is a red black tree used to look-up
- *   metadata based on a pointer to the corresponding memory block. The
- *   kmemleak_object structures are added to the object_list and
- *   object_tree_root in the create_object() function called from the
- *   kmemleak_alloc() callback and removed in delete_object() called from the
- *   kmemleak_free() callback
+ *   accesses to the object_tree_root (or object_phys_tree_root). The
+ *   object_list is the main list holding the metadata (struct kmemleak_object)
+ *   for the allocated memory blocks. The object_tree_root and object_phys_tree_root
+ *   are red-black trees used to look up metadata based on a pointer to the
+ *   corresponding memory block. The object_phys_tree_root is for objects
+ *   allocated with a physical address. The kmemleak_object structures are
+ *   added to the object_list and object_tree_root (or object_phys_tree_root)
+ *   in the create_object() function called from the kmemleak_alloc() (or
+ *   kmemleak_alloc_phys()) callback and removed in delete_object() called from
+ *   the kmemleak_free() callback
  * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
  *   Accesses to the metadata (e.g. count) are protected by this lock. Note
  *   that some members of this structure may be protected by other means
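The rewritten comment captures the key design decision: two red-black trees, selected by whether an object's pointer holds a virtual or a physical address, both protected by the same kmemleak_lock. A minimal sketch of that selection, using a hypothetical object_tree() helper (the patch itself open-codes the ternary at each call site):

```c
#include <linux/rbtree.h>

static struct rb_root object_tree_root = RB_ROOT;
static struct rb_root object_phys_tree_root = RB_ROOT;

/* Hypothetical helper: pick the tree that matches the address space
 * an object was registered with. */
static struct rb_root *object_tree(bool is_phys)
{
	return is_phys ? &object_phys_tree_root : &object_tree_root;
}
```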
@@ -195,7 +197,9 @@ static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 static LIST_HEAD(mem_pool_free_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
-/* protecting the access to object_list and object_tree_root */
+/* search tree for object (with OBJECT_PHYS flag) boundaries */
+static struct rb_root object_phys_tree_root = RB_ROOT;
+/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 
 /* allocation caches for kmemleak internal data */
@@ -287,6 +291,9 @@ static void hex_dump_object(struct seq_file *seq,
 	const u8 *ptr = (const u8 *)object->pointer;
 	size_t len;
 
+	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+		return;
+
 	/* limit the number of lines to HEX_MAX_LINES */
 	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
@@ -380,9 +387,11 @@ static void dump_object_info(struct kmemleak_object *object)
  * beginning of the memory block are allowed. The kmemleak_lock must be held
  * when calling this function.
  */
-static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
+static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
+					       bool is_phys)
 {
-	struct rb_node *rb = object_tree_root.rb_node;
+	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
+			     object_tree_root.rb_node;
 	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 
 	while (rb) {
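__lookup_object() only changes which root the walk starts from; the search loop itself (outside this hunk) is the usual boundary match. A self-contained sketch of that loop, with alias handling omitted and a reduced object type standing in for struct kmemleak_object:

```c
#include <linux/rbtree.h>
#include <linux/types.h>

/* Reduced stand-in for struct kmemleak_object, enough for the sketch. */
struct obj {
	struct rb_node rb_node;
	unsigned long start;	/* object->pointer in kmemleak */
	size_t size;
};

/* Return the object whose [start, start + size) range contains ptr. */
static struct obj *tree_search(struct rb_root *root, unsigned long ptr)
{
	struct rb_node *rb = root->rb_node;

	while (rb) {
		struct obj *o = rb_entry(rb, struct obj, rb_node);

		if (ptr < o->start)
			rb = rb->rb_left;
		else if (ptr >= o->start + o->size)
			rb = rb->rb_right;
		else
			return o;
	}
	return NULL;
}
```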
@@ -408,6 +417,12 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 	return NULL;
 }
 
+/* Look up a kmemleak object allocated with a virtual address. */
+static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
+{
+	return __lookup_object(ptr, alias, false);
+}
+
 /*
  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
  * that once an object's use_count reached 0, the RCU freeing was already
@@ -517,14 +532,15 @@ static void put_object(struct kmemleak_object *object)
 /*
  * Look up an object in the object search tree and increase its use_count.
  */
-static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
+static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
+						     bool is_phys)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 
 	rcu_read_lock();
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
-	object = lookup_object(ptr, alias);
+	object = __lookup_object(ptr, alias, is_phys);
 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	/* check whether the object is still available */
@@ -535,28 +551,39 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 	return object;
 }
 
+/* Look up and get an object allocated with a virtual address. */
+static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
+{
+	return __find_and_get_object(ptr, alias, false);
+}
+
 /*
- * Remove an object from the object_tree_root and object_list. Must be called
- * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ * Remove an object from the object_tree_root (or object_phys_tree_root)
+ * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
+ * is still enabled.
  */
 static void __remove_object(struct kmemleak_object *object)
 {
-	rb_erase(&object->rb_node, &object_tree_root);
+	rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
+				   &object_phys_tree_root :
+				   &object_tree_root);
 	list_del_rcu(&object->object_list);
 }
 
 /*
  * Look up an object in the object search tree and remove it from both
- * object_tree_root and object_list. The returned object's use_count should be
- * at least 1, as initially set by create_object().
+ * object_tree_root (or object_phys_tree_root) and object_list. The
+ * returned object's use_count should be at least 1, as initially set
+ * by create_object().
  */
-static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
+static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
+						      bool is_phys)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
-	object = lookup_object(ptr, alias);
+	object = __lookup_object(ptr, alias, is_phys);
 	if (object)
 		__remove_object(object);
 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
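The patch follows the usual kernel convention: the double-underscore variants take the new is_phys argument, while thin wrappers keep the historical virtual-address behaviour so existing callers compile unchanged. A usage sketch for a physical-address caller (hypothetical function, mirroring what paint_ptr() does below):

```c
/* Hypothetical caller: look up a physically addressed object and drop
 * the reference when done. */
static void example_touch_phys_object(phys_addr_t phys)
{
	struct kmemleak_object *object;

	object = __find_and_get_object((unsigned long)phys, 0, true);
	if (!object)
		return;
	/* ... inspect the object under object->lock as needed ... */
	put_object(object);
}
```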
@@ -574,7 +601,8 @@ static int __save_stack_trace(unsigned long *trace)
 
 /*
  * Create the metadata (struct kmemleak_object) corresponding to an allocated
- * memory block and add it to the object_list and object_tree_root.
+ * memory block and add it to the object_list and object_tree_root (or
+ * object_phys_tree_root).
  */
 static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 					       int min_count, gfp_t gfp,
@@ -631,9 +659,16 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
-	min_addr = min(min_addr, untagged_ptr);
-	max_addr = max(max_addr, untagged_ptr + size);
-	link = &object_tree_root.rb_node;
+	/*
+	 * Only update min_addr and max_addr for objects storing a
+	 * virtual address.
+	 */
+	if (!is_phys) {
+		min_addr = min(min_addr, untagged_ptr);
+		max_addr = max(max_addr, untagged_ptr + size);
+	}
+	link = is_phys ? &object_phys_tree_root.rb_node :
+			 &object_tree_root.rb_node;
 	rb_parent = NULL;
 	while (*link) {
 		rb_parent = *link;
@@ -657,7 +692,8 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 		}
 	}
 	rb_link_node(&object->rb_node, rb_parent, link);
-	rb_insert_color(&object->rb_node, &object_tree_root);
+	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
+						    &object_tree_root);
 
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
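Linking and recolouring must operate on the same root the walk descended, which is why __create_object() now parametrises both the initial link and the rb_insert_color() call. A generic sketch of the idiom (the overlap check kmemleak performs in the elided loop body is omitted):

```c
#include <linux/rbtree.h>
#include <linux/types.h>

struct obj {	/* same reduced stand-in as in the look-up sketch */
	struct rb_node rb_node;
	unsigned long start;
	size_t size;
};

/* Walk to a free slot keyed by start address, then link and rebalance.
 * Both steps must use the same root. */
static void tree_insert(struct rb_root *root, struct obj *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct obj *o = rb_entry(*link, struct obj, rb_node);

		parent = *link;
		if (new->start < o->start)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->rb_node, parent, link);
	rb_insert_color(&new->rb_node, root);
}
```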
@@ -707,7 +743,7 @@ static void delete_object_full(unsigned long ptr)
 {
 	struct kmemleak_object *object;
 
-	object = find_and_remove_object(ptr, 0);
+	object = find_and_remove_object(ptr, 0, false);
 	if (!object) {
 #ifdef DEBUG
 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
@@ -723,12 +759,12 @@ static void delete_object_full(unsigned long ptr)
  * delete it. If the memory block is partially freed, the function may create
  * additional metadata for the remaining parts of the block.
  */
-static void delete_object_part(unsigned long ptr, size_t size)
+static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
 {
 	struct kmemleak_object *object;
 	unsigned long start, end;
 
-	object = find_and_remove_object(ptr, 1);
+	object = find_and_remove_object(ptr, 1, is_phys);
 	if (!object) {
 #ifdef DEBUG
 		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
@@ -746,10 +782,10 @@ static void delete_object_part(unsigned long ptr, size_t size)
 	end = object->pointer + object->size;
 	if (ptr > start)
 		__create_object(start, ptr - start, object->min_count,
-				GFP_KERNEL, object->flags & OBJECT_PHYS);
+				GFP_KERNEL, is_phys);
 	if (ptr + size < end)
 		__create_object(ptr + size, end - ptr - size, object->min_count,
-				GFP_KERNEL, is_phys);
 
 	__delete_object(object);
 }
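Passing is_phys straight through (instead of re-deriving it from object->flags at each call) keeps both remnants in the same tree as the original object. A worked example of the split arithmetic:

```c
/*
 * Suppose an object covers [0x1000, 0x1400) and the caller partially
 * frees [0x1100, 0x1200):
 *
 *   start = 0x1000, end = 0x1400, ptr = 0x1100, size = 0x100
 *
 *   ptr > start      -> __create_object(0x1000, 0x100, ...)  keeps [0x1000, 0x1100)
 *   ptr + size < end -> __create_object(0x1200, 0x200, ...)  keeps [0x1200, 0x1400)
 */
```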
@@ -770,11 +806,11 @@ static void paint_it(struct kmemleak_object *object, int color)
 	raw_spin_unlock_irqrestore(&object->lock, flags);
 }
 
-static void paint_ptr(unsigned long ptr, int color)
+static void paint_ptr(unsigned long ptr, int color, bool is_phys)
 {
 	struct kmemleak_object *object;
 
-	object = find_and_get_object(ptr, 0);
+	object = __find_and_get_object(ptr, 0, is_phys);
 	if (!object) {
 		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
 			      ptr,
@@ -792,16 +828,16 @@ static void paint_ptr(unsigned long ptr, int color)
  */
 static void make_gray_object(unsigned long ptr)
 {
-	paint_ptr(ptr, KMEMLEAK_GREY);
+	paint_ptr(ptr, KMEMLEAK_GREY, false);
 }
 
 /*
  * Mark the object as black-colored so that it is ignored from scans and
  * reporting.
  */
-static void make_black_object(unsigned long ptr)
+static void make_black_object(unsigned long ptr, bool is_phys)
 {
-	paint_ptr(ptr, KMEMLEAK_BLACK);
+	paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
 }
 
 /*
@@ -1007,7 +1043,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-		delete_object_part((unsigned long)ptr, size);
+		delete_object_part((unsigned long)ptr, size, false);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -1095,7 +1131,7 @@ void __ref kmemleak_ignore(const void *ptr)
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-		make_black_object((unsigned long)ptr);
+		make_black_object((unsigned long)ptr, false);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
@@ -1153,7 +1189,7 @@ void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
 		 * Create object with OBJECT_PHYS flag and
 		 * assume min_count 0.
 		 */
-		create_object_phys((unsigned long)__va(phys), size, 0, gfp);
+		create_object_phys((unsigned long)phys, size, 0, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);
 
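Storing the raw physical address (rather than __va(phys)) is the core of the change: the object no longer has to lie inside the linear map at registration time. A usage sketch modelled on, but not copied from, the memblock call site; the function name and gfp value here are illustrative:

```c
#include <linux/kmemleak.h>
#include <linux/gfp.h>

/* Hypothetical caller: report a physically addressed allocation to
 * kmemleak. min_count is fixed at 0 by kmemleak_alloc_phys(), so the
 * block is never reported as a leak, only scanned for pointers. */
static void example_register_phys(phys_addr_t base, size_t size)
{
	kmemleak_alloc_phys(base, size, GFP_KERNEL);
}
```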
@@ -1166,8 +1202,10 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
+	pr_debug("%s(0x%pa)\n", __func__, &phys);
+
 	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
-		kmemleak_free_part(__va(phys), size);
+		delete_object_part((unsigned long)phys, size, true);
 }
 EXPORT_SYMBOL(kmemleak_free_part_phys);
 
@@ -1178,8 +1216,10 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
+	pr_debug("%s(0x%pa)\n", __func__, &phys);
+
 	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
-		kmemleak_ignore(__va(phys));
+		make_black_object((unsigned long)phys, true);
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
 
@@ -1190,6 +1230,9 @@ static bool update_checksum(struct kmemleak_object *object)
 {
 	u32 old_csum = object->checksum;
 
+	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+		return false;
+
 	kasan_disable_current();
 	kcsan_disable_current();
 	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
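Like the earlier hex_dump_object() guard, this makes the contract explicit: object->pointer may only be dereferenced when it holds a virtual address. Skipping the checksum is safe here because physically addressed objects are created with min_count 0 and are never candidates for leak reports. A hypothetical predicate capturing the rule both guards enforce:

```c
/* Hypothetical helper: true when object->pointer can be dereferenced
 * directly, i.e. the object was registered with a virtual address. */
static bool object_pointer_is_virtual(const struct kmemleak_object *object)
{
	return !(object->flags & OBJECT_PHYS);
}
```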
@@ -1343,6 +1386,7 @@ static void scan_object(struct kmemleak_object *object)
 {
 	struct kmemleak_scan_area *area;
 	unsigned long flags;
+	void *obj_ptr;
 
 	/*
 	 * Once the object->lock is acquired, the corresponding memory block
@@ -1354,10 +1398,15 @@ static void scan_object(struct kmemleak_object *object)
 	if (!(object->flags & OBJECT_ALLOCATED))
 		/* already freed object */
 		goto out;
+
+	obj_ptr = object->flags & OBJECT_PHYS ?
+		  __va((phys_addr_t)object->pointer) :
+		  (void *)object->pointer;
+
 	if (hlist_empty(&object->area_list) ||
 	    object->flags & OBJECT_FULL_SCAN) {
-		void *start = (void *)object->pointer;
-		void *end = (void *)(object->pointer + object->size);
+		void *start = obj_ptr;
+		void *end = obj_ptr + object->size;
 		void *next;
 
 		do {
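scan_object() is where a physical address finally has to become dereferenceable: __va() maps it to its linear-map alias, which is valid because the kmemleak_*_phys() entry points only accept lowmem ranges (the PHYS_PFN() checks against min_low_pfn/max_low_pfn above). A sketch of the translation under that lowmem assumption:

```c
#include <linux/mm.h>	/* __va(), PHYS_PFN(), min_low_pfn, max_low_pfn */

/* Sketch: compute the scannable start address for an object, assuming
 * OBJECT_PHYS objects were bounds-checked to lowmem at registration. */
static void *scan_start(unsigned long pointer, unsigned int flags)
{
	if (flags & OBJECT_PHYS)
		return __va((phys_addr_t)pointer);
	return (void *)pointer;
}
```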