@@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
         return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
+
 /* Check scoped accesses; never inline because this is a slow-path! */
 static noinline void kcsan_check_scoped_accesses(void)
 {
@@ -210,14 +213,16 @@ static noinline void kcsan_check_scoped_accesses(void)
         struct kcsan_scoped_access *scoped_access;
 
         ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
-        list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
-                __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+        list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
+                check_access(scoped_access->ptr, scoped_access->size,
+                             scoped_access->type, scoped_access->ip);
+        }
         ctx->scoped_accesses.prev = prev_save;
 }
 
 /* Rules for generic atomic accesses. Called from fast-path. */
 static __always_inline bool
-is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
 {
         if (type & KCSAN_ACCESS_ATOMIC)
                 return true;
@@ -254,7 +259,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx
 }
 
 static __always_inline bool
-should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
 {
         /*
          * Never set up watchpoints when memory operations are atomic.
@@ -263,7 +268,7 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
          * should not count towards skipped instructions, and (2) to actually
          * decrement kcsan_atomic_next for consecutive instruction stream.
          */
-        if (is_atomic(ptr, size, type, ctx))
+        if (is_atomic(ctx, ptr, size, type))
                 return false;
 
         if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -350,6 +355,7 @@ void kcsan_restore_irqtrace(struct task_struct *task)
 static noinline void kcsan_found_watchpoint(const volatile void *ptr,
                                             size_t size,
                                             int type,
+                                            unsigned long ip,
                                             atomic_long_t *watchpoint,
                                             long encoded_watchpoint)
 {
@@ -396,7 +402,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
         if (consumed) {
                 kcsan_save_irqtrace(current);
-                kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
+                kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
                 kcsan_restore_irqtrace(current);
         } else {
                 /*
@@ -416,7 +422,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 }
 
 static noinline void
-kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
 {
         const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
         const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
@@ -568,8 +574,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
                 if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
                         atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
-                kcsan_report_known_origin(ptr, size, type, value_change,
-                                          watchpoint - watchpoints,
+                kcsan_report_known_origin(ptr, size, type, ip,
+                                          value_change, watchpoint - watchpoints,
                                           old, new, access_mask);
         } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
                 /* Inferring a race, since the value should not have changed. */
@@ -578,8 +584,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
                 if (is_assert)
                         atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
-                if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
-                        kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
+                if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
+                        kcsan_report_unknown_origin(ptr, size, type, ip,
+                                                    old, new, access_mask);
+                }
         }
 
         /*
@@ -596,8 +604,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
         user_access_restore(ua_flags);
 }
 
-static __always_inline void check_access(const volatile void *ptr, size_t size,
-                                         int type)
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
 {
         const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
         atomic_long_t *watchpoint;
@@ -625,13 +633,12 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
          */
 
         if (unlikely(watchpoint != NULL))
-                kcsan_found_watchpoint(ptr, size, type, watchpoint,
-                                       encoded_watchpoint);
+                kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
         else {
                 struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
 
-                if (unlikely(should_watch(ptr, size, type, ctx)))
-                        kcsan_setup_watchpoint(ptr, size, type);
+                if (unlikely(should_watch(ctx, ptr, size, type)))
+                        kcsan_setup_watchpoint(ptr, size, type, ip);
                 else if (unlikely(ctx->scoped_accesses.prev))
                         kcsan_check_scoped_accesses();
         }
@@ -757,14 +764,15 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
 {
         struct kcsan_ctx *ctx = get_ctx();
 
-        __kcsan_check_access(ptr, size, type);
+        check_access(ptr, size, type, _RET_IP_);
 
         ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
 
         INIT_LIST_HEAD(&sa->list);
         sa->ptr = ptr;
         sa->size = size;
         sa->type = type;
+        sa->ip = _RET_IP_;
 
         if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
                 INIT_LIST_HEAD(&ctx->scoped_accesses);
@@ -796,13 +804,13 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
 
         ctx->disable_count--;
 
-        __kcsan_check_access(sa->ptr, sa->size, sa->type);
+        check_access(sa->ptr, sa->size, sa->type, sa->ip);
 }
 EXPORT_SYMBOL(kcsan_end_scoped_access);
 
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
 {
-        check_access(ptr, size, type);
+        check_access(ptr, size, type, _RET_IP_);
 }
 EXPORT_SYMBOL(__kcsan_check_access);
 
@@ -823,7 +831,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
         void __tsan_read##size(void *ptr); \
         void __tsan_read##size(void *ptr) \
         { \
-                check_access(ptr, size, 0); \
+                check_access(ptr, size, 0, _RET_IP_); \
         } \
         EXPORT_SYMBOL(__tsan_read##size); \
         void __tsan_unaligned_read##size(void *ptr) \
@@ -832,7 +840,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
         void __tsan_write##size(void *ptr); \
         void __tsan_write##size(void *ptr) \
         { \
-                check_access(ptr, size, KCSAN_ACCESS_WRITE); \
+                check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_); \
         } \
         EXPORT_SYMBOL(__tsan_write##size); \
         void __tsan_unaligned_write##size(void *ptr) \
@@ -842,7 +850,8 @@ EXPORT_SYMBOL(__kcsan_check_access);
         void __tsan_read_write##size(void *ptr) \
         { \
                 check_access(ptr, size, \
-                             KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
+                             KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, \
+                             _RET_IP_); \
         } \
         EXPORT_SYMBOL(__tsan_read_write##size); \
         void __tsan_unaligned_read_write##size(void *ptr) \
@@ -858,14 +867,14 @@ DEFINE_TSAN_READ_WRITE(16);
 void __tsan_read_range(void *ptr, size_t size);
 void __tsan_read_range(void *ptr, size_t size)
 {
-        check_access(ptr, size, 0);
+        check_access(ptr, size, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(__tsan_read_range);
 
 void __tsan_write_range(void *ptr, size_t size);
 void __tsan_write_range(void *ptr, size_t size)
 {
-        check_access(ptr, size, KCSAN_ACCESS_WRITE);
+        check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
 }
 EXPORT_SYMBOL(__tsan_write_range);
 
@@ -886,7 +895,8 @@ EXPORT_SYMBOL(__tsan_write_range);
                         IS_ALIGNED((unsigned long)ptr, size); \
                 if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
                         return; \
-                check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
+                check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0, \
+                             _RET_IP_); \
         } \
         EXPORT_SYMBOL(__tsan_volatile_read##size); \
         void __tsan_unaligned_volatile_read##size(void *ptr) \
@@ -901,7 +911,8 @@ EXPORT_SYMBOL(__tsan_write_range);
                         return; \
                 check_access(ptr, size, \
                              KCSAN_ACCESS_WRITE | \
                                      (is_atomic ? KCSAN_ACCESS_ATOMIC : 0), \
-                                     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
+                             _RET_IP_); \
         } \
         EXPORT_SYMBOL(__tsan_volatile_write##size); \
         void __tsan_unaligned_volatile_write##size(void *ptr) \
@@ -955,7 +966,7 @@ EXPORT_SYMBOL(__tsan_init);
         u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
         { \
                 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
-                        check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
+                        check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
                 } \
                 return __atomic_load_n(ptr, memorder); \
         } \
@@ -965,7 +976,7 @@ EXPORT_SYMBOL(__tsan_init);
         { \
                 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                         check_access(ptr, bits / BITS_PER_BYTE, \
-                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
+                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
                 } \
                 __atomic_store_n(ptr, v, memorder); \
         } \
@@ -978,7 +989,7 @@ EXPORT_SYMBOL(__tsan_init);
                 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                         check_access(ptr, bits / BITS_PER_BYTE, \
                                      KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-                                             KCSAN_ACCESS_ATOMIC); \
+                                             KCSAN_ACCESS_ATOMIC, _RET_IP_); \
                 } \
                 return __atomic_##op##suffix(ptr, v, memorder); \
         } \
@@ -1010,7 +1021,7 @@ EXPORT_SYMBOL(__tsan_init);
                 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                         check_access(ptr, bits / BITS_PER_BYTE, \
                                      KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-                                             KCSAN_ACCESS_ATOMIC); \
+                                             KCSAN_ACCESS_ATOMIC, _RET_IP_); \
                 } \
                 return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
         } \
@@ -1025,7 +1036,7 @@ EXPORT_SYMBOL(__tsan_init);
                 if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
                         check_access(ptr, bits / BITS_PER_BYTE, \
                                      KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-                                             KCSAN_ACCESS_ATOMIC); \
+                                             KCSAN_ACCESS_ATOMIC, _RET_IP_); \
                 } \
                 __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
                 return exp; \
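
The mechanical theme of the diff above is that every instrumentation entry point now captures its caller's instruction pointer (_RET_IP_) and threads it through check_access() to the reporting layer, so a report can name the instrumented code rather than a KCSAN wrapper frame. The standalone C sketch below only illustrates that pattern; the names (RET_IP, check_access_sketch, report_access, instrumented_read) are hypothetical and this is not the kernel's implementation.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel's _RET_IP_, which expands to
 * (unsigned long)__builtin_return_address(0). */
#define RET_IP ((unsigned long)__builtin_return_address(0))

/* Hypothetical reporting sink; in KCSAN this role is played by the
 * kcsan_report_*() functions, which in this change also receive an ip. */
static void report_access(const volatile void *ptr, size_t size, unsigned long ip)
{
        printf("access to %p (%zu bytes) from ip 0x%lx\n", (void *)ptr, size, ip);
}

/* Analogue of check_access(): it uses the ip captured by its caller instead
 * of looking one up itself, so every layer reports the same origin. */
static inline void check_access_sketch(const volatile void *ptr, size_t size,
                                       unsigned long ip)
{
        report_access(ptr, size, ip);
}

/* Analogue of an instrumentation wrapper such as __tsan_read8(): as the
 * outermost entry point, its return address is the instrumented call site. */
__attribute__((noinline))
void instrumented_read(const void *ptr, size_t size)
{
        check_access_sketch(ptr, size, RET_IP);
}

int main(void)
{
        int x = 0;
        instrumented_read(&x, sizeof(x));  /* the report points at this call site */
        return 0;
}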