Commit ca2ef2d

Merge tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull KCSAN updates from Paul McKenney:
 "This contains initialization fixups, testing improvements, addition of
  instruction pointer to data-race reports, and scoped data-race checks"

* tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  kcsan: selftest: Cleanup and add missing __init
  kcsan: Move ctx to start of argument list
  kcsan: Support reporting scoped read-write access type
  kcsan: Start stack trace with explicit location if provided
  kcsan: Save instruction pointer for scoped accesses
  kcsan: Add ability to pass instruction pointer of access to reporting
  kcsan: test: Fix flaky test case
  kcsan: test: Use kunit_skip() to skip tests
  kcsan: test: Defer kcsan_test_init() after kunit initialization

2 parents 5593a73 + ac20e39 commit ca2ef2d
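
The common thread in the diffs below is that every access check now carries the instruction pointer (ip) of the access site, captured with the kernel's _RET_IP_ macro, so reports and stack traces can start at the code performing the access rather than inside KCSAN's machinery. A minimal sketch of the idiom, assuming hypothetical names (record_access, caller_demo) that do not exist in the tree:

#include <linux/kernel.h>	/* _RET_IP_ */
#include <linux/printk.h>	/* pr_info() */

/* Hypothetical sink, standing in for KCSAN's reporting path. */
static void record_access(unsigned long ip)
{
	pr_info("access at %pS\n", (void *)ip);	/* symbol+offset */
}

/* Hypothetical caller, standing in for an instrumented access. */
static void caller_demo(void)
{
	/*
	 * _RET_IP_ is (unsigned long)__builtin_return_address(0): the
	 * address this function returns to. Recording it attributes the
	 * event to caller_demo()'s call site, which is how the exported
	 * __kcsan_check_access() stamps its caller in the diff below.
	 */
	record_access(_RET_IP_);
}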

File tree

6 files changed: +186, -111 lines


include/linux/kcsan-checks.h

Lines changed: 3 additions & 0 deletions

@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
 /* Scoped access information. */
 struct kcsan_scoped_access {
 	struct list_head list;
+	/* Access information. */
 	const volatile void *ptr;
 	size_t size;
 	int type;
+	/* Location where scoped access was set up. */
+	unsigned long ip;
 };
 /*
  * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
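
The new ip field records where a scoped access was set up: re-checks performed later from KCSAN's scoped-access machinery can then attribute a report to the original setup site. Scoped accesses are registered with kcsan_begin_scoped_access() (typically via the ASSERT_EXCLUSIVE_*_SCOPED() macros). A usage sketch, assuming a made-up shared_counter and scoped_demo() for illustration:

#include <linux/kcsan-checks.h>

static int shared_counter;	/* hypothetical shared state */

static void scoped_demo(void)
{
	struct kcsan_scoped_access sa;

	/*
	 * Assert exclusive access to shared_counter for the whole scope.
	 * sa.ip is set to this call site (_RET_IP_ inside
	 * kcsan_begin_scoped_access()), so a race detected anywhere in
	 * the scope is attributed to scoped_demo().
	 */
	kcsan_begin_scoped_access(&shared_counter, sizeof(shared_counter),
				  KCSAN_ACCESS_ASSERT, &sa);

	shared_counter++;	/* ... code assuming exclusive access ... */

	kcsan_end_scoped_access(&sa);
}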

kernel/kcsan/core.c

Lines changed: 43 additions & 32 deletions

@@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
+
 /* Check scoped accesses; never inline because this is a slow-path! */
 static noinline void kcsan_check_scoped_accesses(void)
 {
@@ -210,14 +213,16 @@ static noinline void kcsan_check_scoped_accesses(void)
 	struct kcsan_scoped_access *scoped_access;
 
 	ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
-	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
-		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
+		check_access(scoped_access->ptr, scoped_access->size,
+			     scoped_access->type, scoped_access->ip);
+	}
 	ctx->scoped_accesses.prev = prev_save;
 }
 
 /* Rules for generic atomic accesses. Called from fast-path. */
 static __always_inline bool
-is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
 {
 	if (type & KCSAN_ACCESS_ATOMIC)
 		return true;
@@ -254,7 +259,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx
 }
 
 static __always_inline bool
-should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
 {
 	/*
 	 * Never set up watchpoints when memory operations are atomic.
@@ -263,7 +268,7 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
 	 * should not count towards skipped instructions, and (2) to actually
 	 * decrement kcsan_atomic_next for consecutive instruction stream.
 	 */
-	if (is_atomic(ptr, size, type, ctx))
+	if (is_atomic(ctx, ptr, size, type))
 		return false;
 
 	if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -350,6 +355,7 @@ void kcsan_restore_irqtrace(struct task_struct *task)
 static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 					    size_t size,
 					    int type,
+					    unsigned long ip,
 					    atomic_long_t *watchpoint,
 					    long encoded_watchpoint)
 {
@@ -396,7 +402,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
 	if (consumed) {
 		kcsan_save_irqtrace(current);
-		kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
+		kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
 		kcsan_restore_irqtrace(current);
 	} else {
 		/*
@@ -416,7 +422,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 }
 
 static noinline void
-kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
 {
 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
 	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
@@ -568,8 +574,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
-		kcsan_report_known_origin(ptr, size, type, value_change,
-					  watchpoint - watchpoints,
+		kcsan_report_known_origin(ptr, size, type, ip,
+					  value_change, watchpoint - watchpoints,
 					  old, new, access_mask);
 	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
 		/* Inferring a race, since the value should not have changed. */
@@ -578,8 +584,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		if (is_assert)
 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
-		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
-			kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
+		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
+			kcsan_report_unknown_origin(ptr, size, type, ip,
+						    old, new, access_mask);
+		}
 	}
 
 	/*
@@ -596,8 +604,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	user_access_restore(ua_flags);
 }
 
-static __always_inline void check_access(const volatile void *ptr, size_t size,
-					 int type)
+static __always_inline void
+check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
 {
 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
 	atomic_long_t *watchpoint;
@@ -625,13 +633,12 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 	 */
 
 	if (unlikely(watchpoint != NULL))
-		kcsan_found_watchpoint(ptr, size, type, watchpoint,
-				       encoded_watchpoint);
+		kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
 	else {
 		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
 
-		if (unlikely(should_watch(ptr, size, type, ctx)))
-			kcsan_setup_watchpoint(ptr, size, type);
+		if (unlikely(should_watch(ctx, ptr, size, type)))
+			kcsan_setup_watchpoint(ptr, size, type, ip);
 		else if (unlikely(ctx->scoped_accesses.prev))
 			kcsan_check_scoped_accesses();
 	}
@@ -757,14 +764,15 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
 {
 	struct kcsan_ctx *ctx = get_ctx();
 
-	__kcsan_check_access(ptr, size, type);
+	check_access(ptr, size, type, _RET_IP_);
 
 	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
 
 	INIT_LIST_HEAD(&sa->list);
 	sa->ptr = ptr;
 	sa->size = size;
 	sa->type = type;
+	sa->ip = _RET_IP_;
 
 	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
 		INIT_LIST_HEAD(&ctx->scoped_accesses);
@@ -796,13 +804,13 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
 
 	ctx->disable_count--;
 
-	__kcsan_check_access(sa->ptr, sa->size, sa->type);
+	check_access(sa->ptr, sa->size, sa->type, sa->ip);
 }
 EXPORT_SYMBOL(kcsan_end_scoped_access);
 
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
 {
-	check_access(ptr, size, type);
+	check_access(ptr, size, type, _RET_IP_);
 }
 EXPORT_SYMBOL(__kcsan_check_access);
 
@@ -823,7 +831,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
 	void __tsan_read##size(void *ptr); \
 	void __tsan_read##size(void *ptr) \
 	{ \
-		check_access(ptr, size, 0); \
+		check_access(ptr, size, 0, _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_read##size); \
 	void __tsan_unaligned_read##size(void *ptr) \
@@ -832,7 +840,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
 	void __tsan_write##size(void *ptr); \
 	void __tsan_write##size(void *ptr) \
 	{ \
-		check_access(ptr, size, KCSAN_ACCESS_WRITE); \
+		check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_write##size); \
 	void __tsan_unaligned_write##size(void *ptr) \
@@ -842,7 +850,8 @@ EXPORT_SYMBOL(__kcsan_check_access);
 	void __tsan_read_write##size(void *ptr) \
 	{ \
 		check_access(ptr, size, \
-			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
+			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, \
+			     _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_read_write##size); \
 	void __tsan_unaligned_read_write##size(void *ptr) \
@@ -858,14 +867,14 @@ DEFINE_TSAN_READ_WRITE(16);
 void __tsan_read_range(void *ptr, size_t size);
 void __tsan_read_range(void *ptr, size_t size)
 {
-	check_access(ptr, size, 0);
+	check_access(ptr, size, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(__tsan_read_range);
 
 void __tsan_write_range(void *ptr, size_t size);
 void __tsan_write_range(void *ptr, size_t size)
 {
-	check_access(ptr, size, KCSAN_ACCESS_WRITE);
+	check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
 }
 EXPORT_SYMBOL(__tsan_write_range);
 
@@ -886,7 +895,8 @@ EXPORT_SYMBOL(__tsan_write_range);
 			IS_ALIGNED((unsigned long)ptr, size); \
 		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
 			return; \
-		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
+		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0, \
+			     _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_volatile_read##size); \
 	void __tsan_unaligned_volatile_read##size(void *ptr) \
@@ -901,7 +911,8 @@ EXPORT_SYMBOL(__tsan_write_range);
 			return; \
 		check_access(ptr, size, \
 			     KCSAN_ACCESS_WRITE | \
-				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
+				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0), \
+			     _RET_IP_); \
 	} \
 	EXPORT_SYMBOL(__tsan_volatile_write##size); \
 	void __tsan_unaligned_volatile_write##size(void *ptr) \
@@ -955,7 +966,7 @@ EXPORT_SYMBOL(__tsan_init);
 	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
 	{ \
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
-			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
+			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		return __atomic_load_n(ptr, memorder); \
 	} \
@@ -965,7 +976,7 @@ EXPORT_SYMBOL(__tsan_init);
 	{ \
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
-				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
+				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		__atomic_store_n(ptr, v, memorder); \
 	} \
@@ -978,7 +989,7 @@ EXPORT_SYMBOL(__tsan_init);
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-					     KCSAN_ACCESS_ATOMIC); \
+					     KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		return __atomic_##op##suffix(ptr, v, memorder); \
 	} \
@@ -1010,7 +1021,7 @@ EXPORT_SYMBOL(__tsan_init);
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-					     KCSAN_ACCESS_ATOMIC); \
+					     KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
 	} \
@@ -1025,7 +1036,7 @@ EXPORT_SYMBOL(__tsan_init);
 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
 			check_access(ptr, bits / BITS_PER_BYTE, \
 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
-					     KCSAN_ACCESS_ATOMIC); \
+					     KCSAN_ACCESS_ATOMIC, _RET_IP_); \
 		} \
 		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
 		return exp; \
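
One design choice above is worth spelling out: kcsan_check_scoped_accesses() switched from __kcsan_check_access() to a direct check_access() call. The exported wrapper stamps each check with its own _RET_IP_, which in this re-check loop would resolve to a KCSAN-internal address; passing the saved sa->ip instead keeps reports pointed at the setup site. A condensed illustration (recheck_scoped is a hypothetical name, not a function in the tree):

static void recheck_scoped(struct kcsan_scoped_access *sa)
{
	/*
	 * Old: __kcsan_check_access(sa->ptr, sa->size, sa->type)
	 * would record _RET_IP_ here, i.e. inside KCSAN itself.
	 *
	 * New: reuse the ip saved when the scoped access was set up.
	 */
	check_access(sa->ptr, sa->size, sa->type, sa->ip);
}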

kernel/kcsan/kcsan.h

Lines changed: 4 additions & 4 deletions

@@ -121,22 +121,22 @@ enum kcsan_value_change {
  * to be consumed by the reporting thread. No report is printed yet.
  */
 void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
-			   int watchpoint_idx);
+			   unsigned long ip, int watchpoint_idx);
 
 /*
  * The calling thread observed that the watchpoint it set up was hit and
  * consumed: print the full report based on information set by the racing
  * thread.
  */
 void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
-			       enum kcsan_value_change value_change, int watchpoint_idx,
-			       u64 old, u64 new, u64 mask);
+			       unsigned long ip, enum kcsan_value_change value_change,
+			       int watchpoint_idx, u64 old, u64 new, u64 mask);
 
 /*
  * No other thread was observed to race with the access, but the data value
  * before and after the stall differs. Reports a race of "unknown origin".
  */
 void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
-				 u64 old, u64 new, u64 mask);
+				 unsigned long ip, u64 old, u64 new, u64 mask);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
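
The reporting side that consumes the new ip argument is not part of this section; per the "kcsan: Start stack trace with explicit location if provided" patch in this series, the printed stack trace begins at the given access location when one is provided. A simplified sketch of the idea — the real logic in kernel/kcsan/report.c matches by symbol range and differs in detail, and start_at_ip is a hypothetical name:

/*
 * Return the index to start printing from, so the first frame shown is
 * the access site; fall back to the full trace if ip is not found.
 */
static int start_at_ip(const unsigned long *entries, int num_entries,
		       unsigned long ip)
{
	int i;

	for (i = 0; i < num_entries; ++i) {
		if (entries[i] == ip)
			return i;
	}
	return 0;
}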
