@@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 }
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
+/*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We also allow patching functions where RCU is not watching,
+ * e.g. before user_exit(). We cannot rely on the RCU infrastructure
+ * to do the synchronization. Instead, hard force the sched synchronization.
+ *
+ * This approach allows RCU functions to be used for manipulating
+ * func_stack safely.
+ */
+static void klp_synchronize_transition(void)
+{
+	schedule_on_each_cpu(klp_sync);
+}
+
 /*
  * The transition to the target patch state is complete. Clean up the data
  * structures.
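The stub plus schedule_on_each_cpu() above is the whole barrier: queueing an empty work item on every CPU and waiting for all of them to run forces each CPU through the scheduler, and a CPU cannot schedule while any task on it sits inside a preemption-disabled region. A minimal standalone sketch of the same pattern follows; the demo_* names are hypothetical, and only schedule_on_each_cpu() is the actual API the patch relies on.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Empty stub: merely getting scheduled on each CPU is the point. */
static void demo_sync_stub(struct work_struct *work)
{
}

static int __init demo_init(void)
{
	/*
	 * Queue demo_sync_stub() on every CPU and wait for completion.
	 * When this returns, every CPU has passed through the scheduler,
	 * so no CPU can still be running code that entered a
	 * preempt-disabled section before this call.
	 */
	return schedule_on_each_cpu(demo_sync_stub);
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");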
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
 		 * func->transition gets cleared, the handler may choose a
 		 * removed function.
 		 */
-		synchronize_rcu();
+		klp_synchronize_transition();
 	}
 
 	if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
 
 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
 	if (klp_target_state == KLP_PATCHED)
-		synchronize_rcu();
+		klp_synchronize_transition();
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	/*
 	 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
 	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
 		task->patch_state = READ_ONCE(klp_target_state);
 
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
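The read side shown across the two hunks above pairs with that barrier: preempt_disable_notrace()/preempt_enable_notrace() delimit the window that klp_synchronize_transition() must wait out, and the _notrace variants keep the ftrace handler path from recursing into traced preemption code. A minimal sketch of the reader pattern, with a hypothetical demo_target_state standing in for klp_target_state:

#include <linux/compiler.h>
#include <linux/preempt.h>

static int demo_target_state;

static int demo_read_state(void)
{
	int state;

	/*
	 * Disabling preemption pins this task on its CPU, so a concurrent
	 * schedule_on_each_cpu() barrier cannot complete until we re-enable
	 * it.  The _notrace variants avoid recursion when this runs from
	 * the ftrace handler path.
	 */
	preempt_disable_notrace();
	state = READ_ONCE(demo_target_state);
	preempt_enable_notrace();

	return state;
}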
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
 	/* Let any remaining calls to klp_update_patch_state() complete */
-	synchronize_rcu();
+	klp_synchronize_transition();
 
 	klp_start_transition();
 }
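Taken together, the update side follows the usual publish-then-wait ordering: store the new state, then hard-force a scheduler pass on every CPU so any in-flight reader drains. A hedged sketch of that ordering, reusing the hypothetical demo_* names from the earlier sketches:

static void demo_flip_target(int new_state)
{
	/* 1. Publish the new target state for future readers. */
	WRITE_ONCE(demo_target_state, new_state);

	/*
	 * 2. Wait for every CPU to schedule.  Any reader that entered its
	 *    preempt-disabled section before the store above has left it
	 *    by the time this returns, so all subsequent reads observe
	 *    new_state.
	 */
	klp_synchronize_transition();
}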