Skip to content

Commit dcba710

Browse files
committed
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching
Pull livepatching fix from Jiri Kosina: "Fix the way how livepatches are being stacked with respect to RCU, from Petr Mladek."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: Fix stacking of patches with respect to RCU
2 parents 021f601 + 842c088 commit dcba710

File tree

2 files changed

+37
-7
lines changed

2 files changed

+37
-7
lines changed

kernel/livepatch/patch.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
5959

6060
ops = container_of(fops, struct klp_ops, fops);
6161

62-
rcu_read_lock();
62+
/*
63+
* A variant of synchronize_sched() is used to allow patching functions
64+
* where RCU is not watching, see klp_synchronize_transition().
65+
*/
66+
preempt_disable_notrace();
6367

6468
func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
6569
stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
115119

116120
klp_arch_set_pc(regs, (unsigned long)func->new_func);
117121
unlock:
118-
rcu_read_unlock();
122+
preempt_enable_notrace();
119123
}
120124

121125
/*

kernel/livepatch/transition.c

Lines changed: 31 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work)
4848
}
4949
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
5050

/*
 * klp_sync() - intentionally empty work callback.
 *
 * This function is just a stub to implement a hard force
 * of synchronize_sched(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}
59+
60+
/*
61+
* We allow to patch also functions where RCU is not watching,
62+
* e.g. before user_exit(). We can not rely on the RCU infrastructure
63+
* to do the synchronization. Instead hard force the sched synchronization.
64+
*
65+
* This approach allows to use RCU functions for manipulating func_stack
66+
* safely.
67+
*/
68+
static void klp_synchronize_transition(void)
69+
{
70+
schedule_on_each_cpu(klp_sync);
71+
}
72+
5173
/*
5274
* The transition to the target patch state is complete. Clean up the data
5375
* structures.
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
7395
* func->transition gets cleared, the handler may choose a
7496
* removed function.
7597
*/
76-
synchronize_rcu();
98+
klp_synchronize_transition();
7799
}
78100

79101
if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
92114

93115
/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
94116
if (klp_target_state == KLP_PATCHED)
95-
synchronize_rcu();
117+
klp_synchronize_transition();
96118

97119
read_lock(&tasklist_lock);
98120
for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
136158
*/
137159
void klp_update_patch_state(struct task_struct *task)
138160
{
139-
rcu_read_lock();
161+
/*
162+
* A variant of synchronize_sched() is used to allow patching functions
163+
* where RCU is not watching, see klp_synchronize_transition().
164+
*/
165+
preempt_disable_notrace();
140166

141167
/*
142168
* This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
153179
if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
154180
task->patch_state = READ_ONCE(klp_target_state);
155181

156-
rcu_read_unlock();
182+
preempt_enable_notrace();
157183
}
158184

159185
/*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
539565
clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
540566

541567
/* Let any remaining calls to klp_update_patch_state() complete */
542-
synchronize_rcu();
568+
klp_synchronize_transition();
543569

544570
klp_start_transition();
545571
}

0 commit comments

Comments (0)