Skip to content

Commit d7bfc7d

Browse files
Author: Peter Zijlstra (committed)
Commit title: x86/xen: Make irq_enable() noinstr
vmlinux.o: warning: objtool: pv_ops[32]: native_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: __raw_callee_save_xen_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: xen_irq_enable_direct
vmlinux.o: warning: objtool: lock_is_held_type()+0xfe: call to pv_ops[32]() leaves .noinstr.text section

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 74ea805 commit d7bfc7d

File tree

3 files changed

+36
-31
lines changed

3 files changed

+36
-31
lines changed

arch/x86/kernel/paravirt.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -238,6 +238,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
238238
{
239239
native_set_debugreg(regno, val);
240240
}
241+
242+
static noinstr void pv_native_irq_enable(void)
243+
{
244+
native_irq_enable();
245+
}
241246
#endif
242247

243248
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
@@ -302,7 +307,7 @@ struct paravirt_patch_template pv_ops = {
302307
/* Irq ops. */
303308
.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
304309
.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
305-
.irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
310+
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
306311
.irq.safe_halt = native_safe_halt,
307312
.irq.halt = native_halt,
308313
#endif /* CONFIG_PARAVIRT_XXL */

arch/x86/xen/irq.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ asmlinkage __visible void xen_irq_disable(void)
5353
}
5454
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
5555

56-
asmlinkage __visible void xen_irq_enable(void)
56+
asmlinkage __visible noinstr void xen_irq_enable(void)
5757
{
5858
struct vcpu_info *vcpu;
5959

@@ -76,7 +76,7 @@ asmlinkage __visible void xen_irq_enable(void)
7676

7777
preempt_enable();
7878
}
79-
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
79+
__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable, ".noinstr.text");
8080

8181
static void xen_safe_halt(void)
8282
{

arch/x86/xen/xen-asm.S

Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -21,33 +21,6 @@
2121
#include <linux/init.h>
2222
#include <linux/linkage.h>
2323

24-
/*
25-
* Enable events. This clears the event mask and tests the pending
26-
* event status with one and operation. If there are pending events,
27-
* then enter the hypervisor to get them handled.
28-
*/
29-
SYM_FUNC_START(xen_irq_enable_direct)
30-
FRAME_BEGIN
31-
/* Unmask events */
32-
movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
33-
34-
/*
35-
* Preempt here doesn't matter because that will deal with any
36-
* pending interrupts. The pending check may end up being run
37-
* on the wrong CPU, but that doesn't hurt.
38-
*/
39-
40-
/* Test for pending */
41-
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
42-
jz 1f
43-
44-
call check_events
45-
1:
46-
FRAME_END
47-
ret
48-
SYM_FUNC_END(xen_irq_enable_direct)
49-
50-
5124
/*
5225
* Disabling events is simply a matter of making the event mask
5326
* non-zero.
@@ -57,6 +30,8 @@ SYM_FUNC_START(xen_irq_disable_direct)
5730
ret
5831
SYM_FUNC_END(xen_irq_disable_direct)
5932

33+
.pushsection .noinstr.text, "ax"
34+
6035
/*
6136
* Force an event check by making a hypercall, but preserve regs
6237
* before making the call.
@@ -86,7 +61,32 @@ SYM_FUNC_START(check_events)
8661
ret
8762
SYM_FUNC_END(check_events)
8863

89-
.pushsection .noinstr.text, "ax"
64+
/*
65+
* Enable events. This clears the event mask and tests the pending
66+
* event status with one and operation. If there are pending events,
67+
* then enter the hypervisor to get them handled.
68+
*/
69+
SYM_FUNC_START(xen_irq_enable_direct)
70+
FRAME_BEGIN
71+
/* Unmask events */
72+
movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
73+
74+
/*
75+
* Preempt here doesn't matter because that will deal with any
76+
* pending interrupts. The pending check may end up being run
77+
* on the wrong CPU, but that doesn't hurt.
78+
*/
79+
80+
/* Test for pending */
81+
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
82+
jz 1f
83+
84+
call check_events
85+
1:
86+
FRAME_END
87+
ret
88+
SYM_FUNC_END(xen_irq_enable_direct)
89+
9090
/*
9191
* (xen_)save_fl is used to get the current interrupt enable status.
9292
* Callers expect the status to be in X86_EFLAGS_IF, and other bits

0 commit comments

Comments
 (0)