
Commit 0b9d2fc

KAGA-KOKO authored and Ingo Molnar committed
x86/paravirt: Replace the paravirt patch asm magic
The magic macro DEF_NATIVE() in the paravirt patching code uses inline
assembly to generate a data table for patching in the native instructions.

While clever, this is falling apart with LTO, and even aside from LTO the
construct only works by chance according to the GCC folks. Beyond that, the
tables are constant data and not some form of magic text.

As these constructs are not subject to frequent changes, it is not a
maintenance issue to convert them to regular data tables which are
initialized with hex bytes.

Create a new set of macros and data structures to store the instruction
sequences, and convert the code over.

Reported-by: Andi Kleen <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent fb2af07 commit 0b9d2fc
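
The conversion replaces compiler-emitted start_/end_ label pairs with plain
byte arrays that the patcher copies into place. A minimal, standalone sketch
of the idea (the names and the helper are illustrative, not the kernel's own
API):

#include <stddef.h>
#include <string.h>

/* Native instruction bytes kept as plain constant data. */
static const unsigned char native_cli[] = { 0xfa };	/* cli */

/*
 * Copy a native instruction sequence into a patch-site buffer.
 * Returns the number of bytes written, or 0 if the buffer is too small.
 */
static size_t patch_site(unsigned char *insnbuf, size_t len,
			 const unsigned char *start, const unsigned char *end)
{
	size_t insn_len = (size_t)(end - start);

	if (insn_len > len)
		return 0;
	memcpy(insnbuf, start, insn_len);
	return insn_len;
}

A call like patch_site(buf, buflen, native_cli, native_cli + sizeof(native_cli))
then writes the single 0xfa (cli) byte; the kernel's paravirt_patch_insns()
plays this role in the real code.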

File tree: 2 files changed, +81 -65 lines changed

arch/x86/include/asm/paravirt_types.h

Lines changed: 0 additions & 4 deletions

@@ -370,10 +370,6 @@ extern struct paravirt_patch_template pv_ops;
 /* Simple instruction patching code. */
 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
 
-#define DEF_NATIVE(ops, name, code)					\
-	__visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
-	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
-
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_default(u8 type, void *insnbuf,
 				unsigned long addr, unsigned len);
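
For one concrete op, expanding the deleted macro by hand shows the construct
being removed. DEF_NATIVE(irq, irq_disable, "cli") resolved to roughly this
(manual expansion, string literals split for readability):

__visible extern const char start_irq_irq_disable[], end_irq_irq_disable[];
asm("\n\t.globl start_irq_irq_disable\n"
    "start_irq_irq_disable:\n\t"
    "cli"
    "\n\t.globl end_irq_irq_disable\n"
    "end_irq_irq_disable:\n\t");

Nothing ties the toplevel asm statement to the extern declarations next to
it, which is why the construct only worked by chance and falls apart once
LTO starts shuffling translation units.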

arch/x86/kernel/paravirt_patch.c

Lines changed: 81 additions & 61 deletions

@@ -4,103 +4,123 @@
 #include <asm/paravirt.h>
 #include <asm/asm-offsets.h>
 
-#ifdef CONFIG_X86_64
-# ifdef CONFIG_PARAVIRT_XXL
-DEF_NATIVE(irq, irq_disable, "cli");
-DEF_NATIVE(irq, irq_enable, "sti");
-DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
-DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
-DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
-DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(cpu, wbinvd, "wbinvd");
-DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
-DEF_NATIVE(cpu, swapgs, "swapgs");
-DEF_NATIVE(, mov64, "mov %rdi, %rax");
+#define PSTART(d, m)						\
+	patch_data_##d.m
 
-unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
-{
-	return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64);
-}
-# endif /* CONFIG_PARAVIRT_XXL */
+#define PEND(d, m)						\
+	(PSTART(d, m) + sizeof(patch_data_##d.m))
 
-# ifdef CONFIG_PARAVIRT_SPINLOCKS
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-# endif
+#define PATCH(d, m, ibuf, len)					\
+	paravirt_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m))
 
-#else /* CONFIG_X86_64 */
+#define PATCH_CASE(ops, m, data, ibuf, len)			\
+	case PARAVIRT_PATCH(ops.m):				\
+		return PATCH(data, ops##_##m, ibuf, len)
 
-# ifdef CONFIG_PARAVIRT_XXL
-DEF_NATIVE(irq, irq_disable, "cli");
-DEF_NATIVE(irq, irq_enable, "sti");
-DEF_NATIVE(irq, restore_fl, "push %eax; popf");
-DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
-DEF_NATIVE(cpu, iret, "iret");
-DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
+#ifdef CONFIG_PARAVIRT_XXL
+struct patch_xxl {
+	const unsigned char	irq_irq_disable[1];
+	const unsigned char	irq_irq_enable[1];
+	const unsigned char	irq_restore_fl[2];
+	const unsigned char	irq_save_fl[2];
+	const unsigned char	mmu_read_cr2[3];
+	const unsigned char	mmu_read_cr3[3];
+	const unsigned char	mmu_write_cr3[3];
+# ifdef CONFIG_X86_64
+	const unsigned char	cpu_wbinvd[2];
+	const unsigned char	cpu_usergs_sysret64[6];
+	const unsigned char	cpu_swapgs[3];
+	const unsigned char	mov64[3];
+# else
+	const unsigned char	cpu_iret[1];
+# endif
+};
+
+static const struct patch_xxl patch_data_xxl = {
+	.irq_irq_disable	= { 0xfa },		// cli
+	.irq_irq_enable		= { 0xfb },		// sti
+	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
+	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
+	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
+# ifdef CONFIG_X86_64
+	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
+	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
+	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
+	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
+				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
+	.cpu_swapgs		= { 0x0f, 0x01, 0xf8 },	// swapgs
+	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
+# else
+	.irq_restore_fl		= { 0x50, 0x9d },	// push %eax; popf
+	.mmu_write_cr3		= { 0x0f, 0x22, 0xd8 },	// mov %eax, %cr3
+	.cpu_iret		= { 0xcf },		// iret
+# endif
+};
 
 unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
 {
-	/* arg in %edx:%eax, return in %edx:%eax */
+#ifdef CONFIG_X86_64
+	return PATCH(xxl, mov64, insnbuf, len);
+#endif
 	return 0;
 }
 # endif /* CONFIG_PARAVIRT_XXL */
 
-# ifdef CONFIG_PARAVIRT_SPINLOCKS
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-# endif
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+struct patch_lock {
+	unsigned char queued_spin_unlock[3];
+	unsigned char vcpu_is_preempted[2];
+};
+
+static const struct patch_lock patch_data_lock = {
+	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax
 
-#endif /* !CONFIG_X86_64 */
+# ifdef CONFIG_X86_64
+	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
+# else
+	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
+# endif
+};
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
 unsigned int native_patch(u8 type, void *ibuf, unsigned long addr,
 			  unsigned int len)
 {
-#define PATCH_SITE(ops, x)					\
-	case PARAVIRT_PATCH(ops.x):				\
-		return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
-
 	switch (type) {
+
 #ifdef CONFIG_PARAVIRT_XXL
-	PATCH_SITE(irq, restore_fl);
-	PATCH_SITE(irq, save_fl);
-	PATCH_SITE(irq, irq_enable);
-	PATCH_SITE(irq, irq_disable);
+	PATCH_CASE(irq, restore_fl, xxl, ibuf, len);
+	PATCH_CASE(irq, save_fl, xxl, ibuf, len);
+	PATCH_CASE(irq, irq_enable, xxl, ibuf, len);
+	PATCH_CASE(irq, irq_disable, xxl, ibuf, len);
 
-	PATCH_SITE(mmu, read_cr2);
-	PATCH_SITE(mmu, read_cr3);
-	PATCH_SITE(mmu, write_cr3);
+	PATCH_CASE(mmu, read_cr2, xxl, ibuf, len);
+	PATCH_CASE(mmu, read_cr3, xxl, ibuf, len);
+	PATCH_CASE(mmu, write_cr3, xxl, ibuf, len);
 
 # ifdef CONFIG_X86_64
-	PATCH_SITE(cpu, usergs_sysret64);
-	PATCH_SITE(cpu, swapgs);
-	PATCH_SITE(cpu, wbinvd);
+	PATCH_CASE(cpu, usergs_sysret64, xxl, ibuf, len);
+	PATCH_CASE(cpu, swapgs, xxl, ibuf, len);
+	PATCH_CASE(cpu, wbinvd, xxl, ibuf, len);
 # else
-	PATCH_SITE(cpu, iret);
+	PATCH_CASE(cpu, iret, xxl, ibuf, len);
 # endif
 #endif
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock())
-			return paravirt_patch_insns(ibuf, len,
-						    start_lock_queued_spin_unlock,
-						    end_lock_queued_spin_unlock);
+			return PATCH(lock, queued_spin_unlock, ibuf, len);
 		break;
 
 	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
 		if (pv_is_native_vcpu_is_preempted())
-			return paravirt_patch_insns(ibuf, len,
-						    start_lock_vcpu_is_preempted,
-						    end_lock_vcpu_is_preempted);
+			return PATCH(lock, vcpu_is_preempted, ibuf, len);
 		break;
 #endif
-
 	default:
 		break;
 	}
-#undef PATCH_SITE
+
 	return paravirt_patch_default(type, ibuf, addr, len);
 }
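
To see how the new macros compose, it helps to trace one case by hand (a
manual expansion of the macros added above, not extra kernel code):

/* PATCH_CASE(irq, save_fl, xxl, ibuf, len) expands to: */
case PARAVIRT_PATCH(irq.save_fl):
	return paravirt_patch_insns(ibuf, len,
				    patch_data_xxl.irq_save_fl,
				    patch_data_xxl.irq_save_fl +
				    sizeof(patch_data_xxl.irq_save_fl));

The start/end pair that DEF_NATIVE() used to fabricate out of asm labels is
now simply the array and the array plus its size.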
