 #include <asm/paravirt.h>
 #include <asm/asm-offsets.h>
 
-#ifdef CONFIG_X86_64
-# ifdef CONFIG_PARAVIRT_XXL
-DEF_NATIVE(irq, irq_disable, "cli");
-DEF_NATIVE(irq, irq_enable, "sti");
-DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
-DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
-DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
-DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(cpu, wbinvd, "wbinvd");
-DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
-DEF_NATIVE(cpu, swapgs, "swapgs");
-DEF_NATIVE(, mov64, "mov %rdi, %rax");
+#define PSTART(d, m)                                            \
+        patch_data_##d.m
 
-unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
-{
-        return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64);
-}
-# endif /* CONFIG_PARAVIRT_XXL */
+#define PEND(d, m)                                              \
+        (PSTART(d, m) + sizeof(patch_data_##d.m))
 
-# ifdef CONFIG_PARAVIRT_SPINLOCKS
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-# endif
+#define PATCH(d, m, ibuf, len)                                  \
+        paravirt_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m))
 
-#else /* CONFIG_X86_64 */
+#define PATCH_CASE(ops, m, data, ibuf, len)                     \
+        case PARAVIRT_PATCH(ops.m):                             \
+                return PATCH(data, ops##_##m, ibuf, len)
 
-# ifdef CONFIG_PARAVIRT_XXL
-DEF_NATIVE(irq, irq_disable, "cli");
-DEF_NATIVE(irq, irq_enable, "sti");
-DEF_NATIVE(irq, restore_fl, "push %eax; popf");
-DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
-DEF_NATIVE(cpu, iret, "iret");
-DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
+#ifdef CONFIG_PARAVIRT_XXL
+struct patch_xxl {
+        const unsigned char irq_irq_disable[1];
+        const unsigned char irq_irq_enable[1];
+        const unsigned char irq_restore_fl[2];
+        const unsigned char irq_save_fl[2];
+        const unsigned char mmu_read_cr2[3];
+        const unsigned char mmu_read_cr3[3];
+        const unsigned char mmu_write_cr3[3];
+# ifdef CONFIG_X86_64
+        const unsigned char cpu_wbinvd[2];
+        const unsigned char cpu_usergs_sysret64[6];
+        const unsigned char cpu_swapgs[3];
+        const unsigned char mov64[3];
+# else
+        const unsigned char cpu_iret[1];
+# endif
+};
+
+static const struct patch_xxl patch_data_xxl = {
+        .irq_irq_disable     = { 0xfa },                // cli
+        .irq_irq_enable      = { 0xfb },                // sti
+        .irq_save_fl         = { 0x9c, 0x58 },          // pushf; pop %[re]ax
+        .mmu_read_cr2        = { 0x0f, 0x20, 0xd0 },    // mov %cr2, %[re]ax
+        .mmu_read_cr3        = { 0x0f, 0x20, 0xd8 },    // mov %cr3, %[re]ax
+# ifdef CONFIG_X86_64
+        .irq_restore_fl      = { 0x57, 0x9d },          // push %rdi; popfq
+        .mmu_write_cr3       = { 0x0f, 0x22, 0xdf },    // mov %rdi, %cr3
+        .cpu_wbinvd          = { 0x0f, 0x09 },          // wbinvd
+        .cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8,
+                                 0x48, 0x0f, 0x07 },    // swapgs; sysretq
+        .cpu_swapgs          = { 0x0f, 0x01, 0xf8 },    // swapgs
+        .mov64               = { 0x48, 0x89, 0xf8 },    // mov %rdi, %rax
+# else
+        .irq_restore_fl      = { 0x50, 0x9d },          // push %eax; popf
+        .mmu_write_cr3       = { 0x0f, 0x22, 0xd8 },    // mov %eax, %cr3
+        .cpu_iret            = { 0xcf },                // iret
+# endif
+};
 
 unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
 {
-        /* arg in %edx:%eax, return in %edx:%eax */
+#ifdef CONFIG_X86_64
+        return PATCH(xxl, mov64, insnbuf, len);
+#endif
         return 0;
 }
 # endif /* CONFIG_PARAVIRT_XXL */
 
-# ifdef CONFIG_PARAVIRT_SPINLOCKS
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-# endif
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+struct patch_lock {
+        unsigned char queued_spin_unlock[3];
+        unsigned char vcpu_is_preempted[2];
+};
+
+static const struct patch_lock patch_data_lock = {
+        .vcpu_is_preempted   = { 0x31, 0xc0 },          // xor %eax, %eax
 
-#endif /* !CONFIG_X86_64 */
+# ifdef CONFIG_X86_64
+        .queued_spin_unlock  = { 0xc6, 0x07, 0x00 },    // movb $0, (%rdi)
+# else
+        .queued_spin_unlock  = { 0xc6, 0x00, 0x00 },    // movb $0, (%eax)
+# endif
+};
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
 unsigned int native_patch(u8 type, void *ibuf, unsigned long addr,
                           unsigned int len)
 {
-#define PATCH_SITE(ops, x)                                      \
-        case PARAVIRT_PATCH(ops.x):                             \
-                return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
-
         switch (type) {
+
 #ifdef CONFIG_PARAVIRT_XXL
-        PATCH_SITE(irq, restore_fl);
-        PATCH_SITE(irq, save_fl);
-        PATCH_SITE(irq, irq_enable);
-        PATCH_SITE(irq, irq_disable);
+        PATCH_CASE(irq, restore_fl, xxl, ibuf, len);
+        PATCH_CASE(irq, save_fl, xxl, ibuf, len);
+        PATCH_CASE(irq, irq_enable, xxl, ibuf, len);
+        PATCH_CASE(irq, irq_disable, xxl, ibuf, len);
 
-        PATCH_SITE(mmu, read_cr2);
-        PATCH_SITE(mmu, read_cr3);
-        PATCH_SITE(mmu, write_cr3);
+        PATCH_CASE(mmu, read_cr2, xxl, ibuf, len);
+        PATCH_CASE(mmu, read_cr3, xxl, ibuf, len);
+        PATCH_CASE(mmu, write_cr3, xxl, ibuf, len);
 
 # ifdef CONFIG_X86_64
-        PATCH_SITE(cpu, usergs_sysret64);
-        PATCH_SITE(cpu, swapgs);
-        PATCH_SITE(cpu, wbinvd);
+        PATCH_CASE(cpu, usergs_sysret64, xxl, ibuf, len);
+        PATCH_CASE(cpu, swapgs, xxl, ibuf, len);
+        PATCH_CASE(cpu, wbinvd, xxl, ibuf, len);
 # else
-        PATCH_SITE(cpu, iret);
+        PATCH_CASE(cpu, iret, xxl, ibuf, len);
 # endif
 #endif
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
         case PARAVIRT_PATCH(lock.queued_spin_unlock):
                 if (pv_is_native_spin_unlock())
-                        return paravirt_patch_insns(ibuf, len,
-                                                    start_lock_queued_spin_unlock,
-                                                    end_lock_queued_spin_unlock);
+                        return PATCH(lock, queued_spin_unlock, ibuf, len);
                 break;
 
         case PARAVIRT_PATCH(lock.vcpu_is_preempted):
                 if (pv_is_native_vcpu_is_preempted())
-                        return paravirt_patch_insns(ibuf, len,
-                                                    start_lock_vcpu_is_preempted,
-                                                    end_lock_vcpu_is_preempted);
+                        return PATCH(lock, vcpu_is_preempted, ibuf, len);
                 break;
 #endif
-
         default:
                 break;
         }
-#undef PATCH_SITE
+
         return paravirt_patch_default(type, ibuf, addr, len);
 }
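For orientation (this note is not part of the commit): the new scheme replaces the start_*/end_* labels that DEF_NATIVE used to emit with byte tables whose member array sizes encode each replacement instruction's length, so PSTART/PEND/PATCH can derive the (start, end) range from sizeof. Below is a minimal, self-contained toy in the same shape, assuming made-up names (patch_demo, patch_data_demo, toy_patch_insns) that stand in for the kernel's table and for paravirt_patch_insns(); it is a sketch of the pattern, not kernel code.

/*
 * Toy illustration only: all names here are hypothetical stand-ins.
 * It mirrors how the PSTART/PEND/PATCH macros above turn a
 * designated-initializer byte table into (start, end) pointers.
 */
#include <stdio.h>
#include <string.h>

struct patch_demo {
        const unsigned char irq_irq_disable[1];
        const unsigned char irq_save_fl[2];
};

static const struct patch_demo patch_data_demo = {
        .irq_irq_disable = { 0xfa },            // cli
        .irq_save_fl     = { 0x9c, 0x58 },      // pushf; pop %[re]ax
};

/* Same shape as the macros in the patch, but pointed at the demo table. */
#define PSTART(d, m)            patch_data_##d.m
#define PEND(d, m)              (PSTART(d, m) + sizeof(patch_data_##d.m))
#define PATCH(d, m, ibuf, len)  toy_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m))

/* Stand-in for the patcher: copy the bytes if they fit, return bytes copied (0 otherwise). */
static unsigned int toy_patch_insns(void *ibuf, unsigned int len,
                                    const unsigned char *start,
                                    const unsigned char *end)
{
        unsigned int insn_len = end - start;

        if (insn_len > len)
                return 0;
        memcpy(ibuf, start, insn_len);
        return insn_len;
}

int main(void)
{
        unsigned char buf[8];
        unsigned int n = PATCH(demo, irq_save_fl, buf, sizeof(buf));

        printf("patched %u byte(s): %02x %02x\n", n, buf[0], buf[1]);
        return 0;
}

In the same spirit, a line such as PATCH_CASE(irq, save_fl, xxl, ibuf, len) in native_patch() should expand, roughly, to a case PARAVIRT_PATCH(irq.save_fl) label that returns paravirt_patch_insns(ibuf, len, patch_data_xxl.irq_save_fl, patch_data_xxl.irq_save_fl + sizeof(patch_data_xxl.irq_save_fl)).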