 // SPDX-License-Identifier: GPL-2.0
+#include <linux/stringify.h>
+
 #include <asm/paravirt.h>
 #include <asm/asm-offsets.h>
-#include <linux/stringify.h>
 
-#ifdef CONFIG_PARAVIRT_XXL
+#ifdef CONFIG_X86_64
+# ifdef CONFIG_PARAVIRT_XXL
 DEF_NATIVE(irq, irq_disable, "cli");
 DEF_NATIVE(irq, irq_enable, "sti");
 DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
@@ -12,24 +14,49 @@ DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
 DEF_NATIVE(cpu, wbinvd, "wbinvd");
-
 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(cpu, swapgs, "swapgs");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
-unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
 {
-	return paravirt_patch_insns(insnbuf, len,
-				    start__mov64, end__mov64);
+	return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64);
 }
-#endif
+# endif /* CONFIG_PARAVIRT_XXL */
 
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+# ifdef CONFIG_PARAVIRT_SPINLOCKS
 DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
 DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-#endif
+# endif
+
+#else /* CONFIG_X86_64 */
+
+# ifdef CONFIG_PARAVIRT_XXL
+DEF_NATIVE(irq, irq_disable, "cli");
+DEF_NATIVE(irq, irq_enable, "sti");
+DEF_NATIVE(irq, restore_fl, "push %eax; popf");
+DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
+DEF_NATIVE(cpu, iret, "iret");
+DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
+DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
+DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
+
+unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
+{
+	/* arg in %edx:%eax, return in %edx:%eax */
+	return 0;
+}
+# endif /* CONFIG_PARAVIRT_XXL */
+
+# ifdef CONFIG_PARAVIRT_SPINLOCKS
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
+# endif
+
+#endif /* !CONFIG_X86_64 */
 
-unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
+unsigned int native_patch(u8 type, void *ibuf, unsigned long addr,
+			  unsigned int len)
 {
 #define PATCH_SITE(ops, x)					\
 	case PARAVIRT_PATCH(ops.x):				\
@@ -41,14 +68,21 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 	PATCH_SITE(irq, save_fl);
 	PATCH_SITE(irq, irq_enable);
 	PATCH_SITE(irq, irq_disable);
-	PATCH_SITE(cpu, usergs_sysret64);
-	PATCH_SITE(cpu, swapgs);
-	PATCH_SITE(cpu, wbinvd);
+
 	PATCH_SITE(mmu, read_cr2);
 	PATCH_SITE(mmu, read_cr3);
 	PATCH_SITE(mmu, write_cr3);
+
+# ifdef CONFIG_X86_64
+	PATCH_SITE(cpu, usergs_sysret64);
+	PATCH_SITE(cpu, swapgs);
+	PATCH_SITE(cpu, wbinvd);
+# else
+	PATCH_SITE(cpu, iret);
+# endif
 #endif
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock())
 			return paravirt_patch_insns(ibuf, len,