@@ -5,9 +5,17 @@
 #include <linux/spinlock.h>
 #include <asm/unaccepted_memory.h>
 
-/* Protects unaccepted memory bitmap */
+/* Protects unaccepted memory bitmap and accepting_list */
 static DEFINE_SPINLOCK(unaccepted_memory_lock);
 
+struct accept_range {
+	struct list_head list;
+	unsigned long start;
+	unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
 /*
  * accept_memory() -- Consult bitmap and accept the memory if needed.
  *
@@ -24,6 +32,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
 {
 	struct efi_unaccepted_memory *unaccepted;
 	unsigned long range_start, range_end;
+	struct accept_range range, *entry;
 	unsigned long flags;
 	u64 unit_size;
 
@@ -78,20 +87,67 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
 	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
 		end = unaccepted->size * unit_size * BITS_PER_BYTE;
 
-	range_start = start / unit_size;
-
+	range.start = start / unit_size;
+	range.end = DIV_ROUND_UP(end, unit_size);
+retry:
 	spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+	/*
+	 * Check if anybody else is working on accepting the same memory range.
+	 *
+	 * The check is done with unit_size granularity. It is crucial to catch
+	 * all accept requests to the same unit_size block, even if they don't
+	 * overlap at the physical address level.
+	 */
+	list_for_each_entry(entry, &accepting_list, list) {
+		if (entry->end < range.start)
+			continue;
+		if (entry->start >= range.end)
+			continue;
+
+		/*
+		 * Somebody else is accepting the range, or at least part of it.
+		 *
+		 * Drop the lock and retry until it is complete.
+		 */
+		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+		goto retry;
+	}
+
+	/*
+	 * Register that the range is about to be accepted.
+	 * Make sure nobody else will accept it.
+	 */
+	list_add(&range.list, &accepting_list);
+
+	range_start = range.start;
 	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
-				   DIV_ROUND_UP(end, unit_size)) {
+				   range.end) {
 		unsigned long phys_start, phys_end;
 		unsigned long len = range_end - range_start;
 
 		phys_start = range_start * unit_size + unaccepted->phys_base;
 		phys_end = range_end * unit_size + unaccepted->phys_base;
 
+		/*
+		 * Keep interrupts disabled until the accept operation is
+		 * complete in order to prevent deadlocks.
+		 *
+		 * Enabling interrupts before calling arch_accept_memory()
+		 * creates an opportunity for an interrupt handler to request
+		 * acceptance of the same memory. The handler will continuously
+		 * spin with interrupts disabled, preventing other tasks from
+		 * making progress with the acceptance process.
+		 */
+		spin_unlock(&unaccepted_memory_lock);
+
 		arch_accept_memory(phys_start, phys_end);
+
+		spin_lock(&unaccepted_memory_lock);
 		bitmap_clear(unaccepted->bitmap, range_start, len);
 	}
+
+	list_del(&range.list);
 	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
 }
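
The core of the patch is a claim-and-retry pattern around accepting_list: a CPU publishes the range it is about to accept, and anyone who wants an overlapping range spins until that claim disappears. Below is a minimal user-space sketch of the same pattern, using a pthread mutex in place of the spinlock; the names (claim_range, release_range, claims) are hypothetical and only illustrate the idea, not the kernel API.

/*
 * Sketch of the claim-and-retry pattern from the patch, modeled with
 * pthreads. All names here are illustrative, not kernel interfaces.
 */
#include <pthread.h>
#include <stdio.h>

struct claim {
	struct claim *next;
	unsigned long start, end;	/* [start, end) in bitmap units */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct claim *claims;		/* list of in-flight ranges */

/* Spin until no in-flight claim overlaps [start, end), then claim it. */
static void claim_range(struct claim *c, unsigned long start, unsigned long end)
{
	c->start = start;
	c->end = end;
retry:
	pthread_mutex_lock(&lock);
	for (struct claim *e = claims; e; e = e->next) {
		if (e->end <= start || e->start >= end)
			continue;	/* no overlap */
		/* Someone else is working on (part of) the range: retry. */
		pthread_mutex_unlock(&lock);
		goto retry;
	}
	c->next = claims;		/* register the claim */
	claims = c;
	pthread_mutex_unlock(&lock);
}

static void release_range(struct claim *c)
{
	pthread_mutex_lock(&lock);
	for (struct claim **p = &claims; *p; p = &(*p)->next) {
		if (*p == c) {
			*p = c->next;
			break;
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct claim c;

	claim_range(&c, 4, 8);
	printf("accepting units [%lu, %lu)\n", c.start, c.end);
	/* ...the slow acceptance work happens here, outside the lock... */
	release_range(&c);
	return 0;
}

One difference worth noting: the sketch uses plain half-open interval overlap, while the kernel's check (entry->end < range.start rather than <=) is written so that ranges merely touching at a unit boundary also conflict.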
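The other half of the patch drops the spinlock around the slow arch_accept_memory() call so other CPUs can accept unrelated ranges in parallel, then retakes it to clear the bitmap bits. The sketch below shows that shape with a mutex, a single-word bitmap, and a stand-in slow_accept(); the interrupt handling (spin_unlock() rather than spin_unlock_irqrestore()) has no user-space analogue and is covered by the patch's own comment. All names are hypothetical.

/*
 * Sketch of "drop the lock around the slow part, retake it to update
 * shared state". A claim on the range (previous example) is what makes
 * the unlocked window safe. Names are illustrative only.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t unaccepted_bitmap = ~0ULL;	/* bit set == still unaccepted */

static void slow_accept(unsigned long bit)
{
	(void)bit;
	usleep(1000);	/* stand-in for the expensive accept operation */
}

static void accept_unit(unsigned long bit)
{
	pthread_mutex_lock(&lock);
	if (!(unaccepted_bitmap & (1ULL << bit))) {	/* already accepted */
		pthread_mutex_unlock(&lock);
		return;
	}
	/*
	 * Drop the lock for the slow part so other threads can accept
	 * unrelated ranges concurrently; the range claim prevents anyone
	 * from racing on this particular bit.
	 */
	pthread_mutex_unlock(&lock);
	slow_accept(bit);
	pthread_mutex_lock(&lock);
	unaccepted_bitmap &= ~(1ULL << bit);	/* mark as accepted */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	accept_unit(3);
	printf("bitmap now %#llx\n", (unsigned long long)unaccepted_bitmap);
	return 0;
}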