 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
 #define MAX_RETRIES 20000

-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
+static struct sk_buff_head skb_pool;

 static DEFINE_SPINLOCK(queue_lock);
 static int queue_depth;
@@ -190,17 +188,15 @@ static void refill_skbs(void)
         struct sk_buff *skb;
         unsigned long flags;

-        spin_lock_irqsave(&skb_list_lock, flags);
-        while (nr_skbs < MAX_SKBS) {
+        spin_lock_irqsave(&skb_pool.lock, flags);
+        while (skb_pool.qlen < MAX_SKBS) {
                 skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                 if (!skb)
                         break;

-                skb->next = skbs;
-                skbs = skb;
-                nr_skbs++;
+                __skb_queue_tail(&skb_pool, skb);
         }
-        spin_unlock_irqrestore(&skb_list_lock, flags);
+        spin_unlock_irqrestore(&skb_pool.lock, flags);
 }

 static void zap_completion_queue(void)
@@ -229,38 +225,25 @@ static void zap_completion_queue(void)
         put_cpu_var(softnet_data);
 }

-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
-        int once = 1, count = 0;
-        unsigned long flags;
-        struct sk_buff *skb = NULL;
+        int count = 0;
+        struct sk_buff *skb;

         zap_completion_queue();
+        refill_skbs();
 repeat:
-        if (nr_skbs < MAX_SKBS)
-                refill_skbs();

         skb = alloc_skb(len, GFP_ATOMIC);
-
-        if (!skb) {
-                spin_lock_irqsave(&skb_list_lock, flags);
-                skb = skbs;
-                if (skb) {
-                        skbs = skb->next;
-                        skb->next = NULL;
-                        nr_skbs--;
-                }
-                spin_unlock_irqrestore(&skb_list_lock, flags);
-        }
+        if (!skb)
+                skb = skb_dequeue(&skb_pool);

         if(!skb) {
-                count++;
-                if (once && (count == 1000000)) {
-                        printk("out of netpoll skbs!\n");
-                        once = 0;
+                if (++count < 10) {
+                        netpoll_poll(np);
+                        goto repeat;
                 }
-                netpoll_poll(np);
-                goto repeat;
+                return NULL;
         }

         atomic_set(&skb->users, 1);
@@ -770,6 +753,12 @@ int netpoll_setup(struct netpoll *np)
         return -1;
 }

+static int __init netpoll_init(void) {
+        skb_queue_head_init(&skb_pool);
+        return 0;
+}
+core_initcall(netpoll_init);
+
 void netpoll_cleanup(struct netpoll *np)
 {
         struct netpoll_info *npinfo;
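Not part of the patch, but useful context: struct sk_buff_head bundles its own spinlock and length counter, which is what lets the diff drop skb_list_lock, nr_skbs, and the hand-rolled list. The locked helpers (skb_dequeue(), skb_queue_tail()) take queue->lock internally, while the double-underscore variants (__skb_queue_tail()) assume the caller already holds it; that is why refill_skbs() wraps its loop in skb_pool.lock while find_skb() can call skb_dequeue() bare. A minimal sketch of both conventions, with hypothetical demo_* names standing in for the pool:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static struct sk_buff_head demo_pool;   /* hypothetical, stands in for skb_pool */

static void demo_pool_init(void)
{
        /* sets up demo_pool.lock and an empty list (qlen == 0) */
        skb_queue_head_init(&demo_pool);
}

static void demo_pool_refill(int max, int size)
{
        struct sk_buff *skb;
        unsigned long flags;

        /* caller-locked convention: hold the queue's own lock and use
         * the __ helper, which does no locking of its own */
        spin_lock_irqsave(&demo_pool.lock, flags);
        while (demo_pool.qlen < max) {
                skb = alloc_skb(size, GFP_ATOMIC);
                if (!skb)
                        break;
                __skb_queue_tail(&demo_pool, skb);
        }
        spin_unlock_irqrestore(&demo_pool.lock, flags);
}

static struct sk_buff *demo_pool_take(void)
{
        /* self-locking convention: skb_dequeue() takes demo_pool.lock
         * itself and returns NULL when the queue is empty */
        return skb_dequeue(&demo_pool);
}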