Skip to content

Commit a1bcfac

Browse files
Stephen Hemminger authored and David S. Miller committed
netpoll: private skb pool (rev3)
It was a dark and stormy night when Steve first saw the netpoll beast. The beast was odd, and misshapen but not extremely ugly. "Let me take off one of your warts" he said. This wart is where you tried to make an skb list yourself. If the beast had ever run out of memory, he would have stupefied himself unnecessarily. The first try was painful, so he tried again till the bleeding stopped. And again, and again... Signed-off-by: Stephen Hemminger <[email protected]>
1 parent d23ca15 commit a1bcfac

File tree

1 file changed

+21
-32
lines changed

1 file changed

+21
-32
lines changed

net/core/netpoll.c

Lines changed: 21 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -36,9 +36,7 @@
3636
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
3737
#define MAX_RETRIES 20000
3838

39-
static DEFINE_SPINLOCK(skb_list_lock);
40-
static int nr_skbs;
41-
static struct sk_buff *skbs;
39+
static struct sk_buff_head skb_pool;
4240

4341
static DEFINE_SPINLOCK(queue_lock);
4442
static int queue_depth;
@@ -190,17 +188,15 @@ static void refill_skbs(void)
190188
struct sk_buff *skb;
191189
unsigned long flags;
192190

193-
spin_lock_irqsave(&skb_list_lock, flags);
194-
while (nr_skbs < MAX_SKBS) {
191+
spin_lock_irqsave(&skb_pool.lock, flags);
192+
while (skb_pool.qlen < MAX_SKBS) {
195193
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
196194
if (!skb)
197195
break;
198196

199-
skb->next = skbs;
200-
skbs = skb;
201-
nr_skbs++;
197+
__skb_queue_tail(&skb_pool, skb);
202198
}
203-
spin_unlock_irqrestore(&skb_list_lock, flags);
199+
spin_unlock_irqrestore(&skb_pool.lock, flags);
204200
}
205201

206202
static void zap_completion_queue(void)
@@ -229,38 +225,25 @@ static void zap_completion_queue(void)
229225
put_cpu_var(softnet_data);
230226
}
231227

232-
static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
228+
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
233229
{
234-
int once = 1, count = 0;
235-
unsigned long flags;
236-
struct sk_buff *skb = NULL;
230+
int count = 0;
231+
struct sk_buff *skb;
237232

238233
zap_completion_queue();
234+
refill_skbs();
239235
repeat:
240-
if (nr_skbs < MAX_SKBS)
241-
refill_skbs();
242236

243237
skb = alloc_skb(len, GFP_ATOMIC);
244-
245-
if (!skb) {
246-
spin_lock_irqsave(&skb_list_lock, flags);
247-
skb = skbs;
248-
if (skb) {
249-
skbs = skb->next;
250-
skb->next = NULL;
251-
nr_skbs--;
252-
}
253-
spin_unlock_irqrestore(&skb_list_lock, flags);
254-
}
238+
if (!skb)
239+
skb = skb_dequeue(&skb_pool);
255240

256241
if(!skb) {
257-
count++;
258-
if (once && (count == 1000000)) {
259-
printk("out of netpoll skbs!\n");
260-
once = 0;
242+
if (++count < 10) {
243+
netpoll_poll(np);
244+
goto repeat;
261245
}
262-
netpoll_poll(np);
263-
goto repeat;
246+
return NULL;
264247
}
265248

266249
atomic_set(&skb->users, 1);
@@ -770,6 +753,12 @@ int netpoll_setup(struct netpoll *np)
770753
return -1;
771754
}
772755

756+
static int __init netpoll_init(void) {
757+
skb_queue_head_init(&skb_pool);
758+
return 0;
759+
}
760+
core_initcall(netpoll_init);
761+
773762
void netpoll_cleanup(struct netpoll *np)
774763
{
775764
struct netpoll_info *npinfo;

0 commit comments

Comments
 (0)