
Commit b6cd27e

Stephen Hemminger authored and David S. Miller committed
netpoll per device txq
When the netpoll beast got really busy, it tended to clog things, so it stored them for later. But the beast was putting all its skbs in one basket. This was bad because maybe some pipes were clogged and others were not.

Signed-off-by: Stephen Hemminger <[email protected]>
1 parent 93ec2c7 commit b6cd27e
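
In short, the global transmit backlog (one spinlock-protected list shared by every device) becomes a per-device queue: each netpoll_info grows its own sk_buff_head txq plus a work_struct that drains it, so one stalled device no longer holds up packets destined for the others. The sketch below condenses the resulting code paths from the diff; it is simplified, not verbatim, and uses the (void *) work-callback signature that this era's two-argument INIT_WORK() expected.

/* Condensed sketch of the per-device transmit queue after this patch
 * (simplified from the diff below, not verbatim kernel code).
 */
static void queue_process(void *p)
{
        struct netpoll_info *npinfo = p;        /* one per device */
        struct sk_buff *skb;

        /* Drain only this device's backlog in process context. */
        while ((skb = skb_dequeue(&npinfo->txq)))
                dev_queue_xmit(skb);
}

void netpoll_queue(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;

        if (!npinfo) {
                kfree_skb(skb);         /* device not set up for netpoll */
                return;
        }

        /* Park the skb on its own device's queue and kick the worker. */
        skb_queue_tail(&npinfo->txq, skb);
        schedule_work(&npinfo->tx_work);
}

Because skb_queue_tail() and skb_dequeue() take the sk_buff_head's built-in spinlock with interrupts disabled, the explicit queue_lock, queue_depth counter, and head/tail pointers can all disappear; the old MAX_QUEUE_DEPTH cap goes away with them in this version.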

2 files changed: +17 −35 lines


include/linux/netpoll.h

Lines changed: 2 additions & 0 deletions
@@ -33,6 +33,8 @@ struct netpoll_info {
         spinlock_t rx_lock;
         struct netpoll *rx_np; /* netpoll that registered an rx_hook */
         struct sk_buff_head arp_tx; /* list of arp requests to reply to */
+        struct sk_buff_head txq;
+        struct work_struct tx_work;
 };
 
 void netpoll_poll(struct netpoll *np);

net/core/netpoll.c

Lines changed: 15 additions & 35 deletions
@@ -38,10 +38,6 @@
 
 static struct sk_buff_head skb_pool;
 
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
 static atomic_t trapped;
 
 #define NETPOLL_RX_ENABLED 1
@@ -56,46 +52,25 @@ static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(void *p)
 {
-        unsigned long flags;
+        struct netpoll_info *npinfo = p;
         struct sk_buff *skb;
 
-        while (queue_head) {
-                spin_lock_irqsave(&queue_lock, flags);
-
-                skb = queue_head;
-                queue_head = skb->next;
-                if (skb == queue_tail)
-                        queue_head = NULL;
-
-                queue_depth--;
-
-                spin_unlock_irqrestore(&queue_lock, flags);
-
+        while ((skb = skb_dequeue(&npinfo->txq)))
                 dev_queue_xmit(skb);
-        }
-}
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
+}
 
 void netpoll_queue(struct sk_buff *skb)
 {
-        unsigned long flags;
+        struct net_device *dev = skb->dev;
+        struct netpoll_info *npinfo = dev->npinfo;
 
-        if (queue_depth == MAX_QUEUE_DEPTH) {
-                __kfree_skb(skb);
-                return;
+        if (!npinfo)
+                kfree_skb(skb);
+        else {
+                skb_queue_tail(&npinfo->txq, skb);
+                schedule_work(&npinfo->tx_work);
         }
-
-        spin_lock_irqsave(&queue_lock, flags);
-        if (!queue_head)
-                queue_head = skb;
-        else
-                queue_tail->next = skb;
-        queue_tail = skb;
-        queue_depth++;
-        spin_unlock_irqrestore(&queue_lock, flags);
-
-        schedule_work(&send_queue);
 }
 
 static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -658,6 +633,9 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->tries = MAX_RETRIES;
                 spin_lock_init(&npinfo->rx_lock);
                 skb_queue_head_init(&npinfo->arp_tx);
+                skb_queue_head_init(&npinfo->txq);
+                INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+
                 atomic_set(&npinfo->refcnt, 1);
         } else {
                 npinfo = ndev->npinfo;
@@ -780,6 +758,8 @@ void netpoll_cleanup(struct netpoll *np)
                         np->dev->npinfo = NULL;
                         if (atomic_dec_and_test(&npinfo->refcnt)) {
                                 skb_queue_purge(&npinfo->arp_tx);
+                                skb_queue_purge(&npinfo->txq);
+                                flush_scheduled_work();
 
                                 kfree(npinfo);
                         }
