 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
-#define MAX_RETRIES 20000
 
 static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+#define USEC_PER_POLL 50
 #define NETPOLL_RX_ENABLED 1
 #define NETPOLL_RX_DROP 2
 
@@ -72,6 +72,7 @@ static void queue_process(void *p)
                         schedule_delayed_work(&npinfo->tx_work, HZ/10);
                         return;
                 }
+
                 netif_tx_unlock_bh(dev);
         }
 }
@@ -244,50 +245,44 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-        int status;
-        struct netpoll_info *npinfo;
+        int status = NETDEV_TX_BUSY;
+        unsigned long tries;
+        struct net_device *dev = np->dev;
+        struct netpoll_info *npinfo = np->dev->npinfo;
+
+        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+                __kfree_skb(skb);
+                return;
+        }
+
+        /* don't get messages out of order, and no recursion */
+        if ( !(np->drop == netpoll_queue && skb_queue_len(&npinfo->txq))
+             && npinfo->poll_owner != smp_processor_id()
+             && netif_tx_trylock(dev)) {
+
+                /* try until next clock tick */
+                for(tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+                        if (!netif_queue_stopped(dev))
+                                status = dev->hard_start_xmit(skb, dev);
+
+                        if (status == NETDEV_TX_OK)
+                                break;
 
-        if (!np || !np->dev || !netif_running(np->dev)) {
-                __kfree_skb(skb);
-                return;
-        }
+                        /* tickle device maybe there is some cleanup */
+                        netpoll_poll(np);
 
-        npinfo = np->dev->npinfo;
+                        udelay(USEC_PER_POLL);
+                }
+                netif_tx_unlock(dev);
+        }
 
-        /* avoid recursion */
-        if (npinfo->poll_owner == smp_processor_id() ||
-            np->dev->xmit_lock_owner == smp_processor_id()) {
+        if (status != NETDEV_TX_OK) {
+                /* requeue for later */
                 if (np->drop)
                         np->drop(skb);
                 else
                         __kfree_skb(skb);
-                return;
         }
-
-        do {
-                npinfo->tries--;
-                netif_tx_lock(np->dev);
-
-                /*
-                 * network drivers do not expect to be called if the queue is
-                 * stopped.
-                 */
-                status = NETDEV_TX_BUSY;
-                if (!netif_queue_stopped(np->dev))
-                        status = np->dev->hard_start_xmit(skb, np->dev);
-
-                netif_tx_unlock(np->dev);
-
-                /* success */
-                if(!status) {
-                        npinfo->tries = MAX_RETRIES; /* reset */
-                        return;
-                }
-
-                /* transmit busy */
-                netpoll_poll(np);
-                udelay(50);
-        } while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -649,7 +644,7 @@ int netpoll_setup(struct netpoll *np)
         npinfo->rx_np = NULL;
         spin_lock_init(&npinfo->poll_lock);
         npinfo->poll_owner = -1;
-        npinfo->tries = MAX_RETRIES;
+
         spin_lock_init(&npinfo->rx_lock);
         skb_queue_head_init(&npinfo->arp_tx);
         skb_queue_head_init(&npinfo->txq);
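Side note on the new retry bound: with MAX_RETRIES gone, netpoll_send_skb() retries for at most roughly one clock tick, polling the device and delaying USEC_PER_POLL microseconds between attempts, instead of counting down a per-device 20000-iteration budget. A minimal back-of-the-envelope sketch of the resulting budget, assuming jiffies_to_usecs(1) is approximately 1000000/HZ; the helper name retry_budget is made up purely for illustration and is not part of the patch:

    /* Illustrative, userspace-style sketch of the retry budget in the new loop. */
    #define USEC_PER_POLL 50        /* value introduced by this patch */

    static unsigned long retry_budget(unsigned long hz)
    {
            unsigned long usec_per_tick = 1000000UL / hz;   /* approx. jiffies_to_usecs(1) */
            return usec_per_tick / USEC_PER_POLL;           /* loop iterations per send attempt */
    }

    /*
     * Examples: HZ=1000 -> 20 tries (~1 ms), HZ=250 -> 80 tries (~4 ms),
     * HZ=100 -> 200 tries (~10 ms).  The old loop could busy-wait on the
     * order of 20000 * 50 us = 1 s per packet before giving up.
     */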