 
 static struct sk_buff_head skb_pool;
 
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
 static atomic_t trapped;
 
 #define NETPOLL_RX_ENABLED  1
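The file-scope queue state removed above (queue_lock, queue_depth, queue_head/queue_tail) moves into the per-device struct netpoll_info: the hunks below reach it through dev->npinfo as npinfo->txq and npinfo->tx_work. The matching header change (include/linux/netpoll.h) is not part of this excerpt; a minimal sketch of the two fields the code assumes, with the structure's existing members elided:

struct netpoll_info {
	/* existing members (refcnt, rx_lock, arp_tx, ...) elided */
	struct sk_buff_head txq;	/* deferred tx skbs, replaces the global list */
	struct work_struct tx_work;	/* runs queue_process() to drain txq */
};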
@@ -56,46 +52,25 @@ static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(void *p)
 {
-	unsigned long flags;
+	struct netpoll_info *npinfo = p;
 	struct sk_buff *skb;
 
-	while (queue_head) {
-		spin_lock_irqsave(&queue_lock, flags);
-
-		skb = queue_head;
-		queue_head = skb->next;
-		if (skb == queue_tail)
-			queue_head = NULL;
-
-		queue_depth--;
-
-		spin_unlock_irqrestore(&queue_lock, flags);
-
+	while ((skb = skb_dequeue(&npinfo->txq)))
 		dev_queue_xmit(skb);
-	}
-}
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
+}
 
 void netpoll_queue(struct sk_buff *skb)
 {
-	unsigned long flags;
+	struct net_device *dev = skb->dev;
+	struct netpoll_info *npinfo = dev->npinfo;
 
-	if (queue_depth == MAX_QUEUE_DEPTH) {
-		__kfree_skb(skb);
-		return;
+	if (!npinfo)
+		kfree_skb(skb);
+	else {
+		skb_queue_tail(&npinfo->txq, skb);
+		schedule_work(&npinfo->tx_work);
 	}
-
-	spin_lock_irqsave(&queue_lock, flags);
-	if (!queue_head)
-		queue_head = skb;
-	else
-		queue_tail->next = skb;
-	queue_tail = skb;
-	queue_depth++;
-	spin_unlock_irqrestore(&queue_lock, flags);
-
-	schedule_work(&send_queue);
 }
 
 static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -658,6 +633,9 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->tries = MAX_RETRIES;
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
 		npinfo = ndev->npinfo;
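INIT_WORK() above is the three-argument form used by kernels of this vintage (before 2.6.20): the handler takes a void * that is stashed at init time and passed back when the work runs, which is how queue_process() receives npinfo. A self-contained sketch of that pattern with hypothetical names (my_ctx, my_tx_work), mirroring what the patch sets up:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct my_ctx {					/* hypothetical stand-in for netpoll_info */
	struct sk_buff_head txq;
	struct work_struct tx_work;
};

static void my_tx_work(void *p)			/* old-style handler: context arrives as void * */
{
	struct my_ctx *ctx = p;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ctx->txq)))	/* skb_dequeue locks the queue internally */
		dev_queue_xmit(skb);
}

static void my_ctx_init(struct my_ctx *ctx)
{
	skb_queue_head_init(&ctx->txq);
	INIT_WORK(&ctx->tx_work, my_tx_work, ctx);	/* pre-2.6.20: handler plus data pointer */
}

static void my_queue(struct my_ctx *ctx, struct sk_buff *skb)
{
	skb_queue_tail(&ctx->txq, skb);		/* callable from atomic context */
	schedule_work(&ctx->tx_work);		/* defer transmission to the shared workqueue */
}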
@@ -780,6 +758,8 @@ void netpoll_cleanup(struct netpoll *np)
 			np->dev->npinfo = NULL;
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
+				skb_queue_purge(&npinfo->txq);
+				flush_scheduled_work();
 
 				kfree(npinfo);
 			}
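In the cleanup hunk, the ordering looks deliberate: skbs still sitting in txq are dropped with skb_queue_purge(), and flush_scheduled_work() waits for any in-flight queue_process() run on the shared workqueue (the one schedule_work() uses) to finish before npinfo is freed, since that handler dereferences npinfo.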