Commit e1bd4d3

ebiederm authored and davem330 committed
netpoll: Move all receive processing under CONFIG_NETPOLL_TRAP
Make rx_skb_hook and rx in struct netpoll depend on CONFIG_NETPOLL_TRAP.
Make rx_lock, rx_np, and neigh_tx in struct netpoll_info depend on
CONFIG_NETPOLL_TRAP.

Make the functions netpoll_rx_on, netpoll_rx, and netpoll_receive_skb
no-ops when CONFIG_NETPOLL_TRAP is not set.

Only build netpoll_neigh_reply, checksum_udp, service_neigh_queue,
pkt_is_ns, and __netpoll_rx when CONFIG_NETPOLL_TRAP is defined.

Add helper functions netpoll_trap_setup, netpoll_trap_setup_info,
netpoll_trap_cleanup, and netpoll_trap_cleanup_info that initialize and
clean up the receive-specific fields of struct netpoll and
struct netpoll_info when CONFIG_NETPOLL_TRAP is enabled, and do nothing
otherwise.

Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 18b3753 commit e1bd4d3
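The change follows the usual kernel pattern for optional features: the real
definitions live under #ifdef CONFIG_NETPOLL_TRAP, and empty static inline
stubs are provided otherwise, so callers compile unchanged in either
configuration. A minimal sketch of that pattern, for illustration only
(example_info and example_trap_setup are made-up names, not the symbols
touched by this commit):

/* Sketch of the compile-time stub pattern; names here are hypothetical. */
#ifdef CONFIG_NETPOLL_TRAP
struct example_info {
	spinlock_t rx_lock;		/* receive state exists only when trapping is built in */
	struct list_head rx_np;
};

static void example_trap_setup(struct example_info *info)
{
	spin_lock_init(&info->rx_lock);
	INIT_LIST_HEAD(&info->rx_np);
}
#else /* !CONFIG_NETPOLL_TRAP */
struct example_info {
	/* receive-specific fields compiled out entirely */
};

static inline void example_trap_setup(struct example_info *info)
{
	/* no-op: callers need no #ifdefs of their own */
}
#endif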

File tree

2 files changed: +104 -50 lines


include/linux/netpoll.h

Lines changed: 40 additions & 33 deletions
@@ -24,32 +24,38 @@ struct netpoll {
 	struct net_device *dev;
 	char dev_name[IFNAMSIZ];
 	const char *name;
-	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
-			    int offset, int len);
 
 	union inet_addr local_ip, remote_ip;
 	bool ipv6;
 	u16 local_port, remote_port;
 	u8 remote_mac[ETH_ALEN];
 
-	struct list_head rx; /* rx_np list element */
 	struct work_struct cleanup_work;
+
+#ifdef CONFIG_NETPOLL_TRAP
+	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
+			    int offset, int len);
+	struct list_head rx; /* rx_np list element */
+#endif
 };
 
 struct netpoll_info {
 	atomic_t refcnt;
 
-	spinlock_t rx_lock;
 	struct semaphore dev_lock;
-	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
 
-	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
 	struct sk_buff_head txq;
 
 	struct delayed_work tx_work;
 
 	struct netpoll *netpoll;
 	struct rcu_head rcu;
+
+#ifdef CONFIG_NETPOLL_TRAP
+	spinlock_t rx_lock;
+	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
+	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
+#endif
 };
 
 #ifdef CONFIG_NETPOLL
@@ -68,7 +74,6 @@ int netpoll_setup(struct netpoll *np);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free_async(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
-int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			     struct net_device *dev);
 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
@@ -82,25 +87,12 @@ static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 #ifdef CONFIG_NETPOLL_TRAP
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
 {
 	return !list_empty(&npinfo->rx_np);
 }
-#else
-static inline int netpoll_trap(void)
-{
-	return 0;
-}
-static inline void netpoll_set_trap(int trap)
-{
-}
-static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
-{
-	return false;
-}
-#endif
 
-#ifdef CONFIG_NETPOLL
 static inline bool netpoll_rx_on(struct sk_buff *skb)
 {
 	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
@@ -138,6 +130,33 @@ static inline int netpoll_receive_skb(struct sk_buff *skb)
 	return 0;
 }
 
+#else
+static inline int netpoll_trap(void)
+{
+	return 0;
+}
+static inline void netpoll_set_trap(int trap)
+{
+}
+static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
+{
+	return false;
+}
+static inline bool netpoll_rx(struct sk_buff *skb)
+{
+	return false;
+}
+static inline bool netpoll_rx_on(struct sk_buff *skb)
+{
+	return false;
+}
+static inline int netpoll_receive_skb(struct sk_buff *skb)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_NETPOLL
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
 	struct net_device *dev = napi->dev;
@@ -166,18 +185,6 @@ static inline bool netpoll_tx_running(struct net_device *dev)
 }
 
 #else
-static inline bool netpoll_rx(struct sk_buff *skb)
-{
-	return false;
-}
-static inline bool netpoll_rx_on(struct sk_buff *skb)
-{
-	return false;
-}
-static inline int netpoll_receive_skb(struct sk_buff *skb)
-{
-	return 0;
-}
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
 	return NULL;

net/core/netpoll.c

Lines changed: 64 additions & 17 deletions
@@ -48,6 +48,7 @@ static struct sk_buff_head skb_pool;
 
 #ifdef CONFIG_NETPOLL_TRAP
 static atomic_t trapped;
+static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 #endif
 
 DEFINE_STATIC_SRCU(netpoll_srcu);
@@ -61,7 +62,6 @@ DEFINE_STATIC_SRCU(netpoll_srcu);
 				MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
@@ -109,6 +109,7 @@ static void queue_process(struct work_struct *work)
 	}
 }
 
+#ifdef CONFIG_NETPOLL_TRAP
 static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 			    unsigned short ulen, __be32 saddr, __be32 daddr)
 {
@@ -127,6 +128,7 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 
 	return __skb_checksum_complete(skb);
 }
+#endif /* CONFIG_NETPOLL_TRAP */
 
 /*
  * Check whether delayed processing was scheduled for our NIC. If so,
@@ -179,6 +181,7 @@ static void poll_napi(struct net_device *dev, int budget)
 	}
 }
 
+#ifdef CONFIG_NETPOLL_TRAP
 static void service_neigh_queue(struct net_device *dev,
 				struct netpoll_info *npi)
 {
@@ -197,6 +200,12 @@ static void service_neigh_queue(struct net_device *dev,
 	while ((skb = skb_dequeue(&npi->neigh_tx)))
 		netpoll_neigh_reply(skb, npi);
 }
+#else /* !CONFIG_NETPOLL_TRAP */
+static inline void service_neigh_queue(struct net_device *dev,
+				       struct netpoll_info *npi)
+{
+}
+#endif /* CONFIG_NETPOLL_TRAP */
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
@@ -522,6 +531,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
+#ifdef CONFIG_NETPOLL_TRAP
 static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
 	int size, type = ARPOP_REPLY;
@@ -900,6 +910,55 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 	return 0;
 }
 
+static void netpoll_trap_setup_info(struct netpoll_info *npinfo)
+{
+	INIT_LIST_HEAD(&npinfo->rx_np);
+	spin_lock_init(&npinfo->rx_lock);
+	skb_queue_head_init(&npinfo->neigh_tx);
+}
+
+static void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
+{
+	skb_queue_purge(&npinfo->neigh_tx);
+}
+
+static void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+	unsigned long flags;
+	if (np->rx_skb_hook) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		list_add_tail(&np->rx, &npinfo->rx_np);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+}
+
+static void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+	unsigned long flags;
+	if (!list_empty(&npinfo->rx_np)) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		list_del(&np->rx);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+}
+
+#else /* !CONFIG_NETPOLL_TRAP */
+static inline void netpoll_trap_setup_info(struct netpoll_info *npinfo)
+{
+}
+static inline void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
+{
+}
+static inline
+void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+}
+static inline
+void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+}
+#endif /* CONFIG_NETPOLL_TRAP */
+
 void netpoll_print_options(struct netpoll *np)
 {
 	np_info(np, "local port %d\n", np->local_port);
@@ -1023,7 +1082,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
-	unsigned long flags;
 	int err;
 
 	np->dev = ndev;
@@ -1045,11 +1103,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 		goto out;
 	}
 
-	INIT_LIST_HEAD(&npinfo->rx_np);
+	netpoll_trap_setup_info(npinfo);
 
-	spin_lock_init(&npinfo->rx_lock);
 	sema_init(&npinfo->dev_lock, 1);
-	skb_queue_head_init(&npinfo->neigh_tx);
 	skb_queue_head_init(&npinfo->txq);
 	INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
@@ -1068,11 +1124,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 	npinfo->netpoll = np;
 
-	if (np->rx_skb_hook) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_add_tail(&np->rx, &npinfo->rx_np);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
+	netpoll_trap_setup(np, npinfo);
 
 	/* last thing to do is link it to the net device structure */
 	rcu_assign_pointer(ndev->npinfo, npinfo);
@@ -1222,7 +1274,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 	struct netpoll_info *npinfo =
 			container_of(rcu_head, struct netpoll_info, rcu);
 
-	skb_queue_purge(&npinfo->neigh_tx);
+	netpoll_trap_cleanup_info(npinfo);
 	skb_queue_purge(&npinfo->txq);
 
 	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
@@ -1238,7 +1290,6 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
-	unsigned long flags;
 
 	/* rtnl_dereference would be preferable here but
 	 * rcu_cleanup_netpoll path can put us in here safely without
@@ -1248,11 +1299,7 @@ void __netpoll_cleanup(struct netpoll *np)
 	if (!npinfo)
 		return;
 
-	if (!list_empty(&npinfo->rx_np)) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_del(&np->rx);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
+	netpoll_trap_cleanup(np, npinfo);
 
 	synchronize_srcu(&netpoll_srcu);
 