@@ -59,7 +59,6 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/netpoll.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/etherdevice.h>
@@ -424,15 +423,11 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
 	skb->dev = slave_dev;
 	skb->priority = 1;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
-		struct netpoll *np = bond->dev->npinfo->netpoll;
-		slave_dev->npinfo = bond->dev->npinfo;
+	if (unlikely(netpoll_tx_running(slave_dev))) {
 		slave_dev->priv_flags |= IFF_IN_NETPOLL;
-		netpoll_send_skb_on_dev(np, skb, slave_dev);
+		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
 		slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
 	} else
-#endif
 		dev_queue_xmit(skb);
 
 	return 0;
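
Note on the hunk above: the transmit path no longer reaches into bond->dev->npinfo under an #ifdef; it asks netpoll_tx_running() whether the skb is being sent from netpoll context and, if so, routes it through the per-slave netpoll set up by the new slave_enable_netpoll() helper further down. The bond_netpoll_send_skb() wrapper itself is not part of this file; a minimal sketch of what the companion bonding.h helper presumably looks like (its exact body is an assumption inferred from this diff, not shown by it):

	static inline void bond_netpoll_send_skb(const struct slave *slave,
						 struct sk_buff *skb)
	{
	#ifdef CONFIG_NET_POLL_CONTROLLER
		struct netpoll *np = slave->np;	/* per-slave netpoll, set by slave_enable_netpoll() */

		if (np)
			netpoll_send_skb(np, skb);	/* transmit via netpoll, bypassing the qdisc path */
	#endif
	}
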
@@ -1288,63 +1283,113 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * You must hold read lock on bond->lock before calling this.
- */
-static bool slaves_support_netpoll(struct net_device *bond_dev)
+static inline int slave_enable_netpoll(struct slave *slave)
 {
-	struct bonding *bond = netdev_priv(bond_dev);
-	struct slave *slave;
-	int i = 0;
-	bool ret = true;
+	struct netpoll *np;
+	int err = 0;
 
-	bond_for_each_slave(bond, slave, i) {
-		if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
-		    !slave->dev->netdev_ops->ndo_poll_controller)
-			ret = false;
+	np = kzalloc(sizeof(*np), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!np)
+		goto out;
+
+	np->dev = slave->dev;
+	err = __netpoll_setup(np);
+	if (err) {
+		kfree(np);
+		goto out;
 	}
-	return i != 0 && ret;
+	slave->np = np;
+out:
+	return err;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+	struct netpoll *np = slave->np;
+
+	if (!np)
+		return;
+
+	slave->np = NULL;
+	synchronize_rcu_bh();
+	__netpoll_cleanup(np);
+	kfree(np);
+}
+static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
+{
+	if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
+		return false;
+	if (!slave_dev->netdev_ops->ndo_poll_controller)
+		return false;
+	return true;
 }
 
 static void bond_poll_controller(struct net_device *bond_dev)
 {
-	struct bonding *bond = netdev_priv(bond_dev);
+}
+
+static void __bond_netpoll_cleanup(struct bonding *bond)
+{
 	struct slave *slave;
 	int i;
 
-	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev && IS_UP(slave->dev))
-			netpoll_poll_dev(slave->dev);
-	}
+	bond_for_each_slave(bond, slave, i)
+		if (IS_UP(slave->dev))
+			slave_disable_netpoll(slave);
 }
-
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+
+	read_lock(&bond->lock);
+	__bond_netpoll_cleanup(bond);
+	read_unlock(&bond->lock);
+}
+
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+{
+	struct bonding *bond = netdev_priv(dev);
 	struct slave *slave;
-	const struct net_device_ops *ops;
-	int i;
+	int i, err = 0;
 
 	read_lock(&bond->lock);
-	bond_dev->npinfo = NULL;
 	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev) {
-			ops = slave->dev->netdev_ops;
-			if (ops->ndo_netpoll_cleanup)
-				ops->ndo_netpoll_cleanup(slave->dev);
-			else
-				slave->dev->npinfo = NULL;
+		if (!IS_UP(slave->dev))
+			continue;
+		err = slave_enable_netpoll(slave);
+		if (err) {
+			__bond_netpoll_cleanup(bond);
+			break;
 		}
 	}
 	read_unlock(&bond->lock);
+	return err;
 }
 
-#else
+static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
+{
+	return bond->dev->npinfo;
+}
 
+#else
+static inline int slave_enable_netpoll(struct slave *slave)
+{
+	return 0;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+}
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
 }
-
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+{
+	return 0;
+}
+static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
+{
+	return NULL;
+}
 #endif
 
 /*---------------------------------- IOCTL ----------------------------------*/
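
The helpers above keep the per-slave netpoll in slave->np and tear it down by clearing the pointer, waiting in synchronize_rcu_bh(), and only then running __netpoll_cleanup() and kfree(), so a transmitter that picked up the pointer just before it was cleared finishes before the structure is freed. The field itself lives in the bonding header rather than in this file; roughly (member placement and the surrounding members are assumptions for illustration):

	/* drivers/net/bonding/bonding.h (sketch) */
	struct slave {
		struct net_device *dev;		/* first - useful for panic debug */
		/* ... existing members unchanged ... */
	#ifdef CONFIG_NET_POLL_CONTROLLER
		struct netpoll *np;		/* non-NULL only while netpoll is attached */
	#endif
	};
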
@@ -1782,17 +1827,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	bond_set_carrier(bond);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	if (slaves_support_netpoll(bond_dev)) {
-		bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-		if (bond_dev->npinfo)
-			slave_dev->npinfo = bond_dev->npinfo;
-	} else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-		pr_info("New slave device %s does not support netpoll\n",
-			slave_dev->name);
-		pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+	slave_dev->npinfo = bond_netpoll_info(bond);
+	if (slave_dev->npinfo) {
+		if (slave_enable_netpoll(new_slave)) {
+			read_unlock(&bond->lock);
+			pr_info("Error, %s: master_dev is using netpoll, "
+				"but new slave device does not support netpoll.\n",
+				bond_dev->name);
+			res = -EBUSY;
+			goto err_close;
+		}
 	}
 #endif
+
 	read_unlock(&bond->lock);
 
 	res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1994,17 +2041,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 
 	netdev_set_bond_master(slave_dev, NULL);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	read_lock_bh(&bond->lock);
-
-	if (slaves_support_netpoll(bond_dev))
-		bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-	read_unlock_bh(&bond->lock);
-	if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
-		slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
-	else
-		slave_dev->npinfo = NULL;
-#endif
+	slave_disable_netpoll(slave);
 
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
@@ -2039,6 +2076,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
 
 	ret = bond_release(bond_dev, slave_dev);
 	if ((ret == 0) && (bond->slave_cnt == 0)) {
+		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
 		pr_info("%s: destroying bond %s.\n",
 			bond_dev->name, bond_dev->name);
 		unregister_netdevice(bond_dev);
@@ -2116,6 +2154,8 @@ static int bond_release_all(struct net_device *bond_dev)
 
 	netdev_set_bond_master(slave_dev, NULL);
 
+	slave_disable_netpoll(slave);
+
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 
@@ -4654,6 +4694,7 @@ static const struct net_device_ops bond_netdev_ops = {
 	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_netpoll_setup	= bond_netpoll_setup,
 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
 	.ndo_poll_controller	= bond_poll_controller,
 #endif
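
With .ndo_netpoll_setup wired up, attaching netpoll to the bond master (for example netconsole on bond0) lets the netpoll core hand setup down to the driver, which then walks its slaves in bond_netpoll_setup(). The core-side call site is in net/core/netpoll.c rather than in this patch; its shape is roughly the following (a sketch of the assumed caller, not code from this diff):

	/* net/core/netpoll.c, inside __netpoll_setup() (sketch) */
	ops = np->dev->netdev_ops;
	if (ops->ndo_netpoll_setup) {
		err = ops->ndo_netpoll_setup(np->dev, npinfo);
		if (err)
			goto free_npinfo;	/* device refused netpoll */
	}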