Skip to content

Commit 8e7d925

Browse files
committed
Merge branch 'net-mana-big-tcp'
Shradha Gupta says: ==================== net: Enable Big TCP for MANA devices Allow the max gso/gro aggregated pkt size to go up to GSO_MAX_SIZE for MANA NIC. On Azure, this is not possible without allowing the same for netvsc NIC (as the NICs are bonded together). Therefore, we use netif_set_tso_max_size() to set max aggregated pkt size to VF's tso_max_size for netvsc too, when the data path is switched over to the VF The first patch allows MANA to configure aggregated pkt size of up to GSO_MAX_SIZE The second patch enables the same on the netvsc NIC, if the data path for the bonded NIC is switched to the VF --- Changes in v3 * Add ipv6_hopopt_jumbo_remove() while sending Big TCP packets --- Changes in v2 * Instead of using 'tcp segment' throughout the patch used the words 'aggregated pkt size' ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents aefd232 + 6859209 commit 8e7d925

File tree

4 files changed

+29
-6
lines changed

4 files changed

+29
-6
lines changed

drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -256,6 +256,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
256256
if (skb_cow_head(skb, MANA_HEADROOM))
257257
goto tx_drop_count;
258258

259+
if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
260+
goto tx_drop_count;
261+
259262
txq = &apc->tx_qp[txq_idx].txq;
260263
gdma_sq = txq->gdma_sq;
261264
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -2873,6 +2876,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
28732876
ndev->dev_port = port_idx;
28742877
SET_NETDEV_DEV(ndev, gc->dev);
28752878

2879+
netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
2880+
28762881
netif_carrier_off(ndev);
28772882

28782883
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

drivers/net/hyperv/hyperv_net.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1166,6 +1166,8 @@ struct netvsc_device {
11661166
u32 max_chn;
11671167
u32 num_chn;
11681168

1169+
u32 netvsc_gso_max_size;
1170+
11691171
atomic_t open_chn;
11701172
struct work_struct subchan_work;
11711173
wait_queue_head_t subchan_open;

drivers/net/hyperv/netvsc_drv.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2461,6 +2461,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
24612461
} else {
24622462
netdev_info(ndev, "Data path switched %s VF: %s\n",
24632463
vf_is_up ? "to" : "from", vf_netdev->name);
2464+
2465+
/* In Azure, when accelerated networking is enabled, other NICs
2466+
* like MANA, MLX, are configured as a bonded nic with
2467+
* Netvsc(failover) NIC. For bonded NICs, the min of the max
2468+
* pkt aggregate size of the members is propagated in the stack.
2469+
* In order to allow these NICs (MANA/MLX) to use up to
2470+
* GSO_MAX_SIZE gso packet size, we need to allow Netvsc NIC to
2471+
* also support this in the guest.
2472+
* This value is only increased for netvsc NIC when datapath is
2473+
* switched over to the VF
2474+
*/
2475+
if (vf_is_up)
2476+
netif_set_tso_max_size(ndev, vf_netdev->tso_max_size);
2477+
else
2478+
netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size);
24642479
}
24652480

24662481
return NOTIFY_OK;

drivers/net/hyperv/rndis_filter.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1356,9 +1356,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
13561356
struct net_device_context *net_device_ctx = netdev_priv(net);
13571357
struct ndis_offload hwcaps;
13581358
struct ndis_offload_params offloads;
1359-
unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
13601359
int ret;
13611360

1361+
nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;
1362+
13621363
/* Find HW offload capabilities */
13631364
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
13641365
if (ret != 0)
@@ -1390,8 +1391,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
13901391
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
13911392
net->hw_features |= NETIF_F_TSO;
13921393

1393-
if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1394-
gso_max_size = hwcaps.lsov2.ip4_maxsz;
1394+
if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
1395+
nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
13951396
}
13961397

13971398
if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
@@ -1411,8 +1412,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
14111412
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
14121413
net->hw_features |= NETIF_F_TSO6;
14131414

1414-
if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1415-
gso_max_size = hwcaps.lsov2.ip6_maxsz;
1415+
if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
1416+
nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
14161417
}
14171418

14181419
if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
@@ -1438,7 +1439,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
14381439
*/
14391440
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
14401441

1441-
netif_set_tso_max_size(net, gso_max_size);
1442+
netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);
14421443

14431444
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
14441445

0 commit comments

Comments
 (0)