Skip to content

Commit a84a8ab

Browse files
committed
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Fix divide by zero in mlx5, from Talat Batheesh. 2) Guard against invalid GSO packets coming from untrusted guests and arriving in qdisc_pkt_len_init(), from Eric Dumazet. 3) Similarly add such protection to the various protocol GSO handlers. From Willem de Bruijn. 4) Fix regression added to IGMP source address checking for IGMPv3 reports, from Felix Fietkau. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: tls: Correct length of scatterlist in tls_sw_sendpage be2net: restore properly promisc mode after queues reconfiguration net: igmp: fix source address check for IGMPv3 reports gso: validate gso_type in GSO handlers net: qdisc_pkt_len_init() should be more robust ibmvnic: Allocate and request vpd in init_resources ibmvnic: Revert to previous mtu when unsupported value requested ibmvnic: Modify buffer size and number of queues on failover rds: tcp: compute m_ack_seq as offset from ->write_seq usbnet: silence an unnecessary warning cxgb4: fix endianness for vlan value in cxgb4_tc_flower cxgb4: set filter type to 1 for ETH_P_IPV6 net/mlx5e: Fix fixpoint divide exception in mlx5e_am_stats_compare
2 parents 1995266 + 7a8c4dd commit a84a8ab

File tree

19 files changed

+128
-32
lines changed

19 files changed

+128
-32
lines changed

drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
111111
ethtype_mask = 0;
112112
}
113113

114+
if (ethtype_key == ETH_P_IPV6)
115+
fs->type = 1;
116+
114117
fs->val.ethtype = ethtype_key;
115118
fs->mask.ethtype = ethtype_mask;
116119
fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
205208
VLAN_PRIO_SHIFT);
206209
vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
207210
VLAN_PRIO_SHIFT);
208-
fs->val.ivlan = cpu_to_be16(vlan_tci);
209-
fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
211+
fs->val.ivlan = vlan_tci;
212+
fs->mask.ivlan = vlan_tci_mask;
210213

211214
/* Chelsio adapters use ivlan_vld bit to match vlan packets
212215
* as 802.1Q. Also, when vlan tag is present in packets,

drivers/net/ethernet/emulex/benet/be_main.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
46344634

46354635
be_schedule_worker(adapter);
46364636

4637+
/*
4638+
* The IF was destroyed and re-created. We need to clear
4639+
* all promiscuous flags valid for the destroyed IF.
4640+
* Without this promisc mode is not restored during
4641+
* be_open() because the driver thinks that it is
4642+
* already enabled in HW.
4643+
*/
4644+
adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4645+
46374646
if (netif_running(netdev))
46384647
status = be_open(netdev);
46394648

drivers/net/ethernet/ibm/ibmvnic.c

Lines changed: 58 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -410,14 +410,28 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
410410
struct ibmvnic_rx_pool *rx_pool;
411411
int rx_scrqs;
412412
int i, j, rc;
413+
u64 *size_array;
414+
415+
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
416+
be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
413417

414418
rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
415419
for (i = 0; i < rx_scrqs; i++) {
416420
rx_pool = &adapter->rx_pool[i];
417421

418422
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
419423

420-
rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
424+
if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
425+
free_long_term_buff(adapter, &rx_pool->long_term_buff);
426+
rx_pool->buff_size = be64_to_cpu(size_array[i]);
427+
alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
428+
rx_pool->size *
429+
rx_pool->buff_size);
430+
} else {
431+
rc = reset_long_term_buff(adapter,
432+
&rx_pool->long_term_buff);
433+
}
434+
421435
if (rc)
422436
return rc;
423437

@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
439453
static void release_rx_pools(struct ibmvnic_adapter *adapter)
440454
{
441455
struct ibmvnic_rx_pool *rx_pool;
442-
int rx_scrqs;
443456
int i, j;
444457

445458
if (!adapter->rx_pool)
446459
return;
447460

448-
rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
449-
for (i = 0; i < rx_scrqs; i++) {
461+
for (i = 0; i < adapter->num_active_rx_pools; i++) {
450462
rx_pool = &adapter->rx_pool[i];
451463

452464
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
469481

470482
kfree(adapter->rx_pool);
471483
adapter->rx_pool = NULL;
484+
adapter->num_active_rx_pools = 0;
472485
}
473486

474487
static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
493506
return -1;
494507
}
495508

509+
adapter->num_active_rx_pools = 0;
510+
496511
for (i = 0; i < rxadd_subcrqs; i++) {
497512
rx_pool = &adapter->rx_pool[i];
498513

@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
536551
rx_pool->next_free = 0;
537552
}
538553

554+
adapter->num_active_rx_pools = rxadd_subcrqs;
555+
539556
return 0;
540557
}
541558

@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
586603
static void release_tx_pools(struct ibmvnic_adapter *adapter)
587604
{
588605
struct ibmvnic_tx_pool *tx_pool;
589-
int i, tx_scrqs;
606+
int i;
590607

591608
if (!adapter->tx_pool)
592609
return;
593610

594-
tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
595-
for (i = 0; i < tx_scrqs; i++) {
611+
for (i = 0; i < adapter->num_active_tx_pools; i++) {
596612
netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
597613
tx_pool = &adapter->tx_pool[i];
598614
kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
603619

604620
kfree(adapter->tx_pool);
605621
adapter->tx_pool = NULL;
622+
adapter->num_active_tx_pools = 0;
606623
}
607624

608625
static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
619636
if (!adapter->tx_pool)
620637
return -1;
621638

639+
adapter->num_active_tx_pools = 0;
640+
622641
for (i = 0; i < tx_subcrqs; i++) {
623642
tx_pool = &adapter->tx_pool[i];
624643

@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
666685
tx_pool->producer_index = 0;
667686
}
668687

688+
adapter->num_active_tx_pools = tx_subcrqs;
689+
669690
return 0;
670691
}
671692

@@ -860,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
860881
if (adapter->vpd->buff)
861882
len = adapter->vpd->len;
862883

863-
reinit_completion(&adapter->fw_done);
884+
init_completion(&adapter->fw_done);
864885
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
865886
crq.get_vpd_size.cmd = GET_VPD_SIZE;
866887
ibmvnic_send_crq(adapter, &crq);
@@ -922,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
922943
if (!adapter->vpd)
923944
return -ENOMEM;
924945

946+
/* Vital Product Data (VPD) */
947+
rc = ibmvnic_get_vpd(adapter);
948+
if (rc) {
949+
netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
950+
return rc;
951+
}
952+
925953
adapter->map_id = 1;
926954
adapter->napi = kcalloc(adapter->req_rx_queues,
927955
sizeof(struct napi_struct), GFP_KERNEL);
@@ -995,7 +1023,7 @@ static int __ibmvnic_open(struct net_device *netdev)
9951023
static int ibmvnic_open(struct net_device *netdev)
9961024
{
9971025
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
998-
int rc, vpd;
1026+
int rc;
9991027

10001028
mutex_lock(&adapter->reset_lock);
10011029

@@ -1018,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
10181046
rc = __ibmvnic_open(netdev);
10191047
netif_carrier_on(netdev);
10201048

1021-
/* Vital Product Data (VPD) */
1022-
vpd = ibmvnic_get_vpd(adapter);
1023-
if (vpd)
1024-
netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1025-
10261049
mutex_unlock(&adapter->reset_lock);
10271050

10281051
return rc;
@@ -1548,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
15481571
static int do_reset(struct ibmvnic_adapter *adapter,
15491572
struct ibmvnic_rwi *rwi, u32 reset_state)
15501573
{
1574+
u64 old_num_rx_queues, old_num_tx_queues;
15511575
struct net_device *netdev = adapter->netdev;
15521576
int i, rc;
15531577

@@ -1557,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
15571581
netif_carrier_off(netdev);
15581582
adapter->reset_reason = rwi->reset_reason;
15591583

1584+
old_num_rx_queues = adapter->req_rx_queues;
1585+
old_num_tx_queues = adapter->req_tx_queues;
1586+
15601587
if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
15611588
rc = ibmvnic_reenable_crq_queue(adapter);
15621589
if (rc)
@@ -1601,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
16011628
rc = init_resources(adapter);
16021629
if (rc)
16031630
return rc;
1631+
} else if (adapter->req_rx_queues != old_num_rx_queues ||
1632+
adapter->req_tx_queues != old_num_tx_queues) {
1633+
release_rx_pools(adapter);
1634+
release_tx_pools(adapter);
1635+
init_rx_pools(netdev);
1636+
init_tx_pools(netdev);
16041637
} else {
16051638
rc = reset_tx_pools(adapter);
16061639
if (rc)
@@ -3592,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
35923625
*req_value,
35933626
(long int)be64_to_cpu(crq->request_capability_rsp.
35943627
number), name);
3595-
*req_value = be64_to_cpu(crq->request_capability_rsp.number);
3628+
3629+
if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3630+
REQ_MTU) {
3631+
pr_err("mtu of %llu is not supported. Reverting.\n",
3632+
*req_value);
3633+
*req_value = adapter->fallback.mtu;
3634+
} else {
3635+
*req_value =
3636+
be64_to_cpu(crq->request_capability_rsp.number);
3637+
}
3638+
35963639
ibmvnic_send_req_caps(adapter, 1);
35973640
return;
35983641
default:

drivers/net/ethernet/ibm/ibmvnic.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
10911091
u64 opt_rxba_entries_per_subcrq;
10921092
__be64 tx_rx_desc_req;
10931093
u8 map_id;
1094+
u64 num_active_rx_pools;
1095+
u64 num_active_tx_pools;
10941096

10951097
struct tasklet_struct tasklet;
10961098
enum vnic_state state;

drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
201201
return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
202202
MLX5E_AM_STATS_WORSE;
203203

204+
if (!prev->ppms)
205+
return curr->ppms ? MLX5E_AM_STATS_BETTER :
206+
MLX5E_AM_STATS_SAME;
207+
204208
if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
205209
return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
206210
MLX5E_AM_STATS_WORSE;
211+
if (!prev->epms)
212+
return MLX5E_AM_STATS_SAME;
207213

208214
if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
209215
return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :

drivers/net/usb/usbnet.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
457457
void usbnet_defer_kevent (struct usbnet *dev, int work)
458458
{
459459
set_bit (work, &dev->flags);
460-
if (!schedule_work (&dev->kevent)) {
461-
if (net_ratelimit())
462-
netdev_err(dev->net, "kevent %d may have been dropped\n", work);
463-
} else {
460+
if (!schedule_work (&dev->kevent))
461+
netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
462+
else
464463
netdev_dbg(dev->net, "kevent %d scheduled\n", work);
465-
}
466464
}
467465
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
468466

net/core/dev.c

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3151,10 +3151,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
31513151
hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
31523152

31533153
/* + transport layer */
3154-
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3155-
hdr_len += tcp_hdrlen(skb);
3156-
else
3157-
hdr_len += sizeof(struct udphdr);
3154+
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3155+
const struct tcphdr *th;
3156+
struct tcphdr _tcphdr;
3157+
3158+
th = skb_header_pointer(skb, skb_transport_offset(skb),
3159+
sizeof(_tcphdr), &_tcphdr);
3160+
if (likely(th))
3161+
hdr_len += __tcp_hdrlen(th);
3162+
} else {
3163+
struct udphdr _udphdr;
3164+
3165+
if (skb_header_pointer(skb, skb_transport_offset(skb),
3166+
sizeof(_udphdr), &_udphdr))
3167+
hdr_len += sizeof(struct udphdr);
3168+
}
31583169

31593170
if (shinfo->gso_type & SKB_GSO_DODGY)
31603171
gso_segs = DIV_ROUND_UP(skb->len - hdr_len,

net/ipv4/esp4_offload.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
122122
if (!xo)
123123
goto out;
124124

125+
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
126+
goto out;
127+
125128
seq = xo->seq.low;
126129

127130
x = skb->sp->xvec[skb->sp->len - 1];

net/ipv4/igmp.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
332332
return htonl(INADDR_ANY);
333333

334334
for_ifa(in_dev) {
335-
if (inet_ifa_match(fl4->saddr, ifa))
335+
if (fl4->saddr == ifa->ifa_local)
336336
return fl4->saddr;
337337
} endfor_ifa(in_dev);
338338

net/ipv4/tcp_offload.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
3232
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
3333
netdev_features_t features)
3434
{
35+
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
36+
return ERR_PTR(-EINVAL);
37+
3538
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
3639
return ERR_PTR(-EINVAL);
3740

net/ipv4/udp_offload.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
203203
goto out;
204204
}
205205

206+
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
207+
goto out;
208+
206209
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
207210
goto out;
208211

net/ipv6/esp6_offload.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -149,6 +149,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
149149
if (!xo)
150150
goto out;
151151

152+
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
153+
goto out;
154+
152155
seq = xo->seq.low;
153156

154157
x = skb->sp->xvec[skb->sp->len - 1];

net/ipv6/tcpv6_offload.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
4646
{
4747
struct tcphdr *th;
4848

49+
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
50+
return ERR_PTR(-EINVAL);
51+
4952
if (!pskb_may_pull(skb, sizeof(*th)))
5053
return ERR_PTR(-EINVAL);
5154

net/ipv6/udp_offload.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
4242
const struct ipv6hdr *ipv6h;
4343
struct udphdr *uh;
4444

45+
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
46+
goto out;
47+
4548
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
4649
goto out;
4750

net/rds/tcp.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
9090
sizeof(val));
9191
}
9292

93-
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
93+
u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
9494
{
95-
return tcp_sk(tc->t_sock->sk)->snd_nxt;
95+
/* seq# of the last byte of data in tcp send buffer */
96+
return tcp_sk(tc->t_sock->sk)->write_seq;
9697
}
9798

9899
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)

0 commit comments

Comments
 (0)