Commit f3ed4de

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Nothing special here, though Bob's regression fixes for rxe would have
  made it before the rc cycle had there not been such strong winter
  weather!

   - Fix corner cases in the rxe reference counting cleanup that are
     causing regressions in blktests for SRP

   - Two kdoc fixes so W=1 is clean

   - Missing error return in error unwind for mlx5

   - Wrong lock type nesting in IB CM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/rxe: Fix errant WARN_ONCE in rxe_completer()
  RDMA/rxe: Fix extra deref in rxe_rcv_mcast_pkt()
  RDMA/rxe: Fix missed IB reference counting in loopback
  RDMA/uverbs: Fix kernel-doc warning of _uverbs_alloc
  RDMA/mlx5: Set correct kernel-doc identifier
  IB/mlx5: Add missing error code
  RDMA/rxe: Fix missing kconfig dependency on CRYPTO
  RDMA/cm: Fix IRQ restore in ib_send_cm_sidr_rep

2 parents: de5bd6c + 545c4ab

8 files changed, 76 insertions(+), 62 deletions(-)

drivers/infiniband/core/cm.c

Lines changed: 3 additions & 2 deletions
@@ -3651,6 +3651,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
                                    struct ib_cm_sidr_rep_param *param)
 {
         struct ib_mad_send_buf *msg;
+        unsigned long flags;
         int ret;
 
         lockdep_assert_held(&cm_id_priv->lock);
@@ -3676,12 +3677,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
                 return ret;
         }
         cm_id_priv->id.state = IB_CM_IDLE;
-        spin_lock_irq(&cm.lock);
+        spin_lock_irqsave(&cm.lock, flags);
         if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
                 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
                 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
         }
-        spin_unlock_irq(&cm.lock);
+        spin_unlock_irqrestore(&cm.lock, flags);
         return 0;
 }

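Why the irqsave variant matters here: the lockdep_assert_held() in the hunk shows cm_send_sidr_rep_locked() runs with cm_id_priv->lock already held, and a caller such as ib_send_cm_sidr_rep() may have taken that lock with interrupts disabled. spin_unlock_irq() re-enables interrupts unconditionally, so using it on the nested cm.lock would turn interrupts back on in the middle of the caller's critical section. The irqsave/irqrestore pair instead preserves whatever IRQ state the function was entered with. Below is a minimal illustrative sketch of the rule, not the cm.c code itself; outer_lock, inner_lock, and both functions are hypothetical names.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);
static DEFINE_SPINLOCK(inner_lock);

/* Safe to call from any context: restores the IRQ state it found. */
static void inner_update(void)
{
        unsigned long flags;

        spin_lock_irqsave(&inner_lock, flags);
        /* ... touch state shared with an interrupt handler ... */
        spin_unlock_irqrestore(&inner_lock, flags);

        /* spin_unlock_irq(&inner_lock) here would instead re-enable
         * interrupts even when the caller still needs them off.
         */
}

static void outer_path(void)
{
        unsigned long flags;

        spin_lock_irqsave(&outer_lock, flags);  /* IRQs now disabled */
        inner_update();                         /* must not re-enable them */
        spin_unlock_irqrestore(&outer_lock, flags);
}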
drivers/infiniband/core/uverbs_ioctl.c

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
 }
 
 /**
- * uverbs_alloc() - Quickly allocate memory for use with a bundle
+ * _uverbs_alloc() - Quickly allocate memory for use with a bundle
  * @bundle: The bundle
  * @size: Number of bytes to allocate
  * @flags: Allocator flags

drivers/infiniband/hw/mlx5/devx.c

Lines changed: 3 additions & 1 deletion
@@ -2073,8 +2073,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 
                 num_alloc_xa_entries++;
                 event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
-                if (!event_sub)
+                if (!event_sub) {
+                        err = -ENOMEM;
                         goto err;
+                }
 
                 list_add_tail(&event_sub->event_list, &sub_list);
                 uverbs_uobject_get(&ev_file->uobj);

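The mlx5 hunk above fixes a classic error-unwind slip: on allocation failure the code jumped to the unwind label without setting the error code first, so the handler could report success (or a stale value) for a failed kzalloc(). A minimal sketch of the bug class and its fix, assuming a hypothetical subscribe_one() with an err_unwind label:

#include <linux/slab.h>

static int subscribe_one(void **out)
{
        int err = 0;
        void *sub;

        sub = kzalloc(64, GFP_KERNEL);
        if (!sub) {
                err = -ENOMEM;  /* without this line, the function
                                 * would unwind and still return 0 */
                goto err_unwind;
        }

        *out = sub;
        return 0;

err_unwind:
        /* ... undo any partial setup done before the failure ... */
        return err;
}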
drivers/infiniband/hw/mlx5/odp.c

Lines changed: 1 addition & 1 deletion
@@ -1082,7 +1082,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
         return ret ? ret : npages;
 }
 
-/**
+/*
  * Parse a series of data segments for page fault handling.
  *
  * @dev: Pointer to mlx5 IB device

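Both kernel-doc fixes above exist to make W=1 builds clean: that warning level runs scripts/kernel-doc over every comment opened with the double-asterisk marker. Two rules follow. The documented name must match the real function (the uverbs_ioctl.c fix, since the function is actually _uverbs_alloc()), and a comment that is not kernel-doc must not open with that marker at all (the odp.c fix, which demotes the opener to a plain comment). For reference, a well-formed kernel-doc comment looks like the sketch below; frob_widget() and struct widget are hypothetical names, not anything from these patches:

/**
 * frob_widget() - Move a widget into its active state
 * @w: widget to frob; must not be NULL
 *
 * Context: Process context. Takes and releases @w->lock.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int frob_widget(struct widget *w);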
drivers/infiniband/sw/rxe/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@ config RDMA_RXE
         depends on INET && PCI && INFINIBAND
         depends on INFINIBAND_VIRT_DMA
         select NET_UDP_TUNNEL
+        select CRYPTO
         select CRYPTO_CRC32
         help
           This driver implements the InfiniBand RDMA transport over

drivers/infiniband/sw/rxe/rxe_comp.c

Lines changed: 23 additions & 32 deletions
@@ -547,14 +547,16 @@ int rxe_completer(void *arg)
         struct sk_buff *skb = NULL;
         struct rxe_pkt_info *pkt = NULL;
         enum comp_state state;
+        int ret = 0;
 
         rxe_add_ref(qp);
 
         if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
             qp->req.state == QP_STATE_RESET) {
                 rxe_drain_resp_pkts(qp, qp->valid &&
                                     qp->req.state == QP_STATE_ERROR);
-                goto exit;
+                ret = -EAGAIN;
+                goto done;
         }
 
         if (qp->comp.timeout) {
@@ -564,8 +566,10 @@ int rxe_completer(void *arg)
                 qp->comp.timeout_retry = 0;
         }
 
-        if (qp->req.need_retry)
-                goto exit;
+        if (qp->req.need_retry) {
+                ret = -EAGAIN;
+                goto done;
+        }
 
         state = COMPST_GET_ACK;
 
@@ -636,8 +640,6 @@ int rxe_completer(void *arg)
                         break;
 
                 case COMPST_DONE:
-                        if (pkt)
-                                free_pkt(pkt);
                         goto done;
 
                 case COMPST_EXIT:
@@ -660,7 +662,8 @@ int rxe_completer(void *arg)
                             qp->qp_timeout_jiffies)
                                 mod_timer(&qp->retrans_timer,
                                           jiffies + qp->qp_timeout_jiffies);
-                        goto exit;
+                        ret = -EAGAIN;
+                        goto done;
 
                 case COMPST_ERROR_RETRY:
                         /* we come here if the retry timer fired and we did
@@ -672,18 +675,18 @@ int rxe_completer(void *arg)
                          */
 
                         /* there is nothing to retry in this case */
-                        if (!wqe || (wqe->state == wqe_state_posted))
-                                goto exit;
+                        if (!wqe || (wqe->state == wqe_state_posted)) {
+                                pr_warn("Retry attempted without a valid wqe\n");
+                                ret = -EAGAIN;
+                                goto done;
+                        }
 
                         /* if we've started a retry, don't start another
                          * retry sequence, unless this is a timeout.
                          */
                         if (qp->comp.started_retry &&
-                            !qp->comp.timeout_retry) {
-                                if (pkt)
-                                        free_pkt(pkt);
+                            !qp->comp.timeout_retry)
                                 goto done;
-                        }
 
                         if (qp->comp.retry_cnt > 0) {
                                 if (qp->comp.retry_cnt != 7)
@@ -704,8 +707,6 @@ int rxe_completer(void *arg)
                                         qp->comp.started_retry = 1;
                                         rxe_run_task(&qp->req.task, 0);
                                 }
-                                if (pkt)
-                                        free_pkt(pkt);
                                 goto done;
 
                         } else {
@@ -726,8 +727,8 @@ int rxe_completer(void *arg)
                                 mod_timer(&qp->rnr_nak_timer,
                                           jiffies + rnrnak_jiffies(aeth_syn(pkt)
                                           & ~AETH_TYPE_MASK));
-                                free_pkt(pkt);
-                                goto exit;
+                                ret = -EAGAIN;
+                                goto done;
                         } else {
                                 rxe_counter_inc(rxe,
                                                 RXE_CNT_RNR_RETRY_EXCEEDED);
@@ -740,25 +741,15 @@ int rxe_completer(void *arg)
                         WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
                         do_complete(qp, wqe);
                         rxe_qp_error(qp);
-                        if (pkt)
-                                free_pkt(pkt);
-                        goto exit;
+                        ret = -EAGAIN;
+                        goto done;
                 }
         }
 
-exit:
-        /* we come here if we are done with processing and want the task to
-         * exit from the loop calling us
-         */
-        WARN_ON_ONCE(skb);
-        rxe_drop_ref(qp);
-        return -EAGAIN;
-
 done:
-        /* we come here if we have processed a packet we want the task to call
-         * us again to see if there is anything else to do
-         */
-        WARN_ON_ONCE(skb);
+        if (pkt)
+                free_pkt(pkt);
         rxe_drop_ref(qp);
-        return 0;
+
+        return ret;
 }

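The rxe_completer() rework above collapses two exit labels into one. Previously 'exit:' returned -EAGAIN and 'done:' returned 0, only some paths freed the packet before jumping, and an errant WARN_ON_ONCE(skb) could fire on the way out. The new shape carries the status in a ret variable and does all cleanup at a single 'done:' label. A minimal sketch of the pattern, with hypothetical names (run_once(), ready(), release(), struct work_item), not the rxe code itself:

struct work_item;

static bool ready(struct work_item *w);
static void release(struct work_item *w);

/* Return 0 to be called again, -EAGAIN to leave the task loop. */
static int run_once(struct work_item *w)
{
        int ret = 0;

        if (!ready(w)) {
                ret = -EAGAIN;  /* formerly: goto exit; */
                goto done;
        }

        /* ... main processing with many early-out paths, none of
         * which needs its own copy of the cleanup anymore ...
         */

done:
        if (w)
                release(w);     /* cleanup happens exactly once */
        return ret;
}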
drivers/infiniband/sw/rxe/rxe_net.c

Lines changed: 9 additions & 1 deletion
@@ -407,14 +407,22 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
         return 0;
 }
 
+/* fix up a send packet to match the packets
+ * received from UDP before looping them back
+ */
 void rxe_loopback(struct sk_buff *skb)
 {
+        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
         if (skb->protocol == htons(ETH_P_IP))
                 skb_pull(skb, sizeof(struct iphdr));
         else
                 skb_pull(skb, sizeof(struct ipv6hdr));
 
-        rxe_rcv(skb);
+        if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev)))
+                kfree_skb(skb);
+        else
+                rxe_rcv(skb);
 }
 
 struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,

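The loopback fix above makes looped-back packets follow the same lifetime rule as packets arriving from the UDP tunnel: before a packet enters rxe_rcv(), a reference is taken on the ib_device with ib_device_try_get(), so the device cannot finish unregistering while the packet is in flight. If the refcount can no longer be raised, the device is already going away and the only safe action is to free the skb. A sketch of the balance rule with hypothetical deliver() and handle_packet() helpers; in the actual driver the receive path drops the reference with ib_device_put() once the packet is consumed:

static void deliver(struct ib_device *dev, struct sk_buff *skb)
{
        if (!ib_device_try_get(dev)) {
                kfree_skb(skb); /* device unregistering: drop packet */
                return;
        }

        handle_packet(skb);     /* consumes skb ... */
        ib_device_put(dev);     /* ... then balance the try_get */
}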
drivers/infiniband/sw/rxe/rxe_recv.c

Lines changed: 35 additions & 24 deletions
@@ -237,8 +237,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
         struct rxe_mc_elem *mce;
         struct rxe_qp *qp;
         union ib_gid dgid;
-        struct sk_buff *per_qp_skb;
-        struct rxe_pkt_info *per_qp_pkt;
         int err;
 
         if (skb->protocol == htons(ETH_P_IP))
@@ -250,10 +248,15 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
         /* lookup mcast group corresponding to mgid, takes a ref */
         mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
         if (!mcg)
-                goto err1;      /* mcast group not registered */
+                goto drop;      /* mcast group not registered */
 
         spin_lock_bh(&mcg->mcg_lock);
 
+        /* this is unreliable datagram service so we let
+         * failures to deliver a multicast packet to a
+         * single QP happen and just move on and try
+         * the rest of them on the list
+         */
         list_for_each_entry(mce, &mcg->qp_list, qp_list) {
                 qp = mce->qp;
 
@@ -266,39 +269,47 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
                 if (err)
                         continue;
 
-                /* for all but the last qp create a new clone of the
-                 * skb and pass to the qp. If an error occurs in the
-                 * checks for the last qp in the list we need to
-                 * free the skb since it hasn't been passed on to
-                 * rxe_rcv_pkt() which would free it later.
+                /* for all but the last QP create a new clone of the
+                 * skb and pass to the QP. Pass the original skb to
+                 * the last QP in the list.
                  */
                 if (mce->qp_list.next != &mcg->qp_list) {
-                        per_qp_skb = skb_clone(skb, GFP_ATOMIC);
-                        if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
-                                kfree_skb(per_qp_skb);
+                        struct sk_buff *cskb;
+                        struct rxe_pkt_info *cpkt;
+
+                        cskb = skb_clone(skb, GFP_ATOMIC);
+                        if (unlikely(!cskb))
                                 continue;
+
+                        if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+                                kfree_skb(cskb);
+                                break;
                         }
+
+                        cpkt = SKB_TO_PKT(cskb);
+                        cpkt->qp = qp;
+                        rxe_add_ref(qp);
+                        rxe_rcv_pkt(cpkt, cskb);
                 } else {
-                        per_qp_skb = skb;
-                        /* show we have consumed the skb */
-                        skb = NULL;
+                        pkt->qp = qp;
+                        rxe_add_ref(qp);
+                        rxe_rcv_pkt(pkt, skb);
+                        skb = NULL;     /* mark consumed */
                 }
-
-                if (unlikely(!per_qp_skb))
-                        continue;
-
-                per_qp_pkt = SKB_TO_PKT(per_qp_skb);
-                per_qp_pkt->qp = qp;
-                rxe_add_ref(qp);
-                rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
         }
 
         spin_unlock_bh(&mcg->mcg_lock);
 
         rxe_drop_ref(mcg);      /* drop ref from rxe_pool_get_key. */
 
-err1:
-        /* free skb if not consumed */
+        if (likely(!skb))
+                return;
+
+        /* This only occurs if one of the checks fails on the last
+         * QP in the list above
+         */
+
+drop:
         kfree_skb(skb);
         ib_device_put(&rxe->ib_dev);
 }

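At its heart, the rxe_rcv_mcast_pkt() rework above is an skb ownership protocol: clone the skb for every QP except the last, hand the original to the last QP, and NULL the local pointer once ownership transfers, so the exit path frees the skb only when no QP consumed it. The early return when the skb was consumed also avoids dropping the device reference a second time, which appears to be the "extra deref" named in the commit title. A compact sketch of the same protocol, with hypothetical struct consumer and deliver() stand-ins:

#include <linux/list.h>
#include <linux/skbuff.h>

struct consumer {
        struct list_head list;
};

static void deliver(struct consumer *c, struct sk_buff *skb);

static void fan_out(struct list_head *consumers, struct sk_buff *skb)
{
        struct consumer *c;

        list_for_each_entry(c, consumers, list) {
                if (!list_is_last(&c->list, consumers)) {
                        /* everyone but the last consumer gets a clone */
                        struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);

                        if (unlikely(!cskb))
                                continue;       /* best-effort delivery */
                        deliver(c, cskb);
                } else {
                        deliver(c, skb);        /* original goes to the last */
                        skb = NULL;             /* mark it consumed */
                }
        }

        if (skb)
                kfree_skb(skb); /* nobody took the original */
}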