Skip to content

Commit 2b74c87

Browse files
kwan-intc and jgunthorpe
authored and committed
IB/hfi1: Unreserve a flushed OPFN request
When an OPFN request is flushed, the request is completed without unreserving itself from the send queue. Subsequently, when a new request is post sent, the following warning will be triggered: WARNING: CPU: 4 PID: 8130 at rdmavt/qp.c:1761 rvt_post_send+0x72a/0x880 [rdmavt] Call Trace: [<ffffffffbbb61e41>] dump_stack+0x19/0x1b [<ffffffffbb497688>] __warn+0xd8/0x100 [<ffffffffbb4977cd>] warn_slowpath_null+0x1d/0x20 [<ffffffffc01c941a>] rvt_post_send+0x72a/0x880 [rdmavt] [<ffffffffbb4dcabe>] ? account_entity_dequeue+0xae/0xd0 [<ffffffffbb61d645>] ? __kmalloc+0x55/0x230 [<ffffffffc04e1a4c>] ib_uverbs_post_send+0x37c/0x5d0 [ib_uverbs] [<ffffffffc04e5e36>] ? rdma_lookup_put_uobject+0x26/0x60 [ib_uverbs] [<ffffffffc04dbce6>] ib_uverbs_write+0x286/0x460 [ib_uverbs] [<ffffffffbb6f9457>] ? security_file_permission+0x27/0xa0 [<ffffffffbb641650>] vfs_write+0xc0/0x1f0 [<ffffffffbb64246f>] SyS_write+0x7f/0xf0 [<ffffffffbbb74ddb>] system_call_fastpath+0x22/0x27 This patch fixes the problem by moving rvt_qp_wqe_unreserve() into rvt_qp_complete_swqe() to simplify the code and make it less error-prone. Fixes: ca95f80 ("IB/hfi1: Unreserve a reserved request when it is completed") Link: https://lore.kernel.org/r/[email protected] Cc: <[email protected]> Reviewed-by: Mike Marciniszyn <[email protected]> Reviewed-by: Dennis Dalessandro <[email protected]> Signed-off-by: Kaike Wan <[email protected]> Signed-off-by: Mike Marciniszyn <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent cd48a82 commit 2b74c87

File tree

2 files changed

+4
-7
lines changed

2 files changed

+4
-7
lines changed

drivers/infiniband/hw/hfi1/rc.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
18351835
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
18361836
break;
18371837
trdma_clean_swqe(qp, wqe);
1838-
rvt_qp_wqe_unreserve(qp, wqe);
18391838
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
18401839
rvt_qp_complete_swqe(qp,
18411840
wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
18821881
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
18831882
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
18841883
trdma_clean_swqe(qp, wqe);
1885-
rvt_qp_wqe_unreserve(qp, wqe);
18861884
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
18871885
rvt_qp_complete_swqe(qp,
18881886
wqe,

include/rdma/rdmavt_qp.h

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
608608
/**
609609
* rvt_qp_wqe_unreserve - clean reserved operation
610610
* @qp - the rvt qp
611-
* @wqe - the send wqe
611+
* @flags - send wqe flags
612612
*
613613
* This decrements the reserve use count.
614614
*
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
620620
* the compiler does not juggle the order of the s_last
621621
* ring index and the decrementing of s_reserved_used.
622622
*/
623-
static inline void rvt_qp_wqe_unreserve(
624-
struct rvt_qp *qp,
625-
struct rvt_swqe *wqe)
623+
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
626624
{
627-
if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
625+
if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
628626
atomic_dec(&qp->s_reserved_used);
629627
/* insure no compiler re-order up to s_last change */
630628
smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
853851
u32 byte_len, last;
854852
int flags = wqe->wr.send_flags;
855853

854+
rvt_qp_wqe_unreserve(qp, flags);
856855
rvt_put_qp_swqe(qp, wqe);
857856

858857
need_completion =

0 commit comments

Comments (0)