
Commit 765d677

Steve Wise authored and Doug Ledford committed
IB: new common API for draining queues
Add provider-specific drain_sq/drain_rq functions for providers needing special drain logic.

Add static functions __ib_drain_sq() and __ib_drain_rq() which post noop WRs to the SQ or RQ and block until their completions are processed. This ensures that the application's completions for work requests posted prior to the drain work request have all been processed.

Add API functions ib_drain_sq(), ib_drain_rq(), and ib_drain_qp().

For the drain logic to work, the caller must:

ensure there is room in the CQ(s) and QP for the drain work request and completion.

allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be IB_POLL_DIRECT.

ensure that there are no other contexts that are posting WRs concurrently. Otherwise the drain is not guaranteed.

Reviewed-by: Chuck Lever <[email protected]>
Signed-off-by: Steve Wise <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
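For illustration only (not part of this commit), a minimal sketch of how a ULP might use the new API on a connection-teardown path. The type struct my_conn and the helper my_stop_posting() are hypothetical; the QP's CQs are assumed to have been allocated with ib_alloc_cq() using a poll context other than IB_POLL_DIRECT, with spare slots in the SQ, RQ, and CQ(s) for the drain work requests and their completions.

#include <rdma/ib_verbs.h>

/* Hypothetical connection state owned by the ULP. */
struct my_conn {
        struct ib_qp *qp;
        struct ib_cq *cq;
};

/* Hypothetical helper: ensure no other context will post WRs from here on. */
static void my_stop_posting(struct my_conn *conn)
{
        /* ULP-specific quiescing would go here. */
}

static void my_conn_teardown(struct my_conn *conn)
{
        /* Stop all other contexts from posting WRs first; concurrent
         * posters would void the drain guarantee. */
        my_stop_posting(conn);

        /*
         * Drain both queues: the generic path in this commit moves the
         * QP to the error state, posts a no-op WR on the SQ and RQ, and
         * blocks until the completions for those WRs (and hence for
         * every WR posted before them) have been processed.
         */
        ib_drain_qp(conn->qp);

        /* All outstanding completions have run; safe to tear down. */
        ib_destroy_qp(conn->qp);
        ib_free_cq(conn->cq);
}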
1 parent fc77dbd commit 765d677

2 files changed: +169 -0 lines changed


drivers/infiniband/core/verbs.c

Lines changed: 164 additions & 0 deletions
@@ -1657,3 +1657,167 @@ int ib_sg_to_pages(struct ib_mr *mr,
         return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+        struct ib_cqe cqe;
+        struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+        struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+                                                cqe);
+
+        complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+        struct ib_drain_cqe sdrain;
+        struct ib_send_wr swr = {}, *bad_swr;
+        int ret;
+
+        if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+                WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+                          "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+                return;
+        }
+
+        swr.wr_cqe = &sdrain.cqe;
+        sdrain.cqe.done = ib_drain_qp_done;
+        init_completion(&sdrain.done);
+
+        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+                return;
+        }
+
+        ret = ib_post_send(qp, &swr, &bad_swr);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+                return;
+        }
+
+        wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+        struct ib_drain_cqe rdrain;
+        struct ib_recv_wr rwr = {}, *bad_rwr;
+        int ret;
+
+        if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+                WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+                          "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+                return;
+        }
+
+        rwr.wr_cqe = &rdrain.cqe;
+        rdrain.cqe.done = ib_drain_qp_done;
+        init_completion(&rdrain.done);
+
+        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+                return;
+        }
+
+        ret = ib_post_recv(qp, &rwr, &bad_rwr);
+        if (ret) {
+                WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+                return;
+        }
+
+        wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ *                 application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+        if (qp->device->drain_sq)
+                qp->device->drain_sq(qp);
+        else
+                __ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ *                 application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+        if (qp->device->drain_rq)
+                qp->device->drain_rq(qp);
+        else
+                __ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *                 application on both the RQ and SQ.
+ * @qp:            queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+        ib_drain_sq(qp);
+        ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
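The dispatch in ib_drain_sq()/ib_drain_rq() above falls back to the generic __ib_drain_sq()/__ib_drain_rq() only when the new drain_sq/drain_rq callbacks are NULL. Below is a minimal sketch (illustrative only, not part of this commit) of how a provider with special drain requirements might wire in its own callbacks at device-setup time; my_drain_sq(), my_drain_rq(), and my_setup_drain_ops() are hypothetical names.

#include <rdma/ib_verbs.h>

/* Hypothetical provider-specific drain implementations. */
static void my_drain_sq(struct ib_qp *qp)
{
        /* Provider-specific logic to flush the send queue and wait
         * until all prior SQ completions have been processed. */
}

static void my_drain_rq(struct ib_qp *qp)
{
        /* Provider-specific logic to flush the receive queue and wait
         * until all prior RQ completions have been processed. */
}

/* Called while filling in the provider's struct ib_device, before
 * ib_register_device(). */
static void my_setup_drain_ops(struct ib_device *ibdev)
{
        ibdev->drain_sq = my_drain_sq;  /* used by ib_drain_sq() / ib_drain_qp() */
        ibdev->drain_rq = my_drain_rq;  /* used by ib_drain_rq() / ib_drain_qp() */
}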

include/rdma/ib_verbs.h

Lines changed: 5 additions & 0 deletions
@@ -1846,6 +1846,8 @@ struct ib_device {
         int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
                                                        struct ib_mr_status *mr_status);
         void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+        void                       (*drain_rq)(struct ib_qp *qp);
+        void                       (*drain_sq)(struct ib_qp *qp);
 
         struct ib_dma_mapping_ops   *dma_ops;
 
@@ -3094,4 +3096,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
                       int sg_nents,
                       int (*set_page)(struct ib_mr *, u64));
 
+void ib_drain_rq(struct ib_qp *qp);
+void ib_drain_sq(struct ib_qp *qp);
+void ib_drain_qp(struct ib_qp *qp);
 #endif /* IB_VERBS_H */
