Skip to content

Commit 76b0640

Browse files
committed
Merge branches 'ib_core', 'ib_ipoib', 'srpt', 'drain-cq-v4' and 'net/9p' into k.o/for-4.6
5 parents 5a30247 + 78a50a5 + 387add4 + 4c8ba94 + 7cf20fc commit 76b0640

File tree: 17 files changed (+610, -736 lines)

drivers/infiniband/core/verbs.c

Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1657,3 +1657,167 @@ int ib_sg_to_pages(struct ib_mr *mr,
16571657
return i;
16581658
}
16591659
EXPORT_SYMBOL(ib_sg_to_pages);
1660+
1661+
struct ib_drain_cqe {
1662+
struct ib_cqe cqe;
1663+
struct completion done;
1664+
};
1665+
1666+
static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
1667+
{
1668+
struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
1669+
cqe);
1670+
1671+
complete(&cqe->done);
1672+
}
1673+
1674+
/*
1675+
* Post a WR and block until its completion is reaped for the SQ.
1676+
*/
1677+
static void __ib_drain_sq(struct ib_qp *qp)
1678+
{
1679+
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
1680+
struct ib_drain_cqe sdrain;
1681+
struct ib_send_wr swr = {}, *bad_swr;
1682+
int ret;
1683+
1684+
if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
1685+
WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
1686+
"IB_POLL_DIRECT poll_ctx not supported for drain\n");
1687+
return;
1688+
}
1689+
1690+
swr.wr_cqe = &sdrain.cqe;
1691+
sdrain.cqe.done = ib_drain_qp_done;
1692+
init_completion(&sdrain.done);
1693+
1694+
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
1695+
if (ret) {
1696+
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
1697+
return;
1698+
}
1699+
1700+
ret = ib_post_send(qp, &swr, &bad_swr);
1701+
if (ret) {
1702+
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
1703+
return;
1704+
}
1705+
1706+
wait_for_completion(&sdrain.done);
1707+
}
1708+
1709+
/*
1710+
* Post a WR and block until its completion is reaped for the RQ.
1711+
*/
1712+
static void __ib_drain_rq(struct ib_qp *qp)
1713+
{
1714+
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
1715+
struct ib_drain_cqe rdrain;
1716+
struct ib_recv_wr rwr = {}, *bad_rwr;
1717+
int ret;
1718+
1719+
if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
1720+
WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
1721+
"IB_POLL_DIRECT poll_ctx not supported for drain\n");
1722+
return;
1723+
}
1724+
1725+
rwr.wr_cqe = &rdrain.cqe;
1726+
rdrain.cqe.done = ib_drain_qp_done;
1727+
init_completion(&rdrain.done);
1728+
1729+
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
1730+
if (ret) {
1731+
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
1732+
return;
1733+
}
1734+
1735+
ret = ib_post_recv(qp, &rwr, &bad_rwr);
1736+
if (ret) {
1737+
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
1738+
return;
1739+
}
1740+
1741+
wait_for_completion(&rdrain.done);
1742+
}
1743+
1744+
/**
1745+
* ib_drain_sq() - Block until all SQ CQEs have been consumed by the
1746+
* application.
1747+
* @qp: queue pair to drain
1748+
*
1749+
* If the device has a provider-specific drain function, then
1750+
* call that. Otherwise call the generic drain function
1751+
* __ib_drain_sq().
1752+
*
1753+
* The caller must:
1754+
*
1755+
* ensure there is room in the CQ and SQ for the drain work request and
1756+
* completion.
1757+
*
1758+
* allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
1759+
* IB_POLL_DIRECT.
1760+
*
1761+
* ensure that there are no other contexts that are posting WRs concurrently.
1762+
* Otherwise the drain is not guaranteed.
1763+
*/
1764+
void ib_drain_sq(struct ib_qp *qp)
1765+
{
1766+
if (qp->device->drain_sq)
1767+
qp->device->drain_sq(qp);
1768+
else
1769+
__ib_drain_sq(qp);
1770+
}
1771+
EXPORT_SYMBOL(ib_drain_sq);
1772+
1773+
/**
1774+
* ib_drain_rq() - Block until all RQ CQEs have been consumed by the
1775+
* application.
1776+
* @qp: queue pair to drain
1777+
*
1778+
* If the device has a provider-specific drain function, then
1779+
* call that. Otherwise call the generic drain function
1780+
* __ib_drain_rq().
1781+
*
1782+
* The caller must:
1783+
*
1784+
* ensure there is room in the CQ and RQ for the drain work request and
1785+
* completion.
1786+
*
1787+
* allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
1788+
* IB_POLL_DIRECT.
1789+
*
1790+
* ensure that there are no other contexts that are posting WRs concurrently.
1791+
* Otherwise the drain is not guaranteed.
1792+
*/
1793+
void ib_drain_rq(struct ib_qp *qp)
1794+
{
1795+
if (qp->device->drain_rq)
1796+
qp->device->drain_rq(qp);
1797+
else
1798+
__ib_drain_rq(qp);
1799+
}
1800+
EXPORT_SYMBOL(ib_drain_rq);
1801+
1802+
/**
1803+
* ib_drain_qp() - Block until all CQEs have been consumed by the
1804+
* application on both the RQ and SQ.
1805+
* @qp: queue pair to drain
1806+
*
1807+
* The caller must:
1808+
*
1809+
* ensure there is room in the CQ(s), SQ, and RQ for drain work requests
1810+
* and completions.
1811+
*
1812+
* allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
1813+
* IB_POLL_DIRECT.
1814+
*
1815+
* ensure that there are no other contexts that are posting WRs concurrently.
1816+
* Otherwise the drain is not guaranteed.
1817+
*/
1818+
void ib_drain_qp(struct ib_qp *qp)
1819+
{
1820+
ib_drain_sq(qp);
1821+
ib_drain_rq(qp);
1822+
}
1823+
EXPORT_SYMBOL(ib_drain_qp);

drivers/infiniband/hw/cxgb4/cq.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -815,8 +815,15 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
815815
}
816816
}
817817
out:
818-
if (wq)
818+
if (wq) {
819+
if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
820+
if (t4_sq_empty(wq))
821+
complete(&qhp->sq_drained);
822+
if (t4_rq_empty(wq))
823+
complete(&qhp->rq_drained);
824+
}
819825
spin_unlock(&qhp->lock);
826+
}
820827
return ret;
821828
}
822829

drivers/infiniband/hw/cxgb4/iw_cxgb4.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -476,6 +476,8 @@ struct c4iw_qp {
476476
wait_queue_head_t wait;
477477
struct timer_list timer;
478478
int sq_sig_all;
479+
struct completion rq_drained;
480+
struct completion sq_drained;
479481
};
480482

481483
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -1016,6 +1018,8 @@ extern int c4iw_wr_log;
10161018
extern int db_fc_threshold;
10171019
extern int db_coalescing_threshold;
10181020
extern int use_dsgl;
1021+
void c4iw_drain_rq(struct ib_qp *qp);
1022+
void c4iw_drain_sq(struct ib_qp *qp);
10191023

10201024

10211025
#endif

drivers/infiniband/hw/cxgb4/provider.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -564,6 +564,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
564564
dev->ibdev.get_protocol_stats = c4iw_get_mib;
565565
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
566566
dev->ibdev.get_port_immutable = c4iw_port_immutable;
567+
dev->ibdev.drain_sq = c4iw_drain_sq;
568+
dev->ibdev.drain_rq = c4iw_drain_rq;
567569

568570
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
569571
if (!dev->ibdev.iwcm)

drivers/infiniband/hw/cxgb4/qp.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1697,6 +1697,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
16971697
qhp->attr.max_ird = 0;
16981698
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
16991699
spin_lock_init(&qhp->lock);
1700+
init_completion(&qhp->sq_drained);
1701+
init_completion(&qhp->rq_drained);
17001702
mutex_init(&qhp->mutex);
17011703
init_waitqueue_head(&qhp->wait);
17021704
atomic_set(&qhp->refcnt, 1);
@@ -1888,3 +1890,17 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
18881890
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
18891891
return 0;
18901892
}
1893+
1894+
void c4iw_drain_sq(struct ib_qp *ibqp)
1895+
{
1896+
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1897+
1898+
wait_for_completion(&qp->sq_drained);
1899+
}
1900+
1901+
void c4iw_drain_rq(struct ib_qp *ibqp)
1902+
{
1903+
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1904+
1905+
wait_for_completion(&qp->rq_drained);
1906+
}

drivers/infiniband/ulp/ipoib/ipoib.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -244,6 +244,7 @@ struct ipoib_cm_tx {
244244
unsigned tx_tail;
245245
unsigned long flags;
246246
u32 mtu;
247+
unsigned max_send_sge;
247248
};
248249

249250
struct ipoib_cm_rx_buf {
@@ -390,6 +391,7 @@ struct ipoib_dev_priv {
390391
int hca_caps;
391392
struct ipoib_ethtool_st ethtool;
392393
struct timer_list poll_timer;
394+
unsigned max_send_sge;
393395
};
394396

395397
struct ipoib_ah {

drivers/infiniband/ulp/ipoib/ipoib_cm.c

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -710,6 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
710710
struct ipoib_dev_priv *priv = netdev_priv(dev);
711711
struct ipoib_tx_buf *tx_req;
712712
int rc;
713+
unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
713714

714715
if (unlikely(skb->len > tx->mtu)) {
715716
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -719,7 +720,23 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
719720
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
720721
return;
721722
}
722-
723+
if (skb_shinfo(skb)->nr_frags > usable_sge) {
724+
if (skb_linearize(skb) < 0) {
725+
ipoib_warn(priv, "skb could not be linearized\n");
726+
++dev->stats.tx_dropped;
727+
++dev->stats.tx_errors;
728+
dev_kfree_skb_any(skb);
729+
return;
730+
}
731+
/* Does skb_linearize return ok without reducing nr_frags? */
732+
if (skb_shinfo(skb)->nr_frags > usable_sge) {
733+
ipoib_warn(priv, "too many frags after skb linearize\n");
734+
++dev->stats.tx_dropped;
735+
++dev->stats.tx_errors;
736+
dev_kfree_skb_any(skb);
737+
return;
738+
}
739+
}
723740
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
724741
tx->tx_head, skb->len, tx->qp->qp_num);
725742

@@ -1031,7 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
10311048
struct ib_qp *tx_qp;
10321049

10331050
if (dev->features & NETIF_F_SG)
1034-
attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
1051+
attr.cap.max_send_sge =
1052+
min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
10351053

10361054
tx_qp = ib_create_qp(priv->pd, &attr);
10371055
if (PTR_ERR(tx_qp) == -EINVAL) {
@@ -1040,6 +1058,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
10401058
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
10411059
tx_qp = ib_create_qp(priv->pd, &attr);
10421060
}
1061+
tx->max_send_sge = attr.cap.max_send_sge;
10431062
return tx_qp;
10441063
}
10451064

drivers/infiniband/ulp/ipoib/ipoib_ib.c

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -538,6 +538,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
538538
struct ipoib_tx_buf *tx_req;
539539
int hlen, rc;
540540
void *phead;
541+
unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
541542

542543
if (skb_is_gso(skb)) {
543544
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -561,6 +562,23 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
561562
phead = NULL;
562563
hlen = 0;
563564
}
565+
if (skb_shinfo(skb)->nr_frags > usable_sge) {
566+
if (skb_linearize(skb) < 0) {
567+
ipoib_warn(priv, "skb could not be linearized\n");
568+
++dev->stats.tx_dropped;
569+
++dev->stats.tx_errors;
570+
dev_kfree_skb_any(skb);
571+
return;
572+
}
573+
/* Does skb_linearize return ok without reducing nr_frags? */
574+
if (skb_shinfo(skb)->nr_frags > usable_sge) {
575+
ipoib_warn(priv, "too many frags after skb linearize\n");
576+
++dev->stats.tx_dropped;
577+
++dev->stats.tx_errors;
578+
dev_kfree_skb_any(skb);
579+
return;
580+
}
581+
}
564582

565583
ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
566584
skb->len, address, qpn);

drivers/infiniband/ulp/ipoib/ipoib_verbs.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
206206
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
207207

208208
if (dev->features & NETIF_F_SG)
209-
init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
209+
init_attr.cap.max_send_sge =
210+
min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
210211

211212
priv->qp = ib_create_qp(priv->pd, &init_attr);
212213
if (IS_ERR(priv->qp)) {
@@ -233,6 +234,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
233234
priv->rx_wr.next = NULL;
234235
priv->rx_wr.sg_list = priv->rx_sge;
235236

237+
priv->max_send_sge = init_attr.cap.max_send_sge;
238+
236239
return 0;
237240

238241
out_free_send_cq:

drivers/infiniband/ulp/iser/iscsi_iser.h

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -458,9 +458,6 @@ struct iser_fr_pool {
458458
* @comp: iser completion context
459459
* @fr_pool: connection fast registration poool
460460
* @pi_support: Indicate device T10-PI support
461-
* @last: last send wr to signal all flush errors were drained
462-
* @last_cqe: cqe handler for last wr
463-
* @last_comp: completes when all connection completions consumed
464461
*/
465462
struct ib_conn {
466463
struct rdma_cm_id *cma_id;
@@ -472,10 +469,7 @@ struct ib_conn {
472469
struct iser_comp *comp;
473470
struct iser_fr_pool fr_pool;
474471
bool pi_support;
475-
struct ib_send_wr last;
476-
struct ib_cqe last_cqe;
477472
struct ib_cqe reg_cqe;
478-
struct completion last_comp;
479473
};
480474

481475
/**
@@ -617,7 +611,6 @@ void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
617611
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
618612
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
619613
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
620-
void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc);
621614

622615
void iser_task_rdma_init(struct iscsi_iser_task *task);
623616

drivers/infiniband/ulp/iser/iser_initiator.c

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -729,13 +729,6 @@ void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
729729
kmem_cache_free(ig.desc_cache, desc);
730730
}
731731

732-
void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc)
733-
{
734-
struct ib_conn *ib_conn = wc->qp->qp_context;
735-
736-
complete(&ib_conn->last_comp);
737-
}
738-
739732
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
740733

741734
{

0 commit comments

Comments
 (0)