@@ -1657,3 +1657,167 @@ int ib_sg_to_pages(struct ib_mr *mr,
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+	struct ib_cqe	cqe;
+	struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+						cqe);
+
+	complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe sdrain;
+	struct ib_send_wr swr = {}, *bad_swr;
+	int ret;
+
+	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+		return;
+	}
+
+	swr.wr_cqe = &sdrain.cqe;
+	sdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&sdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_send(qp, &swr, &bad_swr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe rdrain;
+	struct ib_recv_wr rwr = {}, *bad_rwr;
+	int ret;
+
+	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+		return;
+	}
+
+	rwr.wr_cqe = &rdrain.cqe;
+	rdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&rdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_recv(qp, &rwr, &bad_rwr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ *		   application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+	if (qp->device->drain_sq)
+		qp->device->drain_sq(qp);
+	else
+		__ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ *		   application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+	if (qp->device->drain_rq)
+		qp->device->drain_rq(qp);
+	else
+		__ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *		   application on both the RQ and SQ.
+ * @qp: queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+	ib_drain_sq(qp);
+	ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
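
The mechanism here: moving the QP to IB_QPS_ERR flushes all outstanding WRs, and because completions are delivered in order, reaping the flush completion of a marker WR posted after the transition guarantees every earlier WR has already been reaped. A minimal sketch of a consumer's teardown path built on the new API follows; the "my_conn" structure and my_conn_teardown() are illustrative names, not part of this commit.

	/* Assumes <rdma/ib_verbs.h>; QP and CQ were created with one
	 * spare slot per queue for the drain WRs (see the next sketch). */
	struct my_conn {
		struct ib_qp	*qp;
		struct ib_cq	*cq;
	};

	static void my_conn_teardown(struct my_conn *conn)
	{
		/* Moves the QP to IB_QPS_ERR and blocks until the drain
		 * WRs on both the SQ and RQ have been reaped, i.e. until
		 * the completion handlers of all earlier WRs have run. */
		ib_drain_qp(conn->qp);

		/* Every WR is now accounted for, so per-request resources
		 * can be freed and the QP and CQ destroyed safely. */
		ib_destroy_qp(conn->qp);
		ib_free_cq(conn->cq);
	}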
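The "room in the CQ(s), SQ, and RQ" requirement in the kernel-doc translates into sizing at allocation time: reserve one extra WR slot per queue and matching CQEs. A hedged sketch, with illustrative queue depths (the +1/+2 reservations are the point, not the numbers):

	static struct ib_qp *my_create_drainable_qp(struct ib_device *dev,
						    struct ib_pd *pd)
	{
		struct ib_qp_init_attr init_attr = {};
		struct ib_cq *cq;
		struct ib_qp *qp;

		/* Two extra CQEs so the flushed SQ and RQ drain WRs can
		 * complete; any poll context except IB_POLL_DIRECT works. */
		cq = ib_alloc_cq(dev, NULL, 128 + 2, 0, IB_POLL_WORKQUEUE);
		if (IS_ERR(cq))
			return ERR_CAST(cq);

		init_attr.cap.max_send_wr  = 64 + 1;	/* +1: SQ drain WR */
		init_attr.cap.max_recv_wr  = 64 + 1;	/* +1: RQ drain WR */
		init_attr.cap.max_send_sge = 1;
		init_attr.cap.max_recv_sge = 1;
		init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
		init_attr.qp_type = IB_QPT_RC;
		init_attr.send_cq = cq;
		init_attr.recv_cq = cq;

		qp = ib_create_qp(pd, &init_attr);
		if (IS_ERR(qp))
			ib_free_cq(cq);
		return qp;
	}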