@@ -584,6 +584,7 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 			qp->r_rq.wq->tail = 0;
 		}
 		qp->r_sge.num_sge = 0;
+		atomic_set(&qp->s_reserved_used, 0);
 	}
 
 /**
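The counter cleared above is new per-QP state that the rest of this patch depends on. A minimal sketch of the fields involved, with types inferred from the atomic_set()/atomic_read() accessors and the rdi->dparms usage below; exact header placement and field widths are assumptions, not quoted from the patch:

struct rvt_qp {
	/* ... existing send-side state ... */
	atomic_t s_reserved_used;	/* reserved SQ slots currently in use */
};

struct rvt_driver_params {
	/* ... existing per-device parameters ... */
	u32 reserved_operations;	/* SQ slots set aside for driver use */
};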
@@ -645,7 +646,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		return ERR_PTR(-EINVAL);
 	}
 	sqsize =
-		init_attr->cap.max_send_wr + 1;
+		init_attr->cap.max_send_wr + 1 +
+		rdi->dparms.reserved_operations;
 	switch (init_attr->qp_type) {
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
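With this change the send ring is sized as the user's max_send_wr, plus the one slot the ring-buffer full/empty accounting always leaves unused, plus the device's reserved slots. For example (values chosen for illustration), max_send_wr = 64 with reserved_operations = 1 yields sqsize = 66.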
@@ -1335,7 +1337,8 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
 	attr->dest_qp_num = qp->remote_qpn;
 	attr->qp_access_flags = qp->qp_access_flags;
-	attr->cap.max_send_wr = qp->s_size - 1;
+	attr->cap.max_send_wr = qp->s_size - 1 -
+		rdi->dparms.reserved_operations;
 	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
 	attr->cap.max_send_sge = qp->s_max_sge;
 	attr->cap.max_recv_sge = qp->r_rq.max_sge;
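This mirrors the sizing in rvt_create_qp() above: subtracting both the always-unused slot and reserved_operations from s_size means the query reports exactly the max_send_wr the consumer originally requested, keeping the reserved slots invisible to users.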
@@ -1494,27 +1497,65 @@ static inline int rvt_qp_valid_operation(
 }
 
 /**
- * qp_get_savail - return number of avail send entries
+ * rvt_qp_is_avail - determine queue capacity
  * @qp - the qp
+ * @rdi - the rdmavt device
+ * @reserved_op - is reserved operation
  *
  * This assumes the s_hlock is held but the s_last
  * qp variable is uncontrolled.
  *
- * The return is adjusted to not count device specific
- * reserved operations.
+ * For non reserved operations, the qp->s_avail
+ * may be changed.
+ *
+ * The return value is zero or a -ENOMEM.
 */
-static inline u32 qp_get_savail(struct rvt_qp *qp)
+static inline int rvt_qp_is_avail(
+	struct rvt_qp *qp,
+	struct rvt_dev_info *rdi,
+	bool reserved_op)
 {
 	u32 slast;
-	u32 ret;
-
+	u32 avail;
+	u32 reserved_used;
+
+	/* see rvt_qp_wqe_unreserve() */
+	smp_mb__before_atomic();
+	reserved_used = atomic_read(&qp->s_reserved_used);
+	if (unlikely(reserved_op)) {
+		/* see rvt_qp_wqe_unreserve() */
+		smp_mb__before_atomic();
+		if (reserved_used >= rdi->dparms.reserved_operations)
+			return -ENOMEM;
+		return 0;
+	}
+	/* non-reserved operations */
+	if (likely(qp->s_avail))
+		return 0;
 	smp_read_barrier_depends(); /* see rc.c */
 	slast = ACCESS_ONCE(qp->s_last);
 	if (qp->s_head >= slast)
-		ret = qp->s_size - (qp->s_head - slast);
+		avail = qp->s_size - (qp->s_head - slast);
 	else
-		ret = slast - qp->s_head;
-	return ret - 1;
+		avail = slast - qp->s_head;
+
+	/* see rvt_qp_wqe_unreserve() */
+	smp_mb__before_atomic();
+	reserved_used = atomic_read(&qp->s_reserved_used);
+	avail = avail - 1 -
+		(rdi->dparms.reserved_operations - reserved_used);
+	/* insure we don't assign a negative s_avail */
+	if ((s32)avail <= 0)
+		return -ENOMEM;
+	qp->s_avail = avail;
+	if (WARN_ON(qp->s_avail >
+		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
+		rvt_pr_err(rdi,
+			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
+			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
+			   qp->s_head, qp->s_tail, qp->s_cur,
+			   qp->s_acked, qp->s_last);
+	return 0;
 }
 
 /**
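The accounting above is compact but subtle: one slot is always left unused by the ring arithmetic, and reserved slots not currently consumed by a reserved operation must stay out of the user's budget, while slots already holding reserved operations are not charged to the user. Below is a standalone, single-threaded model of that arithmetic (plain integers, no locking or barriers; all names are illustrative, not kernel API):

#include <stdio.h>

struct model_qp {
	unsigned int s_size;        /* ring size: user WRs + 1 + reserved */
	unsigned int s_head;        /* producer index */
	unsigned int s_last;        /* consumer index */
	unsigned int reserved_ops;  /* slots set aside for reserved use */
	unsigned int reserved_used; /* reserved slots currently consumed */
};

/* Slots a non-reserved post may still claim; 0 models the kernel's -ENOMEM. */
static unsigned int model_avail(const struct model_qp *qp)
{
	unsigned int avail;

	if (qp->s_head >= qp->s_last)
		avail = qp->s_size - (qp->s_head - qp->s_last);
	else
		avail = qp->s_last - qp->s_head;
	/* charge the always-unused slot plus any reserved slots not in use */
	avail = avail - 1 - (qp->reserved_ops - qp->reserved_used);
	return (int)avail <= 0 ? 0 : avail;
}

int main(void)
{
	/* 64 user slots + 1 unused + 1 reserved slot = ring of 66 */
	struct model_qp qp = { .s_size = 66, .reserved_ops = 1 };

	printf("empty ring:       avail = %u\n", model_avail(&qp)); /* 64 */
	qp.s_head = 64;
	printf("64 posts pending: avail = %u\n", model_avail(&qp)); /* 0 */
	qp.reserved_used = 1; /* one of the 64 was a reserved op */
	printf("reserve in use:   avail = %u\n", model_avail(&qp)); /* 1 */
	return 0;
}

Note how the third case frees a user slot: the in-flight reserved operation occupies one of the 64 head-to-last entries, so only 63 of them count against the user's 64-slot budget.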
@@ -1537,6 +1578,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	u8 log_pmtu;
 	int ret;
 	size_t cplen;
+	bool reserved_op;
 
 	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
 
@@ -1574,18 +1616,12 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 		}
 	}
 
+	reserved_op = rdi->post_parms[wr->opcode].flags &
+			RVT_OPERATION_USE_RESERVE;
 	/* check for avail */
-	if (unlikely(!qp->s_avail)) {
-		qp->s_avail = qp_get_savail(qp);
-		if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
-			rvt_pr_err(rdi,
-				   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
-				   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
-				   qp->s_head, qp->s_tail, qp->s_cur,
-				   qp->s_acked, qp->s_last);
-		if (!qp->s_avail)
-			return -ENOMEM;
-	}
+	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
+	if (ret)
+		return ret;
 	next = qp->s_head + 1;
 	if (next >= qp->s_size)
 		next = 0;
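Which opcodes count as reserved is driven entirely by the driver's post_parms table. A hypothetical entry flagging an opcode might look like the following (illustrative only; the field layout assumes rdmavt's per-opcode operation-parameters convention, and the opcode shown is not one the patch names):

/* Hypothetical driver table entry: posts of this opcode consume a
 * reserved slot instead of user send-queue capacity. */
[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_USE_RESERVE,
},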
@@ -1653,8 +1689,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 		qp->s_next_psn = wqe->lpsn + 1;
 	}
 	trace_rvt_post_one_wr(qp, wqe);
+	if (unlikely(reserved_op))
+		rvt_qp_wqe_reserve(qp, wqe);
+	else
+		qp->s_avail--;
 	smp_wmb(); /* see request builders */
-	qp->s_avail--;
 	qp->s_head = next;
 
 	return 0;
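The reserve/unreserve helpers called here and referenced in the barrier comments are not part of this file's hunks. A sketch of the pairing they imply, assuming a send-flag (RVT_SEND_RESERVE_USED is an assumed name) marks reserved entries so the completion path knows to release the reservation:

/* Sketch only: bodies inferred from the call sites and barrier
 * comments above, not quoted from the patch. */
static inline void rvt_qp_wqe_reserve(struct rvt_qp *qp,
				      struct rvt_swqe *wqe)
{
	wqe->wr.send_flags |= RVT_SEND_RESERVE_USED; /* assumed flag */
	atomic_inc(&qp->s_reserved_used);
}

static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp,
					struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* pairs with smp_mb__before_atomic() in rvt_qp_is_avail() */
		smp_mb__after_atomic();
	}
}

The ordering matters: unreserve must decrement s_reserved_used before the completion path advances s_last, so that rvt_qp_is_avail(), which reads s_last without a lock, never sees the freed ring slot without also seeing the freed reservation.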