
Commit 37f5f84

Bang Nguyen authored and Jerry Snitselaar committed
RDS: Replace queue_work() by cond_resched() in the tasklet to breakup RX stream
Orabug: 18801931

Signed-off-by: Bang Nguyen <[email protected]>
Signed-off-by: Ajaykumar Hotchandani <[email protected]>
(cherry picked from commit 74723dd3283d1bf2b352e5f71fe27340283716ed)
Signed-off-by: Jerry Snitselaar <[email protected]>
1 parent a148e65 commit 37f5f84
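
For context, a minimal sketch of the receive pattern this commit moves to: rather than stopping the CQ poll loop at RDS_IB_RX_LIMIT completions and handing the remainder to a delayed work item, the recv tasklet keeps draining and periodically calls cond_resched() to yield the CPU. This is illustrative only; handle_one_completion() is a hypothetical stand-in for the real send/recv CQE dispatch, and it assumes the poll loop runs in a context where cond_resched() may schedule.

/*
 * Sketch only -- not the actual net/rds code.  Drain the CQ in batches
 * and yield every RDS_IB_RX_LIMIT receive completions instead of
 * breaking the RX stream up with queue_work().
 */
static void drain_cq_sketch(struct rds_ib_connection *ic, struct ib_cq *cq,
			    struct ib_wc *wcs,
			    struct rds_ib_ack_state *ack_state, int rx)
{
	int nr, i;

	while ((nr = ib_poll_cq(cq, RDS_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			/* Yield the CPU periodically rather than bailing out
			 * at the limit and requeueing the remainder. */
			if (rx && (++ic->i_rx_poll_cq % RDS_IB_RX_LIMIT) == 0)
				cond_resched();

			/* hypothetical helper standing in for the real
			 * send/recv CQE handlers */
			handle_one_completion(ic, &wcs[i], ack_state, rx);
		}
	}
}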

File tree

net/rds/ib.h
net/rds/ib_cm.c

2 files changed: +15 −75 lines changed


net/rds/ib.h

Lines changed: 0 additions & 3 deletions
@@ -235,9 +235,6 @@ struct rds_ib_connection {
 	int i_scq_vector;
 	int i_rcq_vector;
 
-	spinlock_t i_rx_lock;
-	struct rds_ib_rx_work i_rx_w;
-	unsigned int i_rx_wait_for_handler;
 	unsigned int i_rx_poll_cq;
 };

net/rds/ib_cm.c

Lines changed: 15 additions & 72 deletions
@@ -329,18 +329,11 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
 
 	while ((nr = ib_poll_cq(cq, RDS_WC_MAX, wcs)) > 0) {
 		for (i = 0; i < nr; i++) {
-			if (rx) {
-				if ((++ic->i_rx_poll_cq % RDS_IB_RX_LIMIT)
-				    == 0) {
-					rdsdebug("connection "
-						 "<%u.%u.%u.%u,%u.%u.%u.%u,%d> "
-						 "RX poll_cq processed %d\n",
-						 NIPQUAD(ic->conn->c_laddr),
-						 NIPQUAD(ic->conn->c_faddr),
-						 ic->conn->c_tos,
-						 ic->i_rx_poll_cq);
-				}
-			}
+
+			if (rx &&
+			    (++ic->i_rx_poll_cq % RDS_IB_RX_LIMIT) == 0)
+				cond_resched();
+
 			wc = wcs + i;
 			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 				 (unsigned long long)wc->wr_id, wc->status, wc->byte_len,
@@ -351,10 +344,6 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
 			else
 				rds_ib_recv_cqe_handler(ic, wc, ack_state);
 		}
-
-		if (rx && ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT)
-			break;
-
 	}
 }

@@ -381,14 +370,9 @@ void rds_ib_tasklet_fn_send(unsigned long data)
 		rds_send_xmit(ic->conn);
 }
 
-/*
- * Note: rds_ib_rx(): don't call with irqs disabled.
- * It calls rds_send_drop_acked() which calls other
- * routines that reach into rds_rdma_free_op()
- * where irqs_disabled() warning is asserted!
- */
-static void rds_ib_rx(struct rds_ib_connection *ic)
+void rds_ib_tasklet_fn_recv(unsigned long data)
 {
+	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
 	struct rds_connection *conn = ic->conn;
 	struct rds_ib_ack_state ack_state;
 	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
@@ -406,53 +390,22 @@ static void rds_ib_rx(struct rds_ib_connection *ic)
 
 	if (ack_state.ack_next_valid)
 		rds_ib_set_ack(ic, ack_state.ack_next, ack_state.ack_required);
+
 	if (ack_state.ack_recv_valid && ack_state.ack_recv > ic->i_ack_recv) {
 		rds_send_drop_acked(conn, ack_state.ack_recv, NULL);
 		ic->i_ack_recv = ack_state.ack_recv;
 	}
+
 	if (rds_conn_up(conn))
 		rds_ib_attempt_ack(ic);
 
 	if (rds_ib_srq_enabled)
 		if ((atomic_read(&rds_ibdev->srq->s_num_posted) <
-			rds_ib_srq_hwm_refill) &&
-			!test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
-			queue_delayed_work(rds_wq,
-				&rds_ibdev->srq->s_refill_w, 0);
-
-	if (ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT) {
-		ic->i_rx_w.ic = ic;
-		queue_delayed_work(rds_aux_wq, &ic->i_rx_w.dlywork,
-			msecs_to_jiffies(10));
-		ic->i_rx_wait_for_handler = 1;
-	}
+		    rds_ib_srq_hwm_refill) &&
+		    !test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
+			queue_delayed_work(rds_wq, &rds_ibdev->srq->s_refill_w, 0);
 }
 
-void rds_ib_tasklet_fn_recv(unsigned long data)
-{
-	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
-
-	spin_lock(&ic->i_rx_lock);
-	if (ic->i_rx_wait_for_handler)
-		goto out;
-	rds_ib_rx(ic);
-out:
-	spin_unlock(&ic->i_rx_lock);
-}
-
-static void rds_ib_rx_handler(struct work_struct *workarg)
-{
-	struct delayed_work *delayedwork =
-		container_of(workarg, struct delayed_work, work);
-	struct rds_ib_rx_work *rirwork =
-		container_of(delayedwork, struct rds_ib_rx_work, dlywork);
-	struct rds_ib_connection *ic = rirwork->ic;
-
-	spin_lock(&ic->i_rx_lock);
-	ic->i_rx_wait_for_handler = 0;
-	rds_ib_rx(ic);
-	spin_unlock(&ic->i_rx_lock);
-}
 
 static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 {
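
For contrast, the functions removed in the hunk above implemented the old hand-off: once i_rx_poll_cq reached RDS_IB_RX_LIMIT, rds_ib_rx() deferred the rest of the receive work to a delayed work item on rds_aux_wq and set i_rx_wait_for_handler so the tasklet would back off until the handler ran. A compressed sketch of that deleted pattern (names taken from the removed lines; simplified, not the full function bodies):

/* Pre-patch pattern, compressed from the deletions above (sketch only). */
static void rds_ib_rx_deferral_sketch(struct rds_ib_connection *ic)
{
	/* ... ack processing and SRQ refill elided ... */

	if (ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT) {
		/* Too much RX for one tasklet run: resume in ~10 ms from a
		 * workqueue and tell the tasklet to stay out of the way. */
		ic->i_rx_w.ic = ic;
		queue_delayed_work(rds_aux_wq, &ic->i_rx_w.dlywork,
				   msecs_to_jiffies(10));
		ic->i_rx_wait_for_handler = 1;
	}
}

static void rds_ib_rx_handler_sketch(struct work_struct *workarg)
{
	struct delayed_work *delayedwork =
		container_of(workarg, struct delayed_work, work);
	struct rds_ib_rx_work *rirwork =
		container_of(delayedwork, struct rds_ib_rx_work, dlywork);
	struct rds_ib_connection *ic = rirwork->ic;

	spin_lock(&ic->i_rx_lock);
	ic->i_rx_wait_for_handler = 0;	/* let the tasklet run RX again */
	rds_ib_rx_deferral_sketch(ic);
	spin_unlock(&ic->i_rx_lock);
}

With cond_resched() in poll_cq(), this extra queueing, the i_rx_lock serialization, and the i_rx_wait_for_handler flag all become unnecessary, which is why the corresponding fields are dropped from net/rds/ib.h above.
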
@@ -1111,17 +1064,9 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
 	}
 
 	/* quiesce tx and rx completion before tearing down */
-	while (!wait_event_timeout(rds_ib_ring_empty_wait,
-			rds_ib_ring_empty(&ic->i_recv_ring) &&
-			(atomic_read(&ic->i_signaled_sends) == 0),
-			msecs_to_jiffies(5000))) {
-
-		if (!rds_ib_ring_empty(&ic->i_recv_ring)) {
-			spin_lock(&ic->i_rx_lock);
-			rds_ib_rx(ic);
-			spin_unlock(&ic->i_rx_lock);
-		}
-	}
+	wait_event(rds_ib_ring_empty_wait,
+		   rds_ib_ring_empty(&ic->i_recv_ring) &&
+		   (atomic_read(&ic->i_signaled_sends) == 0));
 
 	tasklet_kill(&ic->i_stasklet);
 	tasklet_kill(&ic->i_rtasklet);
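
The shutdown change above follows from the same reasoning: the recv tasklet can no longer park itself behind a pending work item, so the quiesce step does not need a 5-second retry loop that manually calls rds_ib_rx(); it can simply block until the ring drains. Re-stated with comments (same condition as the added lines; a sketch, not a verbatim copy):

	/* Quiesce before teardown: sleep until the receive ring is empty and
	 * all signaled sends have completed.  The recv tasklet now always
	 * makes forward progress on its own, so no timeout/retry is needed. */
	wait_event(rds_ib_ring_empty_wait,
		   rds_ib_ring_empty(&ic->i_recv_ring) &&
		   (atomic_read(&ic->i_signaled_sends) == 0));
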
@@ -1254,7 +1199,6 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	spin_lock_init(&ic->i_ack_lock);
 #endif
 	atomic_set(&ic->i_signaled_sends, 0);
-	spin_lock_init(&ic->i_rx_lock);
 
 	/*
 	 * rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -1269,7 +1213,6 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	init_completion(&ic->i_last_wqe_complete);
 
 	INIT_DELAYED_WORK(&ic->i_migrate_w.work, rds_ib_migrate);
-	INIT_DELAYED_WORK(&ic->i_rx_w.dlywork, rds_ib_rx_handler);
 
 	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
 	list_add_tail(&ic->ib_node, &ib_nodev_conns);
