@@ -329,18 +329,11 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
 
 	while ((nr = ib_poll_cq(cq, RDS_WC_MAX, wcs)) > 0) {
 		for (i = 0; i < nr; i++) {
-			if (rx) {
-				if ((++ic->i_rx_poll_cq % RDS_IB_RX_LIMIT)
-				    == 0) {
-					rdsdebug("connection "
-						 "<%u.%u.%u.%u,%u.%u.%u.%u,%d> "
-						 "RX poll_cq processed %d\n",
-						 NIPQUAD(ic->conn->c_laddr),
-						 NIPQUAD(ic->conn->c_faddr),
-						 ic->conn->c_tos,
-						 ic->i_rx_poll_cq);
-				}
-			}
+
+			if (rx &&
+			    (++ic->i_rx_poll_cq % RDS_IB_RX_LIMIT) == 0)
+				cond_resched();
+
 			wc = wcs + i;
 			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 				 (unsigned long long)wc->wr_id, wc->status, wc->byte_len,
@@ -351,10 +344,6 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
 			else
 				rds_ib_recv_cqe_handler(ic, wc, ack_state);
 		}
-
-		if (rx && ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT)
-			break;
-
 	}
 }
 
@@ -381,14 +370,9 @@ void rds_ib_tasklet_fn_send(unsigned long data)
 		rds_send_xmit(ic->conn);
 }
 
-/*
- * Note: rds_ib_rx(): don't call with irqs disabled.
- * It calls rds_send_drop_acked() which calls other
- * routines that reach into rds_rdma_free_op()
- * where irqs_disabled() warning is asserted!
- */
-static void rds_ib_rx(struct rds_ib_connection *ic)
+void rds_ib_tasklet_fn_recv(unsigned long data)
 {
+	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
 	struct rds_connection *conn = ic->conn;
 	struct rds_ib_ack_state ack_state;
 	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
@@ -406,53 +390,22 @@ static void rds_ib_rx(struct rds_ib_connection *ic)
 
 	if (ack_state.ack_next_valid)
 		rds_ib_set_ack(ic, ack_state.ack_next, ack_state.ack_required);
+
 	if (ack_state.ack_recv_valid && ack_state.ack_recv > ic->i_ack_recv) {
 		rds_send_drop_acked(conn, ack_state.ack_recv, NULL);
 		ic->i_ack_recv = ack_state.ack_recv;
 	}
+
 	if (rds_conn_up(conn))
 		rds_ib_attempt_ack(ic);
 
 	if (rds_ib_srq_enabled)
 		if ((atomic_read(&rds_ibdev->srq->s_num_posted) <
-			rds_ib_srq_hwm_refill) &&
-			!test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
-				queue_delayed_work(rds_wq,
-					&rds_ibdev->srq->s_refill_w, 0);
-
-	if (ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT) {
-		ic->i_rx_w.ic = ic;
-		queue_delayed_work(rds_aux_wq, &ic->i_rx_w.dlywork,
-			msecs_to_jiffies(10));
-		ic->i_rx_wait_for_handler = 1;
-	}
+		     rds_ib_srq_hwm_refill) &&
+		    !test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
+			queue_delayed_work(rds_wq, &rds_ibdev->srq->s_refill_w, 0);
 }
 
-void rds_ib_tasklet_fn_recv(unsigned long data)
-{
-	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
-
-	spin_lock(&ic->i_rx_lock);
-	if (ic->i_rx_wait_for_handler)
-		goto out;
-	rds_ib_rx(ic);
-out:
-	spin_unlock(&ic->i_rx_lock);
-}
-
-static void rds_ib_rx_handler(struct work_struct *workarg)
-{
-	struct delayed_work *delayedwork =
-		container_of(workarg, struct delayed_work, work);
-	struct rds_ib_rx_work *rirwork =
-		container_of(delayedwork, struct rds_ib_rx_work, dlywork);
-	struct rds_ib_connection *ic = rirwork->ic;
-
-	spin_lock(&ic->i_rx_lock);
-	ic->i_rx_wait_for_handler = 0;
-	rds_ib_rx(ic);
-	spin_unlock(&ic->i_rx_lock);
-}
 
 static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 {
@@ -1111,17 +1064,9 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
 	}
 
 	/* quiesce tx and rx completion before tearing down */
-	while (!wait_event_timeout(rds_ib_ring_empty_wait,
-			rds_ib_ring_empty(&ic->i_recv_ring) &&
-			(atomic_read(&ic->i_signaled_sends) == 0),
-			msecs_to_jiffies(5000))) {
-
-		if (!rds_ib_ring_empty(&ic->i_recv_ring)) {
-			spin_lock(&ic->i_rx_lock);
-			rds_ib_rx(ic);
-			spin_unlock(&ic->i_rx_lock);
-		}
-	}
+	wait_event(rds_ib_ring_empty_wait,
+		   rds_ib_ring_empty(&ic->i_recv_ring) &&
+		   (atomic_read(&ic->i_signaled_sends) == 0));
 
 	tasklet_kill(&ic->i_stasklet);
 	tasklet_kill(&ic->i_rtasklet);
@@ -1254,7 +1199,6 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	spin_lock_init(&ic->i_ack_lock);
 #endif
 	atomic_set(&ic->i_signaled_sends, 0);
-	spin_lock_init(&ic->i_rx_lock);
 
 	/*
 	 * rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -1269,7 +1213,6 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	init_completion(&ic->i_last_wqe_complete);
 
 	INIT_DELAYED_WORK(&ic->i_migrate_w.work, rds_ib_migrate);
-	INIT_DELAYED_WORK(&ic->i_rx_w.dlywork, rds_ib_rx_handler);
 
 	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
 	list_add_tail(&ic->ib_node, &ib_nodev_conns);