@@ -333,18 +333,11 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
 
 	while ((nr = ib_poll_cq(cq, RDS_WC_MAX, wcs)) > 0) {
 		for (i = 0; i < nr; i++) {
-			if (rx) {
-				if ((++ic->i_rx_poll_cq % RDS_IB_RX_LIMIT)
-						== 0) {
-					rdsdebug("connection "
-						 "<%u.%u.%u.%u,%u.%u.%u.%u,%d> "
-						 "RX poll_cq processed %d\n",
-						 NIPQUAD(ic->conn->c_laddr),
-						 NIPQUAD(ic->conn->c_faddr),
-						 ic->conn->c_tos,
-						 ic->i_rx_poll_cq);
-				}
-			}
+
+			if (rx &&
+			    (++ic->i_rx_poll_cq % RDS_IB_RX_LIMIT) == 0)
+				cond_resched();
+
 			wc = wcs + i;
 			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 				 (unsigned long long)wc->wr_id, wc->status, wc->byte_len,
@@ -355,10 +348,6 @@ static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
 			else
 				rds_ib_recv_cqe_handler(ic, wc, ack_state);
 		}
-
-		if (rx && ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT)
-			break;
-
 	}
 }
 
@@ -385,14 +374,9 @@ void rds_ib_tasklet_fn_send(unsigned long data)
 		rds_send_xmit(ic->conn);
 	}
 
-/*
- * Note: rds_ib_rx(): don't call with irqs disabled.
- * It calls rds_send_drop_acked() which calls other
- * routines that reach into rds_rdma_free_op()
- * where irqs_disabled() warning is asserted!
- */
-static void rds_ib_rx(struct rds_ib_connection *ic)
+void rds_ib_tasklet_fn_recv(unsigned long data)
 {
+	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
 	struct rds_connection *conn = ic->conn;
 	struct rds_ib_ack_state ack_state;
 	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
@@ -410,53 +394,22 @@ static void rds_ib_rx(struct rds_ib_connection *ic)
 
 	if (ack_state.ack_next_valid)
 		rds_ib_set_ack(ic, ack_state.ack_next, ack_state.ack_required);
+
 	if (ack_state.ack_recv_valid && ack_state.ack_recv > ic->i_ack_recv) {
 		rds_send_drop_acked(conn, ack_state.ack_recv, NULL);
 		ic->i_ack_recv = ack_state.ack_recv;
 	}
+
 	if (rds_conn_up(conn))
 		rds_ib_attempt_ack(ic);
 
 	if (rds_ib_srq_enabled)
 		if ((atomic_read(&rds_ibdev->srq->s_num_posted) <
-			rds_ib_srq_hwm_refill) &&
-			!test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
-				queue_delayed_work(rds_wq,
-					&rds_ibdev->srq->s_refill_w, 0);
-
-	if (ic->i_rx_poll_cq >= RDS_IB_RX_LIMIT) {
-		ic->i_rx_w.ic = ic;
-		queue_delayed_work(rds_aux_wq, &ic->i_rx_w.dlywork,
-				msecs_to_jiffies(10));
-		ic->i_rx_wait_for_handler = 1;
-	}
+		     rds_ib_srq_hwm_refill) &&
+		    !test_and_set_bit(0, &rds_ibdev->srq->s_refill_gate))
+			queue_delayed_work(rds_wq, &rds_ibdev->srq->s_refill_w, 0);
 }
 
-void rds_ib_tasklet_fn_recv(unsigned long data)
-{
-	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
-
-	spin_lock(&ic->i_rx_lock);
-	if (ic->i_rx_wait_for_handler)
-		goto out;
-	rds_ib_rx(ic);
-out:
-	spin_unlock(&ic->i_rx_lock);
-}
-
-static void rds_ib_rx_handler(struct work_struct *workarg)
-{
-	struct delayed_work *delayedwork =
-		container_of(workarg, struct delayed_work, work);
-	struct rds_ib_rx_work *rirwork =
-		container_of(delayedwork, struct rds_ib_rx_work, dlywork);
-	struct rds_ib_connection *ic = rirwork->ic;
-
-	spin_lock(&ic->i_rx_lock);
-	ic->i_rx_wait_for_handler = 0;
-	rds_ib_rx(ic);
-	spin_unlock(&ic->i_rx_lock);
-}
-
 
 static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 {
@@ -1130,17 +1083,9 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
 	}
 
 	/* quiesce tx and rx completion before tearing down */
-	while (!wait_event_timeout(rds_ib_ring_empty_wait,
-			rds_ib_ring_empty(&ic->i_recv_ring) &&
-			(atomic_read(&ic->i_signaled_sends) == 0),
-			msecs_to_jiffies(5000))) {
-
-		if (!rds_ib_ring_empty(&ic->i_recv_ring)) {
-			spin_lock(&ic->i_rx_lock);
-			rds_ib_rx(ic);
-			spin_unlock(&ic->i_rx_lock);
-		}
-	}
+	wait_event(rds_ib_ring_empty_wait,
+		   rds_ib_ring_empty(&ic->i_recv_ring) &&
+		   (atomic_read(&ic->i_signaled_sends) == 0));
 
 	tasklet_kill(&ic->i_stasklet);
 	tasklet_kill(&ic->i_rtasklet);
@@ -1277,7 +1222,6 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	spin_lock_init(&ic->i_ack_lock);
 #endif
 	atomic_set(&ic->i_signaled_sends, 0);
-	spin_lock_init(&ic->i_rx_lock);
 
 	/*
 	 * rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -1292,7 +1236,6 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	init_completion(&ic->i_last_wqe_complete);
 
 	INIT_DELAYED_WORK(&ic->i_migrate_w.work, rds_ib_migrate);
-	INIT_DELAYED_WORK(&ic->i_rx_w.dlywork, rds_ib_rx_handler);
 
 	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
 	list_add_tail(&ic->ib_node, &ib_nodev_conns);