@@ -529,8 +529,7 @@ static void rds_ib_cq_comp_handler_fastreg(struct ib_cq *cq, void *context)
 {
 	struct rds_ib_device *rds_ibdev = context;
 
-	queue_work_on(smp_processor_id(),
-		      rds_evt_wq, &rds_ibdev->fastreg_w);
+	tasklet_schedule(&rds_ibdev->fastreg_tasklet);
 }
 
 static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
@@ -542,8 +541,7 @@ static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
 
 	rds_ib_stats_inc(s_ib_evt_handler_call);
 
-	queue_work_on(smp_processor_id(),
-		      rds_evt_wq, &ic->i_send_w);
+	tasklet_schedule(&ic->i_stasklet);
 }
 
 static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
@@ -555,8 +553,7 @@ static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
 
 	rds_ib_stats_inc(s_ib_evt_handler_call);
 
-	queue_work_on(smp_processor_id(),
-		      rds_evt_wq, &ic->i_recv_w);
+	tasklet_schedule(&ic->i_rtasklet);
 }
 
 static void poll_fcq(struct rds_ib_device *rds_ibdev, struct ib_cq *cq,
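
Note: the three completion handlers above run in interrupt context, where tasklet_schedule() is legal, and they now hand the actual CQ draining to tasklets instead of queueing work on rds_evt_wq. A minimal sketch of the classic (unsigned long) tasklet lifecycle this change relies on follows; tasklet_init(), tasklet_schedule() and tasklet_kill() are the real kernel primitives, all demo_* names are illustrative.

#include <linux/interrupt.h>

/* Illustrative context struct standing in for rds_ib_connection / rds_ib_device. */
struct demo_ctx {
	struct tasklet_struct	demo_tasklet;
	int			pending;
};

/* Runs later in softirq context, once per tasklet_schedule() burst. */
static void demo_tasklet_fn(unsigned long data)
{
	struct demo_ctx *ctx = (struct demo_ctx *)data;

	ctx->pending = 0;	/* drain deferred work here, e.g. poll a CQ */
}

static void demo_init(struct demo_ctx *ctx)
{
	/* store the context pointer as the unsigned long cookie */
	tasklet_init(&ctx->demo_tasklet, demo_tasklet_fn, (unsigned long)ctx);
}

/* Safe to call from hard-IRQ context, like an ib_cq completion handler. */
static void demo_irq_handler(struct demo_ctx *ctx)
{
	ctx->pending = 1;
	tasklet_schedule(&ctx->demo_tasklet);
}

static void demo_teardown(struct demo_ctx *ctx)
{
	tasklet_kill(&ctx->demo_tasklet);	/* waits for a running instance to finish */
}
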
@@ -623,22 +620,18 @@ static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
 	}
 }
 
-static void rds_ib_cq_comp_handler_fastreg_w(struct work_struct *work)
+static void rds_ib_tasklet_fn_fastreg(unsigned long data)
 {
-	struct rds_ib_device *rds_ibdev = container_of(work,
-						       struct rds_ib_device,
-						       fastreg_w);
+	struct rds_ib_device *rds_ibdev = (struct rds_ib_device *)data;
 
 	poll_fcq(rds_ibdev, rds_ibdev->fastreg_cq, rds_ibdev->fastreg_wc);
 	ib_req_notify_cq(rds_ibdev->fastreg_cq, IB_CQ_NEXT_COMP);
 	poll_fcq(rds_ibdev, rds_ibdev->fastreg_cq, rds_ibdev->fastreg_wc);
 }
 
-void rds_ib_send_w(struct work_struct *work)
+void rds_ib_tasklet_fn_send(unsigned long data)
 {
-	struct rds_ib_connection *ic = container_of(work,
-						    struct rds_ib_connection,
-						    i_send_w);
+	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
 	struct rds_connection *conn = ic->conn;
 
 	rds_ib_stats_inc(s_ib_tasklet_call);
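
Note: rds_ib_tasklet_fn_fastreg() above keeps the poll / re-arm / poll sequence: completions that land between the first drain and ib_req_notify_cq() would not raise a new interrupt, so the CQ is polled once more after re-arming. A sketch of the same idiom with illustrative demo_* names and an assumed small context struct:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <rdma/ib_verbs.h>

struct demo_cq_ctx {
	struct ib_cq	*cq;
	struct ib_wc	wcs[8];
};

/* Pull everything currently sitting in the CQ. */
static void demo_drain_cq(struct ib_cq *cq, struct ib_wc *wcs, int nr)
{
	int n;

	while ((n = ib_poll_cq(cq, nr, wcs)) > 0) {
		/* process the n completions here */
	}
}

static void demo_cq_tasklet_fn(unsigned long data)
{
	struct demo_cq_ctx *ctx = (struct demo_cq_ctx *)data;

	demo_drain_cq(ctx->cq, ctx->wcs, ARRAY_SIZE(ctx->wcs));	/* drain what is there */
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);			/* re-arm for the next interrupt */
	demo_drain_cq(ctx->cq, ctx->wcs, ARRAY_SIZE(ctx->wcs));	/* catch entries that raced the re-arm */
}
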
@@ -712,11 +705,9 @@ static void rds_ib_rx(struct rds_ib_connection *ic)
 	}
 }
 
-void rds_ib_recv_w(struct work_struct *work)
+void rds_ib_tasklet_fn_recv(unsigned long data)
 {
-	struct rds_ib_connection *ic = container_of(work,
-						    struct rds_ib_connection,
-						    i_recv_w);
+	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
 
 	spin_lock_bh(&ic->i_rx_lock);
 	if (ic->i_rx_wait_for_handler)
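
Note: the renamed handlers recover their context by casting the unsigned long cookie instead of using container_of() on a work_struct, and rds_ib_tasklet_fn_send()/rds_ib_tasklet_fn_recv() keep external linkage. The tasklet fields and prototypes they rely on are presumably added in a companion header change that is not part of the hunks shown; a sketch of what that might look like, labeled as an assumption:

/* Assumed companion change in the RDS IB header -- not shown in these hunks. */
#include <linux/interrupt.h>

struct rds_ib_device {
	/* ... existing fields ... */
	struct tasklet_struct	fastreg_tasklet;	/* was: struct work_struct fastreg_w */
};

struct rds_ib_connection {
	/* ... existing fields ... */
	struct tasklet_struct	i_stasklet;	/* was: struct work_struct i_send_w */
	struct tasklet_struct	i_rtasklet;	/* was: struct work_struct i_recv_w */
};

void rds_ib_tasklet_fn_send(unsigned long data);
void rds_ib_tasklet_fn_recv(unsigned long data);
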
@@ -2122,11 +2113,10 @@ void rds_ib_conn_path_shutdown_prepare(struct rds_conn_path *cp)
 		} else if (rds_ib_srq_enabled && ic->rds_ibdev) {
 			/*
 			   wait for the last wqe to complete, then schedule
-			   the recv work to drain the RX CQ.
+			   the recv tasklet to drain the RX CQ.
 			*/
 			wait_for_completion(&ic->i_last_wqe_complete);
-			queue_work_on(smp_processor_id(),
-				      rds_evt_wq, &ic->i_recv_w);
+			tasklet_schedule(&ic->i_rtasklet);
 		}
 	}
 }
@@ -2166,7 +2156,8 @@ void rds_ib_conn_path_shutdown_final(struct rds_conn_path *cp)
 	if (ic->i_cm_id) {
 		cancel_delayed_work_sync(&ic->i_rx_w.work);
 
-		flush_workqueue(rds_evt_wq);
+		tasklet_kill(&ic->i_stasklet);
+		tasklet_kill(&ic->i_rtasklet);
 
 		ic->i_flags &= ~RDS_IB_CQ_ERR;
 
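
Note: tasklet_kill() only returns once a scheduled or running tasklet has finished, which is what lets it replace the flush_workqueue(rds_evt_wq) barrier here and in rds_ib_destroy_fastreg() below; it must not be called from interrupt context. A one-function sketch with an illustrative name:

#include <linux/interrupt.h>

/* Minimal sketch: quiesce a tasklet before tearing down the object it uses. */
static void demo_quiesce(struct tasklet_struct *t)
{
	tasklet_kill(t);
	/* from here on the handler can no longer run against the object being freed */
}
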
@@ -2279,8 +2270,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	INIT_LIST_HEAD(&ic->ib_node);
 	init_rwsem(&ic->i_cm_id_free_lock);
 
-	INIT_WORK(&ic->i_send_w, rds_ib_send_w);
-	INIT_WORK(&ic->i_recv_w, rds_ib_recv_w);
+	tasklet_init(&ic->i_stasklet, rds_ib_tasklet_fn_send, (unsigned long)ic);
+	tasklet_init(&ic->i_rtasklet, rds_ib_tasklet_fn_recv, (unsigned long)ic);
 	mutex_init(&ic->i_recv_mutex);
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&ic->i_ack_lock);
@@ -2365,7 +2356,7 @@ void rds_ib_destroy_fastreg(struct rds_ib_device *rds_ibdev)
 	 */
 	WARN_ON(atomic_read(&rds_ibdev->fastreg_wrs) != RDS_IB_DEFAULT_FREG_WR);
 
-	flush_workqueue(rds_evt_wq);
+	tasklet_kill(&rds_ibdev->fastreg_tasklet);
 	if (rds_ibdev->fastreg_qp) {
 		/* Destroy qp */
 		if (ib_destroy_qp(rds_ibdev->fastreg_qp))
@@ -2526,7 +2517,8 @@ int rds_ib_setup_fastreg(struct rds_ib_device *rds_ibdev)
 			    "moved qp to RTS state for device",
 			    0);
 
-	INIT_WORK(&rds_ibdev->fastreg_w, rds_ib_cq_comp_handler_fastreg_w);
+	tasklet_init(&rds_ibdev->fastreg_tasklet, rds_ib_tasklet_fn_fastreg,
+		     (unsigned long)rds_ibdev);
 	atomic_set(&rds_ibdev->fastreg_wrs, RDS_IB_DEFAULT_FREG_WR);
 
 clean_up: