@@ -109,14 +109,13 @@ struct p9_trans_rdma {
 /**
  * p9_rdma_context - Keeps track of in-process WR
  *
- * @wc_op: The original WR op for when the CQE completes in error.
  * @busa: Bus address to unmap when the WR completes
  * @req: Keeps track of requests (send)
  * @rc: Keepts track of replies (receive)
  */
 struct p9_rdma_req;
 struct p9_rdma_context {
-	enum ib_wc_opcode wc_op;
+	struct ib_cqe cqe;
 	dma_addr_t busa;
 	union {
 		struct p9_req_t *req;
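[Editor's note: the heart of this conversion is the wr_cqe pattern from the new CQ API. Instead of stashing an opcode in the context and round-tripping the context pointer through wr.wr_id, each work request embeds a struct ib_cqe whose ->done callback the CQ core invokes directly; the context is recovered with container_of(). A minimal sketch of the pattern, with hypothetical names (my_ctx, my_done), not code from this patch:]

	struct my_ctx {
		struct ib_cqe cqe;	/* ->done runs when the WR completes */
		/* ... per-WR state (DMA address, request pointer, ...) ... */
	};

	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct my_ctx *ctx =
			container_of(wc->wr_cqe, struct my_ctx, cqe);
		/* wc->status and wc->byte_len describe the completion */
	}

	/* posting side (inside whatever function builds the WR):
	 * wire the callback and hand the embedded cqe to the WR */
	ctx->cqe.done = my_done;
	wr.wr_cqe = &ctx->cqe;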
@@ -284,9 +283,12 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 }
 
 static void
-handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
-	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct p9_client *client = cq->cq_context;
+	struct p9_trans_rdma *rdma = client->trans;
+	struct p9_rdma_context *c =
+		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
 	struct p9_req_t *req;
 	int err = 0;
 	int16_t tag;
@@ -295,7 +297,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
 	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
 			    DMA_FROM_DEVICE);
 
-	if (status != IB_WC_SUCCESS)
+	if (wc->status != IB_WC_SUCCESS)
 		goto err_out;
 
 	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
@@ -316,21 +318,32 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
 	req->rc = c->rc;
 	p9_client_cb(client, req, REQ_STATUS_RCVD);
 
+ out:
+	up(&rdma->rq_sem);
+	kfree(c);
 	return;
 
 err_out:
-	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
+	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
+		 req, err, wc->status);
 	rdma->state = P9_RDMA_FLUSHING;
 	client->status = Disconnected;
+	goto out;
 }
 
 static void
-handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
-	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct p9_client *client = cq->cq_context;
+	struct p9_trans_rdma *rdma = client->trans;
+	struct p9_rdma_context *c =
+		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
+
 	ib_dma_unmap_single(rdma->cm_id->device,
 			    c->busa, c->req->tc->size,
 			    DMA_TO_DEVICE);
+	up(&rdma->sq_sem);
+	kfree(c);
 }
 
 static void qp_event_handler(struct ib_event *event, void *context)
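[Editor's note: with no central poll loop left, each completion callback must do its own cleanup -- unmap, release the flow-control semaphore, free the context -- and must do so even for errored or flushed completions, which now arrive through the same ->done path. That is why recv_done's error path ends in goto out. A skeleton of that obligation, hypothetical names again:]

	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct my_ctx *c =
			container_of(wc->wr_cqe, struct my_ctx, cqe);

		/* teardown that must happen for every completion */
		if (wc->status != IB_WC_SUCCESS)
			goto out;	/* flushed/failed WRs land here too */
		/* ... success-only processing ... */
	out:
		up(&my_sem);	/* return the posting credit */
		kfree(c);	/* contexts are single-use */
	}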
@@ -339,42 +352,6 @@ static void qp_event_handler(struct ib_event *event, void *context)
 		 event->event, context);
 }
 
-static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
-{
-	struct p9_client *client = cq_context;
-	struct p9_trans_rdma *rdma = client->trans;
-	int ret;
-	struct ib_wc wc;
-
-	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
-	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
-		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;
-
-		switch (c->wc_op) {
-		case IB_WC_RECV:
-			handle_recv(client, rdma, c, wc.status, wc.byte_len);
-			up(&rdma->rq_sem);
-			break;
-
-		case IB_WC_SEND:
-			handle_send(client, rdma, c, wc.status, wc.byte_len);
-			up(&rdma->sq_sem);
-			break;
-
-		default:
-			pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
-			       c->wc_op, wc.opcode, wc.status);
-			break;
-		}
-		kfree(c);
-	}
-}
-
-static void cq_event_handler(struct ib_event *e, void *v)
-{
-	p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
-}
-
 static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
 {
 	if (!rdma)
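[Editor's note: the hand-rolled dispatch deleted above is exactly what the CQ core now does on the transport's behalf. Conceptually -- this is a simplified sketch, not the actual code in drivers/infiniband/core/cq.c -- the IB_POLL_SOFTIRQ polling context amounts to:]

	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		wc.wr_cqe->done(cq, &wc);	/* dispatch via the embedded cqe */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* rearm for the next batch */

[The per-opcode switch disappears because each WR carries its own callback, so the "unexpected completion type" case becomes impossible by construction.]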
@@ -387,7 +364,7 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
 		ib_dealloc_pd(rdma->pd);
 
 	if (rdma->cq && !IS_ERR(rdma->cq))
-		ib_destroy_cq(rdma->cq);
+		ib_free_cq(rdma->cq);
 
 	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
 		rdma_destroy_id(rdma->cm_id);
@@ -408,13 +385,14 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
 	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
 		goto error;
 
+	c->cqe.done = recv_done;
+
 	sge.addr = c->busa;
 	sge.length = client->msize;
 	sge.lkey = rdma->pd->local_dma_lkey;
 
 	wr.next = NULL;
-	c->wc_op = IB_WC_RECV;
-	wr.wr_id = (unsigned long) c;
+	wr.wr_cqe = &c->cqe;
 	wr.sg_list = &sge;
 	wr.num_sge = 1;
 	return ib_post_recv(rdma->qp, &wr, &bad_wr);
@@ -499,13 +477,14 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 		goto send_error;
 	}
 
+	c->cqe.done = send_done;
+
 	sge.addr = c->busa;
 	sge.length = c->req->tc->size;
 	sge.lkey = rdma->pd->local_dma_lkey;
 
 	wr.next = NULL;
-	c->wc_op = IB_WC_SEND;
-	wr.wr_id = (unsigned long) c;
+	wr.wr_cqe = &c->cqe;
 	wr.opcode = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 	wr.sg_list = &sge;
@@ -642,7 +621,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 	struct p9_trans_rdma *rdma;
 	struct rdma_conn_param conn_param;
 	struct ib_qp_init_attr qp_attr;
-	struct ib_cq_init_attr cq_attr = {};
 
 	/* Parse the transport specific mount options */
 	err = parse_opts(args, &opts);
@@ -695,13 +673,11 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 		goto error;
 
 	/* Create the Completion Queue */
-	cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
-	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
-				cq_event_handler, client,
-				&cq_attr);
+	rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
+			       opts.sq_depth + opts.rq_depth + 1,
+			       0, IB_POLL_SOFTIRQ);
 	if (IS_ERR(rdma->cq))
 		goto error;
-	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
 
 	/* Create the Protection Domain */
 	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
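[Editor's note: for reference, the allocation helper used above -- assumed signature, check include/rdma/ib_verbs.h for the authoritative one:]

	struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
				  int nr_cqe, int comp_vector,
				  enum ib_poll_context poll_ctx);

[The private pointer comes back as cq->cq_context in the done callbacks, which is how recv_done/send_done recover the p9_client. poll_ctx selects who drives completions: IB_POLL_DIRECT (the caller polls via ib_process_cq_direct), IB_POLL_SOFTIRQ (irq_poll, chosen here to match the old interrupt-driven behavior), or IB_POLL_WORKQUEUE (process context). The explicit ib_req_notify_cq() rearm also disappears because the polling context manages notification itself, and a CQ from ib_alloc_cq() must be released with ib_free_cq(), hence the change in rdma_destroy_trans() above.]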