@@ -53,14 +53,6 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-enum rpcrdma_chunktype {
-	rpcrdma_noch = 0,
-	rpcrdma_readch,
-	rpcrdma_areadch,
-	rpcrdma_writech,
-	rpcrdma_replych
-};
-
 #ifdef RPC_DEBUG
 static const char transfertypes[][12] = {
 	"pure inline",	/* no chunks */
@@ -285,6 +277,28 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 	return n;
 }
 
+/*
+ * Marshal chunks. This routine returns the header length
+ * consumed by marshaling.
+ *
+ * Returns positive RPC/RDMA header size, or negative errno.
+ */
+
+ssize_t
+rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
+{
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+
+	if (req->rl_rtype != rpcrdma_noch)
+		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
+					       headerp, req->rl_rtype);
+	else if (req->rl_wtype != rpcrdma_noch)
+		result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
+					       headerp, req->rl_wtype);
+	return result;
+}
+
 /*
  * Copy write data inline.
  * This function is used for "small" requests. Data which is passed
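Because the new helper reads the chunk types cached in struct rpcrdma_req rather than taking them as parameters, it can be invoked outside the full rpcrdma_marshal_req() path. A minimal sketch of how a caller might use it follows; the function name xprt_rdma_remarshal() and the retransmit scenario are assumptions for illustration, not part of this patch — only rpcrdma_marshal_chunks() comes from the diff above.

/*
 * Illustrative sketch only, not from this patch: a send path could
 * rebuild the chunk lists on retransmit (for example, after memory
 * registrations are invalidated by a reconnect), since rl_rtype and
 * rl_wtype persist in the request.
 */
static int xprt_rdma_remarshal(struct rpc_rqst *rqst)
{
	ssize_t hdrlen;

	/* Passing 0 means there is no previously computed header
	 * length to fall through to; when either chunk type is set,
	 * the helper overwrites it with rpcrdma_create_chunks()'s
	 * result. */
	hdrlen = rpcrdma_marshal_chunks(rqst, 0);
	if (hdrlen < 0)
		return (int)hdrlen;	/* negative errno, e.g. -EIO */
	return 0;
}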
@@ -377,7 +391,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	char *base;
 	size_t rpclen, padlen;
 	ssize_t hdrlen;
-	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
 
 	/*
@@ -415,13 +428,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * into pages; otherwise use reply chunks.
 	 */
 	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
-		wtype = rpcrdma_noch;
+		req->rl_wtype = rpcrdma_noch;
 	else if (rqst->rq_rcv_buf.page_len == 0)
-		wtype = rpcrdma_replych;
+		req->rl_wtype = rpcrdma_replych;
 	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
-		wtype = rpcrdma_writech;
+		req->rl_wtype = rpcrdma_writech;
 	else
-		wtype = rpcrdma_replych;
+		req->rl_wtype = rpcrdma_replych;
 
 	/*
 	 * Chunks needed for arguments?
@@ -438,16 +451,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * TBD check NFSv4 setacl
 	 */
 	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
-		rtype = rpcrdma_noch;
+		req->rl_rtype = rpcrdma_noch;
 	else if (rqst->rq_snd_buf.page_len == 0)
-		rtype = rpcrdma_areadch;
+		req->rl_rtype = rpcrdma_areadch;
 	else
-		rtype = rpcrdma_readch;
+		req->rl_rtype = rpcrdma_readch;
 
 	/* The following simplification is not true forever */
-	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
-		wtype = rpcrdma_noch;
-	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
+	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
+		req->rl_wtype = rpcrdma_noch;
+	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
 		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
 			__func__);
 		return -EIO;
@@ -461,7 +474,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * When padding is in use and applies to the transfer, insert
 	 * it and change the message type.
 	 */
-	if (rtype == rpcrdma_noch) {
+	if (req->rl_rtype == rpcrdma_noch) {
 
 		padlen = rpcrdma_inline_pullup(rqst,
 						RPCRDMA_INLINE_PAD_VALUE(rqst));
@@ -476,7 +489,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
 			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
 			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
-			if (wtype != rpcrdma_noch) {
+			if (req->rl_wtype != rpcrdma_noch) {
 				dprintk("RPC:       %s: invalid chunk list\n",
 					__func__);
 				return -EIO;
@@ -497,30 +510,18 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 			 * on receive. Therefore, we request a reply chunk
 			 * for non-writes wherever feasible and efficient.
 			 */
-			if (wtype == rpcrdma_noch)
-				wtype = rpcrdma_replych;
+			if (req->rl_wtype == rpcrdma_noch)
+				req->rl_wtype = rpcrdma_replych;
 		}
 	}
 
-	/*
-	 * Marshal chunks. This routine will return the header length
-	 * consumed by marshaling.
-	 */
-	if (rtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst,
-			&rqst->rq_snd_buf, headerp, rtype);
-		wtype = rtype;	/* simplify dprintk */
-
-	} else if (wtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst,
-			&rqst->rq_rcv_buf, headerp, wtype);
-	}
+	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
 	if (hdrlen < 0)
 		return hdrlen;
 
 	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
 		" headerp 0x%p base 0x%p lkey 0x%x\n",
-		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
+		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
 		headerp, base, req->rl_iov.lkey);
 
 	/*
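The diff ends here. For context, a hedged reconstruction of the declarations this patch implies but does not show: the enum removed from rpc_rdma.c presumably moves to the transport's private header (likely xprt_rdma.h), alongside new rl_rtype/rl_wtype fields in struct rpcrdma_req and a prototype for the new helper. The enum body is copied from the removal hunk at the top; the struct layout and header placement are assumptions inferred from the uses in the hunks above.

/* Assumed companion declarations (not shown in this diff): the enum
 * body matches the lines removed above; the field names come from
 * their uses in rpcrdma_marshal_req() and rpcrdma_marshal_chunks(),
 * and the surrounding struct layout is illustrative only. */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

struct rpcrdma_req {
	/* ... existing fields ... */
	enum rpcrdma_chunktype	rl_rtype;	/* chunk type for RPC args */
	enum rpcrdma_chunktype	rl_wtype;	/* chunk type for RPC result */
	/* ... existing fields ... */
};

ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result);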