@@ -494,13 +494,10 @@ static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
  * Marshal a request: the primary job of this routine is to choose
  * the transfer modes. See comments below.
  *
- * Uses multiple RDMA IOVs for a request:
- *  [0] -- RPC RDMA header, which uses memory from the *start* of the
- *         preregistered buffer that already holds the RPC data in
- *         its middle.
- *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
- *  [2] -- optional padding.
- *  [3] -- if padded, header only in [1] and data here.
+ * Prepares up to two IOVs per Call message:
+ *
+ *  [0] -- RPC RDMA header
+ *  [1] -- the RPC header/data
  *
  * Returns zero on success, otherwise a negative errno.
  */
@@ -624,13 +621,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 		__func__, transfertypes[wtype], hdrlen, rpclen,
 		headerp, base, rdmab_lkey(req->rl_rdmabuf));
 
-	/*
-	 * initialize send_iov's - normally only two: rdma chunk header and
-	 * single preregistered RPC header buffer, but if padding is present,
-	 * then use a preregistered (and zeroed) pad buffer between the RPC
-	 * header and any write data. In all non-rdma cases, any following
-	 * data has been copied into the RPC header buffer.
-	 */
 	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
 	req->rl_send_iov[0].length = hdrlen;
 	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
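For reference, the new comment describes a fixed two-element send layout for a Call: element [0] carries the RPC-over-RDMA header, element [1] carries the marshaled RPC message. The standalone C sketch below only illustrates that layout; it is not the kernel code. The sg_elem struct, the fill_send_list() helper, and the malloc'd buffers are hypothetical stand-ins for the transport's pre-registered, DMA-mapped buffers and their lkeys.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for one send scatter/gather element. */
struct sg_elem {
	void		*addr;
	size_t		length;
	unsigned int	lkey;	/* local protection key of the buffer */
};

/*
 * Fill the two-element send list used for a Call:
 *   [0] -- RPC RDMA header
 *   [1] -- the RPC header/data
 */
static void fill_send_list(struct sg_elem iov[2],
			   void *rdma_hdr, size_t hdrlen, unsigned int hdr_lkey,
			   void *rpc_msg, size_t rpclen, unsigned int msg_lkey)
{
	iov[0].addr = rdma_hdr;
	iov[0].length = hdrlen;
	iov[0].lkey = hdr_lkey;

	iov[1].addr = rpc_msg;
	iov[1].length = rpclen;
	iov[1].lkey = msg_lkey;
}

int main(void)
{
	char rdma_hdr[28];	/* transport header; size is illustrative */
	char rpc_msg[256];	/* marshaled RPC call message */
	struct sg_elem iov[2];

	memset(rdma_hdr, 0, sizeof(rdma_hdr));
	memset(rpc_msg, 0, sizeof(rpc_msg));

	/* lkey values are placeholders for illustration only */
	fill_send_list(iov, rdma_hdr, sizeof(rdma_hdr), 0x1111,
		       rpc_msg, sizeof(rpc_msg), 0x2222);

	printf("iov[0]: %zu bytes (RPC RDMA header)\n", iov[0].length);
	printf("iov[1]: %zu bytes (RPC header/data)\n", iov[1].length);
	return 0;
}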