  * one extra page for the RPCRDMA header.
  */
 static int fast_reg_xdr(struct svcxprt_rdma *xprt,
-                struct xdr_buf *xdr,
-                struct svc_rdma_req_map *vec)
+                        struct xdr_buf *xdr,
+                        struct svc_rdma_req_map *vec)
 {
         int sge_no;
         u32 sge_bytes;
@@ -96,21 +96,25 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
         vec->count = 2;
         sge_no++;

-        /* Build the FRMR */
+        /* Map the XDR head */
         frmr->kva = frva;
         frmr->direction = DMA_TO_DEVICE;
         frmr->access_flags = 0;
         frmr->map_len = PAGE_SIZE;
         frmr->page_list_len = 1;
+        page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
         frmr->page_list->page_list[page_no] =
-                ib_dma_map_single(xprt->sc_cm_id->device,
-                                  (void *)xdr->head[0].iov_base,
-                                  PAGE_SIZE, DMA_TO_DEVICE);
+                ib_dma_map_page(xprt->sc_cm_id->device,
+                                virt_to_page(xdr->head[0].iov_base),
+                                page_off,
+                                PAGE_SIZE - page_off,
+                                DMA_TO_DEVICE);
         if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                  frmr->page_list->page_list[page_no]))
                 goto fatal_err;
         atomic_inc(&xprt->sc_dma_used);

+        /* Map the XDR page list */
         page_off = xdr->page_base;
         page_bytes = xdr->page_len + page_off;
         if (!page_bytes)
@@ -128,9 +132,9 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
                 page_bytes -= sge_bytes;

                 frmr->page_list->page_list[page_no] =
-                        ib_dma_map_single(xprt->sc_cm_id->device,
-                                          page_address(page),
-                                          PAGE_SIZE, DMA_TO_DEVICE);
+                        ib_dma_map_page(xprt->sc_cm_id->device,
+                                        page, page_off,
+                                        sge_bytes, DMA_TO_DEVICE);
                 if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                          frmr->page_list->page_list[page_no]))
                         goto fatal_err;
@@ -166,8 +170,10 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
         vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

         frmr->page_list->page_list[page_no] =
-                ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
-                                  DMA_TO_DEVICE);
+                ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
+                                page_off,
+                                PAGE_SIZE,
+                                DMA_TO_DEVICE);
         if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                  frmr->page_list->page_list[page_no]))
                 goto fatal_err;
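
The three hunks above all make the same substitution: instead of handing a kernel virtual address to ib_dma_map_single(), the code maps the backing struct page with ib_dma_map_page() plus an explicit offset, so nothing depends on page_address()/lowmem mappings. The helper below is a minimal kernel-style sketch of that pattern; it is not part of the patch, and the function name map_kva_for_send and its error convention are invented here for illustration.

#include <linux/errno.h>
#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper illustrating the mapping pattern used above:
 * map the page backing kernel virtual address 'va' for a send,
 * rather than mapping the address itself with ib_dma_map_single().
 */
static int map_kva_for_send(struct ib_device *dev, void *va, size_t len,
                            u64 *dma_addr)
{
        *dma_addr = ib_dma_map_page(dev, virt_to_page(va),
                                    offset_in_page(va), len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, *dma_addr))
                return -ENOMEM;        /* mirrors the fatal_err paths above */
        return 0;
}

In the patch itself the offset and length come from the surrounding loop state (page_off, sge_bytes), but the shape of the ib_dma_map_page() call and the mapping-error check are the same.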
@@ -245,6 +251,35 @@ static int map_xdr(struct svcxprt_rdma *xprt,
         return 0;
 }

+static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
+                              struct xdr_buf *xdr,
+                              u32 xdr_off, size_t len, int dir)
+{
+        struct page *page;
+        dma_addr_t dma_addr;
+        if (xdr_off < xdr->head[0].iov_len) {
+                /* This offset is in the head */
+                xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
+                page = virt_to_page(xdr->head[0].iov_base);
+        } else {
+                xdr_off -= xdr->head[0].iov_len;
+                if (xdr_off < xdr->page_len) {
+                        /* This offset is in the page list */
+                        page = xdr->pages[xdr_off >> PAGE_SHIFT];
+                        xdr_off &= ~PAGE_MASK;
+                } else {
+                        /* This offset is in the tail */
+                        xdr_off -= xdr->page_len;
+                        xdr_off += (unsigned long)
+                                xdr->tail[0].iov_base & ~PAGE_MASK;
+                        page = virt_to_page(xdr->tail[0].iov_base);
+                }
+        }
+        dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
+                                   min_t(size_t, PAGE_SIZE, len), dir);
+        return dma_addr;
+}
+
 /* Assumptions:
  * - We are using FRMR
  * - or -
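
The new dma_map_xdr() helper is the core of the change: it translates a byte offset into the reply xdr_buf into a (page, offset) pair and maps at most one page of it. Below is a rough user-space sketch of just that offset arithmetic; struct fake_xdr_buf and the FAKE_PAGE_* constants are stand-ins invented here so the decision logic (head, then page list, then tail) can be compiled and run outside the kernel.

#include <stdio.h>
#include <stddef.h>

#define FAKE_PAGE_SHIFT 12
#define FAKE_PAGE_SIZE  (1UL << FAKE_PAGE_SHIFT)

/* Simplified stand-in for the lengths dma_map_xdr() consults. */
struct fake_xdr_buf {
        size_t head_len;        /* xdr->head[0].iov_len */
        size_t page_len;        /* xdr->page_len */
        size_t tail_len;        /* xdr->tail[0].iov_len */
};

/* Mirror of the branch structure in dma_map_xdr(): classify xdr_off and
 * report the page-list index and intra-page offset that would be mapped.
 * (In the kernel, the head and tail cases also add the offset of the
 * iov_base within its page before calling ib_dma_map_page().) */
static const char *resolve_xdr_off(const struct fake_xdr_buf *xdr,
                                   size_t xdr_off,
                                   size_t *page_no, size_t *page_off)
{
        if (xdr_off < xdr->head_len) {
                *page_no = 0;
                *page_off = xdr_off;
                return "head";
        }
        xdr_off -= xdr->head_len;
        if (xdr_off < xdr->page_len) {
                *page_no = xdr_off >> FAKE_PAGE_SHIFT;
                *page_off = xdr_off & (FAKE_PAGE_SIZE - 1);
                return "pages";
        }
        xdr_off -= xdr->page_len;
        *page_no = 0;
        *page_off = xdr_off;
        return "tail";
}

int main(void)
{
        struct fake_xdr_buf xdr = {
                .head_len = 200,
                .page_len = 3 * FAKE_PAGE_SIZE,
                .tail_len = 40,
        };
        size_t offs[] = { 0, 199, 200, 200 + FAKE_PAGE_SIZE + 17,
                          200 + 3 * FAKE_PAGE_SIZE + 5 };
        size_t i;

        for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
                size_t page_no, page_off;
                const char *where = resolve_xdr_off(&xdr, offs[i],
                                                    &page_no, &page_off);
                printf("xdr_off=%zu -> %s page_no=%zu page_off=%zu\n",
                       offs[i], where, page_no, page_off);
        }
        return 0;
}

The kernel version then hands the resolved page and offset to ib_dma_map_page() with a length capped at min_t(size_t, PAGE_SIZE, len), as shown in the hunk above; the callers changed below drive it with a running xdr_off.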
@@ -293,10 +328,9 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                 sge[sge_no].length = sge_bytes;
                 if (!vec->frmr) {
                         sge[sge_no].addr =
-                                ib_dma_map_single(xprt->sc_cm_id->device,
-                                                  (void *)
-                                                  vec->sge[xdr_sge_no].iov_base + sge_off,
-                                                  sge_bytes, DMA_TO_DEVICE);
+                                dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
+                                            sge_bytes, DMA_TO_DEVICE);
+                        xdr_off += sge_bytes;
                         if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                  sge[sge_no].addr))
                                 goto err;
@@ -494,7 +528,8 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
  * In all three cases, this function prepares the RPCRDMA header in
  * sge[0], the 'type' parameter indicates the type to place in the
  * RPCRDMA header, and the 'byte_count' field indicates how much of
- * the XDR to include in this RDMA_SEND.
+ * the XDR to include in this RDMA_SEND. NB: The offset of the payload
+ * to send is zero in the XDR.
  */
 static int send_reply(struct svcxprt_rdma *rdma,
                       struct svc_rqst *rqstp,
@@ -536,23 +571,24 @@ static int send_reply(struct svcxprt_rdma *rdma,
         ctxt->sge[0].lkey = rdma->sc_dma_lkey;
         ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
         ctxt->sge[0].addr =
-                ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
-                                  ctxt->sge[0].length, DMA_TO_DEVICE);
+                ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
+                                ctxt->sge[0].length, DMA_TO_DEVICE);
         if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                 goto err;
         atomic_inc(&rdma->sc_dma_used);

         ctxt->direction = DMA_TO_DEVICE;

-        /* Determine how many of our SGE are to be transmitted */
+        /* Map the payload indicated by 'byte_count' */
         for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+                int xdr_off = 0;
                 sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                 byte_count -= sge_bytes;
                 if (!vec->frmr) {
                         ctxt->sge[sge_no].addr =
-                                ib_dma_map_single(rdma->sc_cm_id->device,
-                                                  vec->sge[sge_no].iov_base,
-                                                  sge_bytes, DMA_TO_DEVICE);
+                                dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
+                                            sge_bytes, DMA_TO_DEVICE);
+                        xdr_off += sge_bytes;
                         if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                                  ctxt->sge[sge_no].addr))
                                 goto err;