Skip to content

Commit 99a0605

Browse files
Jiasen Lin authored and jonmason committed
NTB: ntb_perf: Fix address err in perf_copy_chunk
peer->outbuf is a virtual address obtained by ioremap; it cannot be converted to a physical address via virt_to_page and page_to_phys. That conversion results in a DMA error, because the destination address produced by page_to_phys is invalid.

This patch saves the MMIO address of NTB BARx in perf_setup_peer_mw and maps the BAR space to a DMA address after the DMA channel is assigned. It then fills the destination address of the DMA descriptor with this DMA address, guaranteeing that memory write requests fall into the memory window of NTB BARx whether the IOMMU is enabled or disabled.

Fixes: 5648e56 ("NTB: ntb_perf: Add full multi-port NTB API support")
Signed-off-by: Jiasen Lin <[email protected]>
Reviewed-by: Logan Gunthorpe <[email protected]>
Signed-off-by: Jon Mason <[email protected]>
1 parent bb81bf6 commit 99a0605

File tree

1 file changed

+47
-10
lines changed

1 file changed

+47
-10
lines changed

drivers/ntb/test/ntb_perf.c

Lines changed: 47 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,8 @@ struct perf_peer {
149149
u64 outbuf_xlat;
150150
resource_size_t outbuf_size;
151151
void __iomem *outbuf;
152-
152+
phys_addr_t out_phys_addr;
153+
dma_addr_t dma_dst_addr;
153154
/* Inbound MW params */
154155
dma_addr_t inbuf_xlat;
155156
resource_size_t inbuf_size;
@@ -782,6 +783,10 @@ static int perf_copy_chunk(struct perf_thread *pthr,
782783
struct dmaengine_unmap_data *unmap;
783784
struct device *dma_dev;
784785
int try = 0, ret = 0;
786+
struct perf_peer *peer = pthr->perf->test_peer;
787+
void __iomem *vbase;
788+
void __iomem *dst_vaddr;
789+
dma_addr_t dst_dma_addr;
785790

786791
if (!use_dma) {
787792
memcpy_toio(dst, src, len);
@@ -794,6 +799,10 @@ static int perf_copy_chunk(struct perf_thread *pthr,
794799
offset_in_page(dst), len))
795800
return -EIO;
796801

802+
vbase = peer->outbuf;
803+
dst_vaddr = dst;
804+
dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
805+
797806
unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
798807
if (!unmap)
799808
return -ENOMEM;
@@ -807,8 +816,7 @@ static int perf_copy_chunk(struct perf_thread *pthr,
807816
}
808817
unmap->to_cnt = 1;
809818

810-
unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst),
811-
offset_in_page(dst), len, DMA_FROM_DEVICE);
819+
unmap->addr[1] = dst_dma_addr;
812820
if (dma_mapping_error(dma_dev, unmap->addr[1])) {
813821
ret = -EIO;
814822
goto err_free_resource;
@@ -865,6 +873,7 @@ static int perf_init_test(struct perf_thread *pthr)
865873
{
866874
struct perf_ctx *perf = pthr->perf;
867875
dma_cap_mask_t dma_mask;
876+
struct perf_peer *peer = pthr->perf->test_peer;
868877

869878
pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
870879
dev_to_node(&perf->ntb->dev));
@@ -882,15 +891,33 @@ static int perf_init_test(struct perf_thread *pthr)
882891
if (!pthr->dma_chan) {
883892
dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
884893
pthr->tidx);
885-
atomic_dec(&perf->tsync);
886-
wake_up(&perf->twait);
887-
kfree(pthr->src);
888-
return -ENODEV;
894+
goto err_free;
889895
}
896+
peer->dma_dst_addr =
897+
dma_map_resource(pthr->dma_chan->device->dev,
898+
peer->out_phys_addr, peer->outbuf_size,
899+
DMA_FROM_DEVICE, 0);
900+
if (dma_mapping_error(pthr->dma_chan->device->dev,
901+
peer->dma_dst_addr)) {
902+
dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n",
903+
pthr->tidx);
904+
peer->dma_dst_addr = 0;
905+
dma_release_channel(pthr->dma_chan);
906+
goto err_free;
907+
}
908+
dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
909+
pthr->tidx,
910+
&peer->out_phys_addr,
911+
&peer->dma_dst_addr);
890912

891913
atomic_set(&pthr->dma_sync, 0);
892-
893914
return 0;
915+
916+
err_free:
917+
atomic_dec(&perf->tsync);
918+
wake_up(&perf->twait);
919+
kfree(pthr->src);
920+
return -ENODEV;
894921
}
895922

896923
static int perf_run_test(struct perf_thread *pthr)
@@ -978,8 +1005,13 @@ static void perf_clear_test(struct perf_thread *pthr)
9781005
* We call it anyway just to be sure of the transfers completion.
9791006
*/
9801007
(void)dmaengine_terminate_sync(pthr->dma_chan);
981-
982-
dma_release_channel(pthr->dma_chan);
1008+
if (pthr->perf->test_peer->dma_dst_addr)
1009+
dma_unmap_resource(pthr->dma_chan->device->dev,
1010+
pthr->perf->test_peer->dma_dst_addr,
1011+
pthr->perf->test_peer->outbuf_size,
1012+
DMA_FROM_DEVICE, 0);
1013+
if (pthr->dma_chan)
1014+
dma_release_channel(pthr->dma_chan);
9831015

9841016
no_dma_notify:
9851017
atomic_dec(&perf->tsync);
@@ -1194,6 +1226,9 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
11941226
pos += scnprintf(buf + pos, buf_size - pos,
11951227
"\tOut buffer addr 0x%pK\n", peer->outbuf);
11961228

1229+
pos += scnprintf(buf + pos, buf_size - pos,
1230+
"\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
1231+
11971232
pos += scnprintf(buf + pos, buf_size - pos,
11981233
"\tOut buffer size %pa\n", &peer->outbuf_size);
11991234

@@ -1388,6 +1423,8 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
13881423
if (!peer->outbuf)
13891424
return -ENOMEM;
13901425

1426+
peer->out_phys_addr = phys_addr;
1427+
13911428
if (max_mw_size && peer->outbuf_size > max_mw_size) {
13921429
peer->outbuf_size = max_mw_size;
13931430
dev_warn(&peer->perf->ntb->dev,

0 commit comments

Comments
 (0)